Diffstat (limited to 'arch')
705 files changed, 24530 insertions, 13875 deletions
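Several of the per-architecture hunks below are mechanical conversions to the credentials API: direct reads of task_struct uid/gid/euid fields are replaced by current_uid()/current_gid()/current_cred() for the current task, and by __task_cred() under rcu_read_lock() when inspecting another task (see, for example, the ia64 perfmon hunk). The following is a minimal sketch of that pattern, not code from this diff; the function name is hypothetical and it assumes the cred API introduced by this series.

        #include <linux/sched.h>
        #include <linux/cred.h>
        #include <linux/capability.h>
        #include <linux/rcupdate.h>

        /* Hypothetical permission check mirroring the ia64
         * pfm_bad_permissions() conversion below: credentials of
         * another task are read through __task_cred() under
         * rcu_read_lock(), never via task->uid and friends.
         */
        static int example_bad_permissions(struct task_struct *task)
        {
                const struct cred *tcred;
                uid_t uid = current_uid();
                gid_t gid = current_gid();
                int ret;

                rcu_read_lock();
                tcred = __task_cred(task);
                ret = ((uid != tcred->euid) ||
                       (uid != tcred->suid) ||
                       (uid != tcred->uid)  ||
                       (gid != tcred->egid) ||
                       (gid != tcred->sgid) ||
                       (gid != tcred->gid)) && !capable(CAP_SYS_PTRACE);
                rcu_read_unlock();

                return ret;
        }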
diff --git a/arch/alpha/kernel/asm-offsets.c b/arch/alpha/kernel/asm-offsets.c index 4b18cd94d59d..6ff8886e7e22 100644 --- a/arch/alpha/kernel/asm-offsets.c +++ b/arch/alpha/kernel/asm-offsets.c @@ -19,15 +19,18 @@ void foo(void) BLANK(); DEFINE(TASK_BLOCKED, offsetof(struct task_struct, blocked)); - DEFINE(TASK_UID, offsetof(struct task_struct, uid)); - DEFINE(TASK_EUID, offsetof(struct task_struct, euid)); - DEFINE(TASK_GID, offsetof(struct task_struct, gid)); - DEFINE(TASK_EGID, offsetof(struct task_struct, egid)); + DEFINE(TASK_CRED, offsetof(struct task_struct, cred)); DEFINE(TASK_REAL_PARENT, offsetof(struct task_struct, real_parent)); DEFINE(TASK_GROUP_LEADER, offsetof(struct task_struct, group_leader)); DEFINE(TASK_TGID, offsetof(struct task_struct, tgid)); BLANK(); + DEFINE(CRED_UID, offsetof(struct cred, uid)); + DEFINE(CRED_EUID, offsetof(struct cred, euid)); + DEFINE(CRED_GID, offsetof(struct cred, gid)); + DEFINE(CRED_EGID, offsetof(struct cred, egid)); + BLANK(); + DEFINE(SIZEOF_PT_REGS, sizeof(struct pt_regs)); DEFINE(PT_PTRACED, PT_PTRACED); DEFINE(CLONE_VM, CLONE_VM); diff --git a/arch/alpha/kernel/entry.S b/arch/alpha/kernel/entry.S index 5fc61e281ac7..f77345bc66a9 100644 --- a/arch/alpha/kernel/entry.S +++ b/arch/alpha/kernel/entry.S @@ -850,8 +850,9 @@ osf_getpriority: sys_getxuid: .prologue 0 ldq $2, TI_TASK($8) - ldl $0, TASK_UID($2) - ldl $1, TASK_EUID($2) + ldq $3, TASK_CRED($2) + ldl $0, CRED_UID($3) + ldl $1, CRED_EUID($3) stq $1, 80($sp) ret .end sys_getxuid @@ -862,8 +863,9 @@ sys_getxuid: sys_getxgid: .prologue 0 ldq $2, TI_TASK($8) - ldl $0, TASK_GID($2) - ldl $1, TASK_EGID($2) + ldq $3, TASK_CRED($2) + ldl $0, CRED_GID($3) + ldl $1, CRED_EGID($3) stq $1, 80($sp) ret .end sys_getxgid diff --git a/arch/arm/mach-ixp4xx/fsg-setup.c b/arch/arm/mach-ixp4xx/fsg-setup.c index e7c6386782ed..5add22fc9899 100644 --- a/arch/arm/mach-ixp4xx/fsg-setup.c +++ b/arch/arm/mach-ixp4xx/fsg-setup.c @@ -177,7 +177,6 @@ static irqreturn_t fsg_reset_handler(int irq, void *dev_id) static void __init fsg_init(void) { - DECLARE_MAC_BUF(mac_buf); uint8_t __iomem *f; ixp4xx_sys_init(); @@ -256,10 +255,10 @@ static void __init fsg_init(void) #endif iounmap(f); } - printk(KERN_INFO "FSG: Using MAC address %s for port 0\n", - print_mac(mac_buf, fsg_plat_eth[0].hwaddr)); - printk(KERN_INFO "FSG: Using MAC address %s for port 1\n", - print_mac(mac_buf, fsg_plat_eth[1].hwaddr)); + printk(KERN_INFO "FSG: Using MAC address %pM for port 0\n", + fsg_plat_eth[0].hwaddr); + printk(KERN_INFO "FSG: Using MAC address %pM for port 1\n", + fsg_plat_eth[1].hwaddr); } diff --git a/arch/arm/mach-ixp4xx/include/mach/qmgr.h b/arch/arm/mach-ixp4xx/include/mach/qmgr.h index 1e52b95cede5..0cbe6ceb67c5 100644 --- a/arch/arm/mach-ixp4xx/include/mach/qmgr.h +++ b/arch/arm/mach-ixp4xx/include/mach/qmgr.h @@ -12,6 +12,8 @@ #include <linux/io.h> #include <linux/kernel.h> +#define DEBUG_QMGR 0 + #define HALF_QUEUES 32 #define QUEUES 64 /* only 32 lower queues currently supported */ #define MAX_QUEUE_LENGTH 4 /* in dwords */ @@ -61,22 +63,51 @@ void qmgr_enable_irq(unsigned int queue); void qmgr_disable_irq(unsigned int queue); /* request_ and release_queue() must be called from non-IRQ context */ + +#if DEBUG_QMGR +extern char qmgr_queue_descs[QUEUES][32]; + int qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */, unsigned int nearly_empty_watermark, - unsigned int nearly_full_watermark); + unsigned int nearly_full_watermark, + const char *desc_format, const char* name); +#else +int 
__qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */, + unsigned int nearly_empty_watermark, + unsigned int nearly_full_watermark); +#define qmgr_request_queue(queue, len, nearly_empty_watermark, \ + nearly_full_watermark, desc_format, name) \ + __qmgr_request_queue(queue, len, nearly_empty_watermark, \ + nearly_full_watermark) +#endif + void qmgr_release_queue(unsigned int queue); static inline void qmgr_put_entry(unsigned int queue, u32 val) { extern struct qmgr_regs __iomem *qmgr_regs; +#if DEBUG_QMGR + BUG_ON(!qmgr_queue_descs[queue]); /* not yet requested */ + + printk(KERN_DEBUG "Queue %s(%i) put %X\n", + qmgr_queue_descs[queue], queue, val); +#endif __raw_writel(val, &qmgr_regs->acc[queue][0]); } static inline u32 qmgr_get_entry(unsigned int queue) { + u32 val; extern struct qmgr_regs __iomem *qmgr_regs; - return __raw_readl(&qmgr_regs->acc[queue][0]); + val = __raw_readl(&qmgr_regs->acc[queue][0]); +#if DEBUG_QMGR + BUG_ON(!qmgr_queue_descs[queue]); /* not yet requested */ + + printk(KERN_DEBUG "Queue %s(%i) get %X\n", + qmgr_queue_descs[queue], queue, val); +#endif + return val; } static inline int qmgr_get_stat1(unsigned int queue) diff --git a/arch/arm/mach-ixp4xx/ixp4xx_qmgr.c b/arch/arm/mach-ixp4xx/ixp4xx_qmgr.c index c6cb069a5a83..bfddc73d0a20 100644 --- a/arch/arm/mach-ixp4xx/ixp4xx_qmgr.c +++ b/arch/arm/mach-ixp4xx/ixp4xx_qmgr.c @@ -14,8 +14,6 @@ #include <linux/module.h> #include <mach/qmgr.h> -#define DEBUG 0 - struct qmgr_regs __iomem *qmgr_regs; static struct resource *mem_res; static spinlock_t qmgr_lock; @@ -23,6 +21,10 @@ static u32 used_sram_bitmap[4]; /* 128 16-dword pages */ static void (*irq_handlers[HALF_QUEUES])(void *pdev); static void *irq_pdevs[HALF_QUEUES]; +#if DEBUG_QMGR +char qmgr_queue_descs[QUEUES][32]; +#endif + void qmgr_set_irq(unsigned int queue, int src, void (*handler)(void *pdev), void *pdev) { @@ -70,6 +72,7 @@ void qmgr_disable_irq(unsigned int queue) spin_lock_irqsave(&qmgr_lock, flags); __raw_writel(__raw_readl(&qmgr_regs->irqen[0]) & ~(1 << queue), &qmgr_regs->irqen[0]); + __raw_writel(1 << queue, &qmgr_regs->irqstat[0]); /* clear */ spin_unlock_irqrestore(&qmgr_lock, flags); } @@ -81,9 +84,16 @@ static inline void shift_mask(u32 *mask) mask[0] <<= 1; } +#if DEBUG_QMGR int qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */, unsigned int nearly_empty_watermark, - unsigned int nearly_full_watermark) + unsigned int nearly_full_watermark, + const char *desc_format, const char* name) +#else +int __qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */, + unsigned int nearly_empty_watermark, + unsigned int nearly_full_watermark) +#endif { u32 cfg, addr = 0, mask[4]; /* in 16-dwords */ int err; @@ -151,12 +161,13 @@ int qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */, used_sram_bitmap[2] |= mask[2]; used_sram_bitmap[3] |= mask[3]; __raw_writel(cfg | (addr << 14), &qmgr_regs->sram[queue]); - spin_unlock_irq(&qmgr_lock); - -#if DEBUG - printk(KERN_DEBUG "qmgr: requested queue %i, addr = 0x%02X\n", - queue, addr); +#if DEBUG_QMGR + snprintf(qmgr_queue_descs[queue], sizeof(qmgr_queue_descs[0]), + desc_format, name); + printk(KERN_DEBUG "qmgr: requested queue %s(%i) addr = 0x%02X\n", + qmgr_queue_descs[queue], queue, addr); #endif + spin_unlock_irq(&qmgr_lock); return 0; err: @@ -189,6 +200,11 @@ void qmgr_release_queue(unsigned int queue) while (addr--) shift_mask(mask); +#if DEBUG_QMGR + printk(KERN_DEBUG "qmgr: releasing queue %s(%i)\n", + qmgr_queue_descs[queue], queue); + 
qmgr_queue_descs[queue][0] = '\x0'; +#endif __raw_writel(0, &qmgr_regs->sram[queue]); used_sram_bitmap[0] &= ~mask[0]; @@ -199,9 +215,10 @@ void qmgr_release_queue(unsigned int queue) spin_unlock_irq(&qmgr_lock); module_put(THIS_MODULE); -#if DEBUG - printk(KERN_DEBUG "qmgr: released queue %i\n", queue); -#endif + + while ((addr = qmgr_get_entry(queue))) + printk(KERN_ERR "qmgr: released queue %i not empty: 0x%08X\n", + queue, addr); } static int qmgr_init(void) @@ -272,5 +289,10 @@ EXPORT_SYMBOL(qmgr_regs); EXPORT_SYMBOL(qmgr_set_irq); EXPORT_SYMBOL(qmgr_enable_irq); EXPORT_SYMBOL(qmgr_disable_irq); +#if DEBUG_QMGR +EXPORT_SYMBOL(qmgr_queue_descs); EXPORT_SYMBOL(qmgr_request_queue); +#else +EXPORT_SYMBOL(__qmgr_request_queue); +#endif EXPORT_SYMBOL(qmgr_release_queue); diff --git a/arch/arm/mach-ixp4xx/nas100d-setup.c b/arch/arm/mach-ixp4xx/nas100d-setup.c index 0acd95ecf27e..921c947b5b6b 100644 --- a/arch/arm/mach-ixp4xx/nas100d-setup.c +++ b/arch/arm/mach-ixp4xx/nas100d-setup.c @@ -231,7 +231,6 @@ static irqreturn_t nas100d_reset_handler(int irq, void *dev_id) static void __init nas100d_init(void) { - DECLARE_MAC_BUF(mac_buf); uint8_t __iomem *f; int i; @@ -294,8 +293,8 @@ static void __init nas100d_init(void) #endif iounmap(f); } - printk(KERN_INFO "NAS100D: Using MAC address %s for port 0\n", - print_mac(mac_buf, nas100d_plat_eth[0].hwaddr)); + printk(KERN_INFO "NAS100D: Using MAC address %pM for port 0\n", + nas100d_plat_eth[0].hwaddr); } diff --git a/arch/arm/mach-ixp4xx/nslu2-setup.c b/arch/arm/mach-ixp4xx/nslu2-setup.c index bc9d920ae54f..ff6a08d02cc4 100644 --- a/arch/arm/mach-ixp4xx/nslu2-setup.c +++ b/arch/arm/mach-ixp4xx/nslu2-setup.c @@ -220,7 +220,6 @@ static struct sys_timer nslu2_timer = { static void __init nslu2_init(void) { - DECLARE_MAC_BUF(mac_buf); uint8_t __iomem *f; int i; @@ -275,8 +274,8 @@ static void __init nslu2_init(void) #endif iounmap(f); } - printk(KERN_INFO "NSLU2: Using MAC address %s for port 0\n", - print_mac(mac_buf, nslu2_plat_eth[0].hwaddr)); + printk(KERN_INFO "NSLU2: Using MAC address %pM for port 0\n", + nslu2_plat_eth[0].hwaddr); } diff --git a/arch/arm/mach-pxa/include/mach/palmasoc.h b/arch/arm/mach-pxa/include/mach/palmasoc.h new file mode 100644 index 000000000000..6c4b1f7de20a --- /dev/null +++ b/arch/arm/mach-pxa/include/mach/palmasoc.h @@ -0,0 +1,13 @@ +#ifndef _INCLUDE_PALMASOC_H_ +#define _INCLUDE_PALMASOC_H_ +struct palm27x_asoc_info { + int jack_gpio; +}; + +#ifdef CONFIG_SND_PXA2XX_SOC_PALM27X +void __init palm27x_asoc_set_pdata(struct palm27x_asoc_info *data); +#else +static inline void palm27x_asoc_set_pdata(struct palm27x_asoc_info *data) {} +#endif + +#endif diff --git a/arch/blackfin/boot/Makefile b/arch/blackfin/boot/Makefile index 522f3c124060..e028d13481a9 100644 --- a/arch/blackfin/boot/Makefile +++ b/arch/blackfin/boot/Makefile @@ -25,7 +25,7 @@ $(obj)/vmlinux.gz: $(obj)/vmlinux.bin FORCE $(obj)/vmImage: $(obj)/vmlinux.gz $(call if_changed,uimage) - @echo 'Kernel: $@ is ready' + @$(kecho) 'Kernel: $@ is ready' install: sh $(srctree)/$(src)/install.sh $(KERNELRELEASE) $(BOOTIMAGE) System.map "$(INSTALL_PATH)" diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 6bd91ed7cd03..7fa8f615ba6e 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -99,7 +99,7 @@ config GENERIC_IOMAP bool default y -config SCHED_NO_NO_OMIT_FRAME_POINTER +config SCHED_OMIT_FRAME_POINTER bool default y diff --git a/arch/ia64/hp/sim/simeth.c b/arch/ia64/hp/sim/simeth.c index 3d47839a0c48..e4d8fde68103 100644 --- a/arch/ia64/hp/sim/simeth.c +++ 
b/arch/ia64/hp/sim/simeth.c @@ -167,6 +167,15 @@ netdev_read(int fd, unsigned char *buf, unsigned int len) return ia64_ssc(fd, __pa(buf), len, 0, SSC_NETDEV_RECV); } +static const struct net_device_ops simeth_netdev_ops = { + .ndo_open = simeth_open, + .ndo_stop = simeth_close, + .ndo_start_xmit = simeth_tx, + .ndo_get_stats = simeth_get_stats, + .ndo_set_multicast_list = set_multicast_list, /* not yet used */ + +}; + /* * Function shared with module code, so cannot be in init section * @@ -206,14 +215,10 @@ simeth_probe1(void) memcpy(dev->dev_addr, mac_addr, sizeof(mac_addr)); - local = dev->priv; + local = netdev_priv(dev); local->simfd = fd; /* keep track of underlying file descriptor */ - dev->open = simeth_open; - dev->stop = simeth_close; - dev->hard_start_xmit = simeth_tx; - dev->get_stats = simeth_get_stats; - dev->set_multicast_list = set_multicast_list; /* no yet used */ + dev->netdev_ops = &simeth_netdev_ops; err = register_netdev(dev); if (err) { @@ -325,7 +330,7 @@ simeth_device_event(struct notifier_block *this,unsigned long event, void *ptr) * we get DOWN then UP. */ - local = dev->priv; + local = netdev_priv(dev); /* now do it for real */ r = event == NETDEV_UP ? netdev_attach(local->simfd, dev->irq, ntohl(ifa->ifa_local)): @@ -380,7 +385,7 @@ frame_print(unsigned char *from, unsigned char *frame, int len) static int simeth_tx(struct sk_buff *skb, struct net_device *dev) { - struct simeth_local *local = dev->priv; + struct simeth_local *local = netdev_priv(dev); #if 0 /* ensure we have at least ETH_ZLEN bytes (min frame size) */ @@ -443,7 +448,7 @@ simeth_rx(struct net_device *dev) int len; int rcv_count = SIMETH_RECV_MAX; - local = dev->priv; + local = netdev_priv(dev); /* * the loop concept has been borrowed from other drivers * looks to me like it's a throttling thing to avoid pushing to many @@ -507,7 +512,7 @@ simeth_interrupt(int irq, void *dev_id) static struct net_device_stats * simeth_get_stats(struct net_device *dev) { - struct simeth_local *local = dev->priv; + struct simeth_local *local = netdev_priv(dev); return &local->stats; } diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c index 5e92ae00bdbb..16ef61a91d95 100644 --- a/arch/ia64/ia32/sys_ia32.c +++ b/arch/ia64/ia32/sys_ia32.c @@ -1767,25 +1767,24 @@ groups16_from_user(struct group_info *group_info, short __user *grouplist) asmlinkage long sys32_getgroups16 (int gidsetsize, short __user *grouplist) { + const struct cred *cred = current_cred(); int i; if (gidsetsize < 0) return -EINVAL; - get_group_info(current->group_info); - i = current->group_info->ngroups; + i = cred->group_info->ngroups; if (gidsetsize) { if (i > gidsetsize) { i = -EINVAL; goto out; } - if (groups16_to_user(grouplist, current->group_info)) { + if (groups16_to_user(grouplist, cred->group_info)) { i = -EFAULT; goto out; } } out: - put_group_info(current->group_info); return i; } diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c index fab1d21a4f2c..f94aaa86933f 100644 --- a/arch/ia64/kernel/mca_drv.c +++ b/arch/ia64/kernel/mca_drv.c @@ -158,7 +158,7 @@ mca_handler_bh(unsigned long paddr, void *iip, unsigned long ipsr) ia64_mlogbuf_dump(); printk(KERN_ERR "OS_MCA: process [cpu %d, pid: %d, uid: %d, " "iip: %p, psr: 0x%lx,paddr: 0x%lx](%s) encounters MCA.\n", - raw_smp_processor_id(), current->pid, current->uid, + raw_smp_processor_id(), current->pid, current_uid(), iip, ipsr, paddr, current->comm); spin_lock(&mca_bh_lock); diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index 
6543a5547c84..0e499757309b 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c @@ -2220,8 +2220,8 @@ pfm_alloc_file(pfm_context_t *ctx) DPRINT(("new inode ino=%ld @%p\n", inode->i_ino, inode)); inode->i_mode = S_IFCHR|S_IRUGO; - inode->i_uid = current->fsuid; - inode->i_gid = current->fsgid; + inode->i_uid = current_fsuid(); + inode->i_gid = current_fsgid(); sprintf(name, "[%lu]", inode->i_ino); this.name = name; @@ -2399,22 +2399,33 @@ error_kmem: static int pfm_bad_permissions(struct task_struct *task) { + const struct cred *tcred; + uid_t uid = current_uid(); + gid_t gid = current_gid(); + int ret; + + rcu_read_lock(); + tcred = __task_cred(task); + /* inspired by ptrace_attach() */ DPRINT(("cur: uid=%d gid=%d task: euid=%d suid=%d uid=%d egid=%d sgid=%d\n", - current->uid, - current->gid, - task->euid, - task->suid, - task->uid, - task->egid, - task->sgid)); - - return ((current->uid != task->euid) - || (current->uid != task->suid) - || (current->uid != task->uid) - || (current->gid != task->egid) - || (current->gid != task->sgid) - || (current->gid != task->gid)) && !capable(CAP_SYS_PTRACE); + uid, + gid, + tcred->euid, + tcred->suid, + tcred->uid, + tcred->egid, + tcred->sgid)); + + ret = ((uid != tcred->euid) + || (uid != tcred->suid) + || (uid != tcred->uid) + || (gid != tcred->egid) + || (gid != tcred->sgid) + || (gid != tcred->gid)) && !capable(CAP_SYS_PTRACE); + + rcu_read_unlock(); + return ret; } static int diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c index e12500a9c443..e1821ca4c7df 100644 --- a/arch/ia64/kernel/signal.c +++ b/arch/ia64/kernel/signal.c @@ -229,7 +229,7 @@ ia64_rt_sigreturn (struct sigscratch *scr) si.si_errno = 0; si.si_code = SI_KERNEL; si.si_pid = task_pid_vnr(current); - si.si_uid = current->uid; + si.si_uid = current_uid(); si.si_addr = sc; force_sig_info(SIGSEGV, &si, current); return retval; @@ -326,7 +326,7 @@ force_sigsegv_info (int sig, void __user *addr) si.si_errno = 0; si.si_code = SI_KERNEL; si.si_pid = task_pid_vnr(current); - si.si_uid = current->uid; + si.si_uid = current_uid(); si.si_addr = addr; force_sig_info(SIGSEGV, &si, current); return 0; diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig index dbaed4a63815..29047d5c259a 100644 --- a/arch/m32r/Kconfig +++ b/arch/m32r/Kconfig @@ -273,7 +273,7 @@ config GENERIC_CALIBRATE_DELAY bool default y -config SCHED_NO_NO_OMIT_FRAME_POINTER +config SCHED_OMIT_FRAME_POINTER bool default y diff --git a/arch/m68k/fpsp040/setox.S b/arch/m68k/fpsp040/setox.S index 145af5447581..f1acf7e36d6b 100644 --- a/arch/m68k/fpsp040/setox.S +++ b/arch/m68k/fpsp040/setox.S @@ -36,9 +36,9 @@ | depending on their values, the program may run faster or slower -- | but no worse than 10% slower even in the extreme cases. | -| The program setoxm1 takes approximately ???/??? cycles for input +| The program setoxm1 takes approximately ??? / ??? cycles for input | argument X, 0.25 <= |X| < 70log2. For |X| < 0.25, it takes -| approximately ???/??? cycles. For the less common arguments, +| approximately ??? / ??? cycles. For the less common arguments, | depending on their values, the program may run faster or slower -- | but no worse than 10% slower even in the extreme cases. 
| diff --git a/arch/m68k/mac/baboon.c b/arch/m68k/mac/baboon.c index c7b25b0aacff..245d16d078ad 100644 --- a/arch/m68k/mac/baboon.c +++ b/arch/m68k/mac/baboon.c @@ -18,11 +18,14 @@ #include <asm/macints.h> #include <asm/mac_baboon.h> -/* #define DEBUG_BABOON */ /* #define DEBUG_IRQS */ +extern void mac_enable_irq(unsigned int); +extern void mac_disable_irq(unsigned int); + int baboon_present; static volatile struct baboon *baboon; +static unsigned char baboon_disabled; #if 0 extern int macide_ack_intr(struct ata_channel *); @@ -88,34 +91,51 @@ static irqreturn_t baboon_irq(int irq, void *dev_id) void __init baboon_register_interrupts(void) { - request_irq(IRQ_NUBUS_C, baboon_irq, IRQ_FLG_LOCK|IRQ_FLG_FAST, - "baboon", (void *) baboon); + baboon_disabled = 0; + request_irq(IRQ_NUBUS_C, baboon_irq, 0, "baboon", (void *)baboon); } -void baboon_irq_enable(int irq) { +/* + * The means for masking individual baboon interrupts remains a mystery, so + * enable the umbrella interrupt only when no baboon interrupt is disabled. + */ + +void baboon_irq_enable(int irq) +{ + int irq_idx = IRQ_IDX(irq); + #ifdef DEBUG_IRQUSE printk("baboon_irq_enable(%d)\n", irq); #endif - /* FIXME: figure out how to mask and unmask baboon interrupt sources */ - enable_irq(IRQ_NUBUS_C); + + baboon_disabled &= ~(1 << irq_idx); + if (!baboon_disabled) + mac_enable_irq(IRQ_NUBUS_C); } -void baboon_irq_disable(int irq) { +void baboon_irq_disable(int irq) +{ + int irq_idx = IRQ_IDX(irq); + #ifdef DEBUG_IRQUSE printk("baboon_irq_disable(%d)\n", irq); #endif - disable_irq(IRQ_NUBUS_C); + + baboon_disabled |= 1 << irq_idx; + if (baboon_disabled) + mac_disable_irq(IRQ_NUBUS_C); } -void baboon_irq_clear(int irq) { - int irq_idx = IRQ_IDX(irq); +void baboon_irq_clear(int irq) +{ + int irq_idx = IRQ_IDX(irq); baboon->mb_ifr &= ~(1 << irq_idx); } int baboon_irq_pending(int irq) { - int irq_idx = IRQ_IDX(irq); + int irq_idx = IRQ_IDX(irq); return baboon->mb_ifr & (1 << irq_idx); } diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c index c45e18449f32..8819b97be324 100644 --- a/arch/m68k/mac/config.c +++ b/arch/m68k/mac/config.c @@ -162,10 +162,7 @@ void __init config_mac(void) mach_init_IRQ = mac_init_IRQ; mach_get_model = mac_get_model; mach_gettimeoffset = mac_gettimeoffset; -#warning move to adb/via init -#if 0 mach_hwclk = mac_hwclk; -#endif mach_set_clock_mmss = mac_set_clock_mmss; mach_reset = mac_reset; mach_halt = mac_poweroff; diff --git a/arch/m68k/mac/debug.c b/arch/m68k/mac/debug.c index 2165740786a5..65dd77a742a3 100644 --- a/arch/m68k/mac/debug.c +++ b/arch/m68k/mac/debug.c @@ -24,7 +24,6 @@ #define BOOTINFO_COMPAT_1_0 #include <asm/setup.h> #include <asm/bootinfo.h> -#include <asm/machw.h> #include <asm/macints.h> extern unsigned long mac_videobase; diff --git a/arch/m68k/mac/macints.c b/arch/m68k/mac/macints.c index ecddac4a02b9..82e560c076ce 100644 --- a/arch/m68k/mac/macints.c +++ b/arch/m68k/mac/macints.c @@ -127,7 +127,6 @@ #include <asm/irq.h> #include <asm/traps.h> #include <asm/bootinfo.h> -#include <asm/machw.h> #include <asm/macintosh.h> #include <asm/mac_via.h> #include <asm/mac_psc.h> @@ -215,8 +214,8 @@ irqreturn_t mac_debug_handler(int, void *); /* #define DEBUG_MACINTS */ -static void mac_enable_irq(unsigned int irq); -static void mac_disable_irq(unsigned int irq); +void mac_enable_irq(unsigned int irq); +void mac_disable_irq(unsigned int irq); static struct irq_controller mac_irq_controller = { .name = "mac", @@ -275,7 +274,7 @@ void __init mac_init_IRQ(void) * These routines are just 
dispatchers to the VIA/OSS/PSC routines. */ -static void mac_enable_irq(unsigned int irq) +void mac_enable_irq(unsigned int irq) { int irq_src = IRQ_SRC(irq); @@ -308,7 +307,7 @@ static void mac_enable_irq(unsigned int irq) } } -static void mac_disable_irq(unsigned int irq) +void mac_disable_irq(unsigned int irq) { int irq_src = IRQ_SRC(irq); diff --git a/arch/m68k/mac/misc.c b/arch/m68k/mac/misc.c index 56d1f5676ade..a44c7086ab39 100644 --- a/arch/m68k/mac/misc.c +++ b/arch/m68k/mac/misc.c @@ -93,7 +93,7 @@ static void cuda_write_pram(int offset, __u8 data) #define cuda_write_pram NULL #endif -#ifdef CONFIG_ADB_PMU68K +#if 0 /* def CONFIG_ADB_PMU68K */ static long pmu_read_time(void) { struct adb_request req; @@ -148,7 +148,7 @@ static void pmu_write_pram(int offset, __u8 data) #define pmu_write_pram NULL #endif -#ifdef CONFIG_ADB_MACIISI +#if 0 /* def CONFIG_ADB_MACIISI */ extern int maciisi_request(struct adb_request *req, void (*done)(struct adb_request *), int nbytes, ...); @@ -717,13 +717,18 @@ int mac_hwclk(int op, struct rtc_time *t) unmktime(now, 0, &t->tm_year, &t->tm_mon, &t->tm_mday, &t->tm_hour, &t->tm_min, &t->tm_sec); +#if 0 printk("mac_hwclk: read %04d-%02d-%-2d %02d:%02d:%02d\n", - t->tm_year + 1900, t->tm_mon + 1, t->tm_mday, t->tm_hour, t->tm_min, t->tm_sec); + t->tm_year + 1900, t->tm_mon + 1, t->tm_mday, + t->tm_hour, t->tm_min, t->tm_sec); +#endif } else { /* write */ +#if 0 printk("mac_hwclk: tried to write %04d-%02d-%-2d %02d:%02d:%02d\n", - t->tm_year + 1900, t->tm_mon + 1, t->tm_mday, t->tm_hour, t->tm_min, t->tm_sec); + t->tm_year + 1900, t->tm_mon + 1, t->tm_mday, + t->tm_hour, t->tm_min, t->tm_sec); +#endif -#if 0 /* it trashes my rtc */ now = mktime(t->tm_year + 1900, t->tm_mon + 1, t->tm_mday, t->tm_hour, t->tm_min, t->tm_sec); @@ -742,7 +747,6 @@ int mac_hwclk(int op, struct rtc_time *t) case MAC_ADB_IISI: maciisi_write_time(now); } -#endif } return 0; } diff --git a/arch/m68k/mac/oss.c b/arch/m68k/mac/oss.c index 43d83e054b8e..8426501119ca 100644 --- a/arch/m68k/mac/oss.c +++ b/arch/m68k/mac/oss.c @@ -21,7 +21,6 @@ #include <linux/init.h> #include <asm/bootinfo.h> -#include <asm/machw.h> #include <asm/macintosh.h> #include <asm/macints.h> #include <asm/mac_via.h> diff --git a/arch/m68k/mac/via.c b/arch/m68k/mac/via.c index 1bdb03c73c0f..f01d418e64fe 100644 --- a/arch/m68k/mac/via.c +++ b/arch/m68k/mac/via.c @@ -32,15 +32,10 @@ #include <asm/bootinfo.h> #include <asm/macintosh.h> #include <asm/macints.h> -#include <asm/machw.h> #include <asm/mac_via.h> #include <asm/mac_psc.h> volatile __u8 *via1, *via2; -#if 0 -/* See note in mac_via.h about how this is possibly not useful */ -volatile long *via_memory_bogon=(long *)&via_memory_bogon; -#endif int rbv_present; int via_alt_mapping; EXPORT_SYMBOL(via_alt_mapping); @@ -66,7 +61,7 @@ static int gIER,gIFR,gBufA,gBufB; #define MAC_CLOCK_LOW (MAC_CLOCK_TICK&0xFF) #define MAC_CLOCK_HIGH (MAC_CLOCK_TICK>>8) -/* To disable a NuBus slot on Quadras we make the slot IRQ lines outputs, set +/* To disable a NuBus slot on Quadras we make that slot IRQ line an output set * high. On RBV we just use the slot interrupt enable register. On Macs with * genuine VIA chips we must use nubus_disabled to keep track of disabled slot * interrupts. 
When any slot IRQ is disabled we mask the (edge triggered) CA1 @@ -180,7 +175,7 @@ void __init via_init(void) via1[vT1CH] = 0; via1[vT2CL] = 0; via1[vT2CH] = 0; - via1[vACR] &= 0x3F; + via1[vACR] &= ~0xC0; /* setup T1 timer with no PB7 output */ via1[vACR] &= ~0x03; /* disable port A & B latches */ /* @@ -203,40 +198,41 @@ void __init via_init(void) /* Everything below this point is VIA2/RBV only... */ - if (oss_present) return; + if (oss_present) + return; -#if 1 /* Some machines support an alternate IRQ mapping that spreads */ /* Ethernet and Sound out to their own autolevel IRQs and moves */ /* VIA1 to level 6. A/UX uses this mapping and we do too. Note */ /* that the IIfx emulates this alternate mapping using the OSS. */ - switch(macintosh_config->ident) { - case MAC_MODEL_P475: - case MAC_MODEL_P475F: - case MAC_MODEL_P575: - case MAC_MODEL_Q605: - case MAC_MODEL_Q605_ACC: - case MAC_MODEL_C610: - case MAC_MODEL_Q610: - case MAC_MODEL_Q630: - case MAC_MODEL_C650: - case MAC_MODEL_Q650: - case MAC_MODEL_Q700: - case MAC_MODEL_Q800: - case MAC_MODEL_Q900: - case MAC_MODEL_Q950: + via_alt_mapping = 0; + if (macintosh_config->via_type == MAC_VIA_QUADRA) + switch (macintosh_config->ident) { + case MAC_MODEL_C660: + case MAC_MODEL_Q840: + /* not applicable */ + break; + case MAC_MODEL_P588: + case MAC_MODEL_TV: + case MAC_MODEL_PB140: + case MAC_MODEL_PB145: + case MAC_MODEL_PB160: + case MAC_MODEL_PB165: + case MAC_MODEL_PB165C: + case MAC_MODEL_PB170: + case MAC_MODEL_PB180: + case MAC_MODEL_PB180C: + case MAC_MODEL_PB190: + case MAC_MODEL_PB520: + /* not yet tested */ + break; + default: via_alt_mapping = 1; via1[vDirB] |= 0x40; via1[vBufB] &= ~0x40; break; - default: - via_alt_mapping = 0; - break; - } -#else - via_alt_mapping = 0; -#endif + } /* * Now initialize VIA2. For RBV we just kill all interrupts; @@ -252,14 +248,17 @@ void __init via_init(void) via2[vT1CH] = 0; via2[vT2CL] = 0; via2[vT2CH] = 0; - via2[vACR] &= 0x3F; + via2[vACR] &= ~0xC0; /* setup T1 timer with no PB7 output */ via2[vACR] &= ~0x03; /* disable port A & B latches */ } /* - * Set vPCR for SCSI interrupts (but not on RBV) + * Set vPCR for control line interrupts (but not on RBV) */ if (!rbv_present) { + /* For all VIA types, CA1 (SLOTS IRQ) and CB1 (ASC IRQ) + * are made negative edge triggered here. + */ if (macintosh_config->scsi_type == MAC_SCSI_OLD) { /* CB2 (IRQ) indep. input, positive edge */ /* CA2 (DRQ) indep. input, positive edge */ @@ -466,21 +465,6 @@ irqreturn_t via1_irq(int irq, void *dev_id) ++irq_num; irq_bit <<= 1; } while (events >= irq_bit); - -#if 0 /* freakin' pmu is doing weird stuff */ - if (!oss_present) { - /* This (still) seems to be necessary to get IDE - working. However, if you enable VBL interrupts, - you're screwed... */ - /* FIXME: should we check the SLOTIRQ bit before - pulling this stunt? */ - /* No, it won't be set. that's why we're doing this. 
*/ - via_irq_disable(IRQ_MAC_NUBUS); - via_irq_clear(IRQ_MAC_NUBUS); - m68k_handle_int(IRQ_MAC_NUBUS); - via_irq_enable(IRQ_MAC_NUBUS); - } -#endif return IRQ_HANDLED; } diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index f4af967a6b30..a5255e7c79e0 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -653,7 +653,7 @@ config GENERIC_CMOS_UPDATE bool default y -config SCHED_NO_NO_OMIT_FRAME_POINTER +config SCHED_OMIT_FRAME_POINTER bool default y diff --git a/arch/mips/kernel/kspd.c b/arch/mips/kernel/kspd.c index b0591ae0ce56..fd6e51224034 100644 --- a/arch/mips/kernel/kspd.c +++ b/arch/mips/kernel/kspd.c @@ -174,8 +174,8 @@ static unsigned int translate_open_flags(int flags) static void sp_setfsuidgid( uid_t uid, gid_t gid) { - current->fsuid = uid; - current->fsgid = gid; + current->cred->fsuid = uid; + current->cred->fsgid = gid; key_fsuid_changed(current); key_fsgid_changed(current); diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c index dc9eb72ed9de..5e77a3a21f98 100644 --- a/arch/mips/kernel/mips-mt-fpaff.c +++ b/arch/mips/kernel/mips-mt-fpaff.c @@ -51,6 +51,7 @@ asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len, int retval; struct task_struct *p; struct thread_info *ti; + uid_t euid; if (len < sizeof(new_mask)) return -EINVAL; @@ -76,9 +77,9 @@ asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len, */ get_task_struct(p); + euid = current_euid(); retval = -EPERM; - if ((current->euid != p->euid) && (current->euid != p->uid) && - !capable(CAP_SYS_NICE)) { + if (euid != p->euid && euid != p->uid && !capable(CAP_SYS_NICE)) { read_unlock(&tasklist_lock); goto out_unlock; } diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c index a1b3da6bad5c..010b27e01f7b 100644 --- a/arch/mips/kernel/vpe.c +++ b/arch/mips/kernel/vpe.c @@ -1085,8 +1085,8 @@ static int vpe_open(struct inode *inode, struct file *filp) v->load_addr = NULL; v->len = 0; - v->uid = filp->f_uid; - v->gid = filp->f_gid; + v->uid = filp->f_cred->fsuid; + v->gid = filp->f_cred->fsgid; #ifdef CONFIG_MIPS_APSP_KSPD /* get kspd to tell us when a syscall_exit happens */ diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c index 06213d1d6d95..f82544225e8e 100644 --- a/arch/parisc/kernel/signal.c +++ b/arch/parisc/kernel/signal.c @@ -182,7 +182,7 @@ give_sigsegv: si.si_errno = 0; si.si_code = SI_KERNEL; si.si_pid = task_pid_vnr(current); - si.si_uid = current->uid; + si.si_uid = current_uid(); si.si_addr = &frame->uc; force_sig_info(SIGSEGV, &si, current); return; diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 525c13a4de93..79f25cef32df 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -141,7 +141,7 @@ config GENERIC_NVRAM bool default y if PPC32 -config SCHED_NO_NO_OMIT_FRAME_POINTER +config SCHED_OMIT_FRAME_POINTER bool default y @@ -285,6 +285,10 @@ config IOMMU_VMERGE config IOMMU_HELPER def_bool PPC64 +config PPC_NEED_DMA_SYNC_OPS + def_bool y + depends on NOT_COHERENT_CACHE + config HOTPLUG_CPU bool "Support for enabling/disabling CPUs" depends on SMP && HOTPLUG && EXPERIMENTAL && (PPC_PSERIES || PPC_PMAC) @@ -322,7 +326,7 @@ config KEXEC config CRASH_DUMP bool "Build a kdump crash kernel" - depends on PPC_MULTIPLATFORM && PPC64 && RELOCATABLE + depends on (PPC64 && RELOCATABLE) || 6xx help Build a kernel suitable for use as a kdump capture kernel. 
The same kernel binary can be used as production kernel and dump @@ -401,23 +405,53 @@ config PPC_HAS_HASH_64K depends on PPC64 default n -config PPC_64K_PAGES - bool "64k page size" - depends on PPC64 - select PPC_HAS_HASH_64K +choice + prompt "Page size" + default PPC_4K_PAGES help - This option changes the kernel logical page size to 64k. On machines - without processor support for 64k pages, the kernel will simulate - them by loading each individual 4k page on demand transparently, - while on hardware with such support, it will be used to map - normal application pages. + Select the kernel logical page size. Increasing the page size + will reduce software overhead at each page boundary, allow + hardware prefetch mechanisms to be more effective, and allow + larger dma transfers increasing IO efficiency and reducing + overhead. However the utilization of memory will increase. + For example, each cached file will using a multiple of the + page size to hold its contents and the difference between the + end of file and the end of page is wasted. + + Some dedicated systems, such as software raid serving with + accelerated calculations, have shown significant increases. + + If you configure a 64 bit kernel for 64k pages but the + processor does not support them, then the kernel will simulate + them with 4k pages, loading them on demand, but with the + reduced software overhead and larger internal fragmentation. + For the 32 bit kernel, a large page option will not be offered + unless it is supported by the configured processor. + + If unsure, choose 4K_PAGES. + +config PPC_4K_PAGES + bool "4k page size" + +config PPC_16K_PAGES + bool "16k page size" if 44x + +config PPC_64K_PAGES + bool "64k page size" if 44x || PPC_STD_MMU_64 + select PPC_HAS_HASH_64K if PPC_STD_MMU_64 + +endchoice config FORCE_MAX_ZONEORDER int "Maximum zone order" - range 9 64 if PPC_64K_PAGES - default "9" if PPC_64K_PAGES - range 13 64 if PPC64 && !PPC_64K_PAGES - default "13" if PPC64 && !PPC_64K_PAGES + range 9 64 if PPC_STD_MMU_64 && PPC_64K_PAGES + default "9" if PPC_STD_MMU_64 && PPC_64K_PAGES + range 13 64 if PPC_STD_MMU_64 && !PPC_64K_PAGES + default "13" if PPC_STD_MMU_64 && !PPC_64K_PAGES + range 9 64 if PPC_STD_MMU_32 && PPC_16K_PAGES + default "9" if PPC_STD_MMU_32 && PPC_16K_PAGES + range 7 64 if PPC_STD_MMU_32 && PPC_64K_PAGES + default "7" if PPC_STD_MMU_32 && PPC_64K_PAGES range 11 64 default "11" help @@ -437,7 +471,7 @@ config FORCE_MAX_ZONEORDER config PPC_SUBPAGE_PROT bool "Support setting protections for 4k subpages" - depends on PPC_64K_PAGES + depends on PPC_STD_MMU_64 && PPC_64K_PAGES help This option adds support for a system call to allow user programs to set access permissions (read/write, readonly, or no access) diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug index 15eb27861fc7..08f7cc0a1953 100644 --- a/arch/powerpc/Kconfig.debug +++ b/arch/powerpc/Kconfig.debug @@ -2,6 +2,15 @@ menu "Kernel hacking" source "lib/Kconfig.debug" +config PRINT_STACK_DEPTH + int "Stack depth to print" if DEBUG_KERNEL + default 64 + help + This option allows you to set the stack depth that the kernel + prints in stack traces. This can be useful if your display is + too small and stack traces cause important information to + scroll off the screen. 
+ config DEBUG_STACKOVERFLOW bool "Check for stack overflows" depends on DEBUG_KERNEL diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index 1f0667069940..72d17f50e54f 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -107,7 +107,6 @@ KBUILD_CFLAGS += $(call cc-option,-mno-altivec) # (We use all available options to help semi-broken compilers) KBUILD_CFLAGS += $(call cc-option,-mno-spe) KBUILD_CFLAGS += $(call cc-option,-mspe=no) -KBUILD_CFLAGS += $(call cc-option,-mabi=no-spe) # Enable unit-at-a-time mode when possible. It shrinks the # kernel considerably. diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile index 3d3daa674299..f32829937aad 100644 --- a/arch/powerpc/boot/Makefile +++ b/arch/powerpc/boot/Makefile @@ -194,6 +194,7 @@ image-$(CONFIG_PPC_MAPLE) += zImage.pseries image-$(CONFIG_PPC_IBM_CELL_BLADE) += zImage.pseries image-$(CONFIG_PPC_PS3) += dtbImage.ps3 image-$(CONFIG_PPC_CELLEB) += zImage.pseries +image-$(CONFIG_PPC_CELL_QPACE) += zImage.pseries image-$(CONFIG_PPC_CHRP) += zImage.chrp image-$(CONFIG_PPC_EFIKA) += zImage.chrp image-$(CONFIG_PPC_PMAC) += zImage.pmac diff --git a/arch/powerpc/boot/devtree.c b/arch/powerpc/boot/devtree.c index 5d12336dc360..a7e21a35c03a 100644 --- a/arch/powerpc/boot/devtree.c +++ b/arch/powerpc/boot/devtree.c @@ -213,7 +213,7 @@ static int find_range(u32 *reg, u32 *ranges, int nregaddr, u32 range_addr[MAX_ADDR_CELLS]; u32 range_size[MAX_ADDR_CELLS]; - copy_val(range_addr, ranges + i, naddr); + copy_val(range_addr, ranges + i, nregaddr); copy_val(range_size, ranges + i + nregaddr + naddr, nsize); if (compare_reg(reg, range_addr, range_size)) diff --git a/arch/powerpc/boot/dts/asp834x-redboot.dts b/arch/powerpc/boot/dts/asp834x-redboot.dts index 6235fca445de..524af7ef9f26 100644 --- a/arch/powerpc/boot/dts/asp834x-redboot.dts +++ b/arch/powerpc/boot/dts/asp834x-redboot.dts @@ -199,8 +199,26 @@ reg = <0x2>; device_type = "ethernet-phy"; }; + + tbi0: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; }; + mdio@25520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x25520 0x20>; + + tbi1: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + + enet0: ethernet@24000 { cell-index = <0>; device_type = "network"; @@ -210,6 +228,7 @@ local-mac-address = [ 00 08 e5 11 32 33 ]; interrupts = <32 0x8 33 0x8 34 0x8>; interrupt-parent = <&ipic>; + tbi-handle = <&tbi0>; phy-handle = <&phy0>; linux,network-index = <0>; }; @@ -223,6 +242,7 @@ local-mac-address = [ 00 08 e5 11 32 34 ]; interrupts = <35 0x8 36 0x8 37 0x8>; interrupt-parent = <&ipic>; + tbi-handle = <&tbi1>; phy-handle = <&phy1>; linux,network-index = <1>; }; diff --git a/arch/powerpc/boot/dts/bamboo.dts b/arch/powerpc/boot/dts/bamboo.dts index 6ce0cc2c0208..aa68911f6560 100644 --- a/arch/powerpc/boot/dts/bamboo.dts +++ b/arch/powerpc/boot/dts/bamboo.dts @@ -269,7 +269,8 @@ * later cannot be changed. 
Chip supports a second * IO range but we don't use it for now */ - ranges = <0x02000000 0x00000000 0xa0000000 0x00000000 0xa0000000 0x00000000 0x20000000 + ranges = <0x02000000 0x00000000 0xa0000000 0x00000000 0xa0000000 0x00000000 0x40000000 + 0x02000000 0x00000000 0x00000000 0x00000000 0xe0000000 0x00000000 0x00100000 0x01000000 0x00000000 0x00000000 0x00000000 0xe8000000 0x00000000 0x00010000>; /* Inbound 2GB range starting at 0 */ diff --git a/arch/powerpc/boot/dts/canyonlands.dts b/arch/powerpc/boot/dts/canyonlands.dts index 79fe412c11c9..8b5ba8261a36 100644 --- a/arch/powerpc/boot/dts/canyonlands.dts +++ b/arch/powerpc/boot/dts/canyonlands.dts @@ -40,6 +40,7 @@ d-cache-size = <32768>; dcr-controller; dcr-access-method = "native"; + next-level-cache = <&L2C0>; }; }; @@ -104,6 +105,16 @@ dcr-reg = <0x00c 0x002>; }; + L2C0: l2c { + compatible = "ibm,l2-cache-460ex", "ibm,l2-cache"; + dcr-reg = <0x020 0x008 /* Internal SRAM DCR's */ + 0x030 0x008>; /* L2 cache DCR's */ + cache-line-size = <32>; /* 32 bytes */ + cache-size = <262144>; /* L2, 256K */ + interrupt-parent = <&UIC1>; + interrupts = <11 1>; + }; + plb { compatible = "ibm,plb-460ex", "ibm,plb4"; #address-cells = <2>; @@ -343,6 +354,7 @@ * later cannot be changed */ ranges = <0x02000000 0x00000000 0x80000000 0x0000000d 0x80000000 0x00000000 0x80000000 + 0x02000000 0x00000000 0x00000000 0x0000000c 0x0ee00000 0x00000000 0x00100000 0x01000000 0x00000000 0x00000000 0x0000000c 0x08000000 0x00000000 0x00010000>; /* Inbound 2GB range starting at 0 */ @@ -373,6 +385,7 @@ * later cannot be changed */ ranges = <0x02000000 0x00000000 0x80000000 0x0000000e 0x00000000 0x00000000 0x80000000 + 0x02000000 0x00000000 0x00000000 0x0000000f 0x00000000 0x00000000 0x00100000 0x01000000 0x00000000 0x00000000 0x0000000f 0x80000000 0x00000000 0x00010000>; /* Inbound 2GB range starting at 0 */ @@ -414,6 +427,7 @@ * later cannot be changed */ ranges = <0x02000000 0x00000000 0x80000000 0x0000000e 0x80000000 0x00000000 0x80000000 + 0x02000000 0x00000000 0x00000000 0x0000000f 0x00100000 0x00000000 0x00100000 0x01000000 0x00000000 0x00000000 0x0000000f 0x80010000 0x00000000 0x00010000>; /* Inbound 2GB range starting at 0 */ diff --git a/arch/powerpc/boot/dts/gef_sbc610.dts b/arch/powerpc/boot/dts/gef_sbc610.dts index e48cfa740c8a..9708b3423bbd 100644 --- a/arch/powerpc/boot/dts/gef_sbc610.dts +++ b/arch/powerpc/boot/dts/gef_sbc610.dts @@ -98,6 +98,12 @@ interrupt-parent = <&mpic>; }; + gef_gpio: gpio@7,14000 { + #gpio-cells = <2>; + compatible = "gef,sbc610-gpio"; + reg = <0x7 0x14000 0x24>; + gpio-controller; + }; }; soc@fef00000 { @@ -119,6 +125,11 @@ interrupt-parent = <&mpic>; dfsrr; + rtc@51 { + compatible = "epson,rx8581"; + reg = <0x00000051>; + }; + eti@6b { compatible = "dallas,ds1682"; reg = <0x6b>; diff --git a/arch/powerpc/boot/dts/ksi8560.dts b/arch/powerpc/boot/dts/ksi8560.dts index 49737589ffc8..3bfff47418db 100644 --- a/arch/powerpc/boot/dts/ksi8560.dts +++ b/arch/powerpc/boot/dts/ksi8560.dts @@ -141,8 +141,26 @@ reg = <0x2>; device_type = "ethernet-phy"; }; + + tbi0: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; }; + mdio@25520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x25520 0x20>; + + tbi1: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + + enet0: ethernet@24000 { device_type = "network"; model = "TSEC"; @@ -152,6 +170,7 @@ local-mac-address = [ 00 00 00 00 00 00 ]; interrupts = <0x1d 0x2 0x1e 0x2 0x22 0x2>; interrupt-parent = <&mpic>; + tbi-handle = <&tbi0>; 
phy-handle = <&PHY1>; }; @@ -164,6 +183,7 @@ local-mac-address = [ 00 00 00 00 00 00 ]; interrupts = <0x23 0x2 0x24 0x2 0x28 0x2>; interrupt-parent = <&mpic>; + tbi-handle = <&tbi1>; phy-handle = <&PHY2>; }; diff --git a/arch/powerpc/boot/dts/kuroboxHD.dts b/arch/powerpc/boot/dts/kuroboxHD.dts index 2e5a1a1812b6..8d725d10882f 100644 --- a/arch/powerpc/boot/dts/kuroboxHD.dts +++ b/arch/powerpc/boot/dts/kuroboxHD.dts @@ -76,7 +76,6 @@ XXXX add flash parts, rtc, ?? interrupt-parent = <&mpic>; rtc@32 { - device_type = "rtc"; compatible = "ricoh,rs5c372a"; reg = <0x32>; }; diff --git a/arch/powerpc/boot/dts/kuroboxHG.dts b/arch/powerpc/boot/dts/kuroboxHG.dts index e4916e69ad31..b13a11eb81b0 100644 --- a/arch/powerpc/boot/dts/kuroboxHG.dts +++ b/arch/powerpc/boot/dts/kuroboxHG.dts @@ -76,7 +76,6 @@ XXXX add flash parts, rtc, ?? interrupt-parent = <&mpic>; rtc@32 { - device_type = "rtc"; compatible = "ricoh,rs5c372a"; reg = <0x32>; }; diff --git a/arch/powerpc/boot/dts/lite5200.dts b/arch/powerpc/boot/dts/lite5200.dts index 2cf9a8768f44..3f7a5dce8de0 100644 --- a/arch/powerpc/boot/dts/lite5200.dts +++ b/arch/powerpc/boot/dts/lite5200.dts @@ -130,7 +130,6 @@ rtc@800 { // Real time clock compatible = "fsl,mpc5200-rtc"; - device_type = "rtc"; reg = <0x800 0x100>; interrupts = <1 5 0 1 6 0>; interrupt-parent = <&mpc5200_pic>; diff --git a/arch/powerpc/boot/dts/lite5200b.dts b/arch/powerpc/boot/dts/lite5200b.dts index 7bd5b9c399b8..63e3bb48e843 100644 --- a/arch/powerpc/boot/dts/lite5200b.dts +++ b/arch/powerpc/boot/dts/lite5200b.dts @@ -130,7 +130,6 @@ rtc@800 { // Real time clock compatible = "fsl,mpc5200b-rtc","fsl,mpc5200-rtc"; - device_type = "rtc"; reg = <0x800 0x100>; interrupts = <1 5 0 1 6 0>; interrupt-parent = <&mpc5200_pic>; diff --git a/arch/powerpc/boot/dts/motionpro.dts b/arch/powerpc/boot/dts/motionpro.dts index 9e3c921be164..52ba6f98b273 100644 --- a/arch/powerpc/boot/dts/motionpro.dts +++ b/arch/powerpc/boot/dts/motionpro.dts @@ -248,7 +248,6 @@ fsl5200-clocking; rtc@68 { - device_type = "rtc"; compatible = "dallas,ds1339"; reg = <0x68>; }; diff --git a/arch/powerpc/boot/dts/mpc8313erdb.dts b/arch/powerpc/boot/dts/mpc8313erdb.dts index 503031766825..d4df8b6857a4 100644 --- a/arch/powerpc/boot/dts/mpc8313erdb.dts +++ b/arch/powerpc/boot/dts/mpc8313erdb.dts @@ -190,6 +190,7 @@ local-mac-address = [ 00 00 00 00 00 00 ]; interrupts = <37 0x8 36 0x8 35 0x8>; interrupt-parent = <&ipic>; + tbi-handle = < &tbi0 >; phy-handle = < &phy1 >; fsl,magic-packet; @@ -210,6 +211,10 @@ reg = <0x4>; device_type = "ethernet-phy"; }; + tbi0: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; }; }; @@ -222,9 +227,24 @@ local-mac-address = [ 00 00 00 00 00 00 ]; interrupts = <34 0x8 33 0x8 32 0x8>; interrupt-parent = <&ipic>; + tbi-handle = < &tbi1 >; phy-handle = < &phy4 >; sleep = <&pmc 0x10000000>; fsl,magic-packet; + + mdio@25520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x25520 0x20>; + + tbi1: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + + }; serial0: serial@4500 { diff --git a/arch/powerpc/boot/dts/mpc8315erdb.dts b/arch/powerpc/boot/dts/mpc8315erdb.dts index 6b850670de1d..072c9b0f8c8e 100644 --- a/arch/powerpc/boot/dts/mpc8315erdb.dts +++ b/arch/powerpc/boot/dts/mpc8315erdb.dts @@ -117,7 +117,6 @@ interrupt-parent = <&ipic>; dfsrr; rtc@68 { - device_type = "rtc"; compatible = "dallas,ds1339"; reg = <0x68>; }; @@ -206,8 +205,25 @@ reg = <0x1>; device_type = "ethernet-phy"; }; + tbi0: tbi-phy@11 { + reg = <0x11>; + 
device_type = "tbi-phy"; + }; + }; + + mdio@25520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x25520 0x20>; + + tbi1: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; }; + enet0: ethernet@24000 { cell-index = <0>; device_type = "network"; @@ -217,6 +233,7 @@ local-mac-address = [ 00 00 00 00 00 00 ]; interrupts = <32 0x8 33 0x8 34 0x8>; interrupt-parent = <&ipic>; + tbi-handle = <&tbi0>; phy-handle = < &phy0 >; }; @@ -229,6 +246,7 @@ local-mac-address = [ 00 00 00 00 00 00 ]; interrupts = <35 0x8 36 0x8 37 0x8>; interrupt-parent = <&ipic>; + tbi-handle = <&tbi1>; phy-handle = < &phy1 >; }; diff --git a/arch/powerpc/boot/dts/mpc8349emitx.dts b/arch/powerpc/boot/dts/mpc8349emitx.dts index 4bdbaf4993a1..b5eda94a8e2a 100644 --- a/arch/powerpc/boot/dts/mpc8349emitx.dts +++ b/arch/powerpc/boot/dts/mpc8349emitx.dts @@ -85,7 +85,6 @@ dfsrr; rtc@68 { - device_type = "rtc"; compatible = "dallas,ds1339"; reg = <0x68>; interrupts = <18 0x8>; @@ -184,6 +183,22 @@ reg = <0x1c>; device_type = "ethernet-phy"; }; + tbi0: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + + mdio@25520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x25520 0x20>; + + tbi1: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; }; enet0: ethernet@24000 { @@ -195,6 +210,7 @@ local-mac-address = [ 00 00 00 00 00 00 ]; interrupts = <32 0x8 33 0x8 34 0x8>; interrupt-parent = <&ipic>; + tbi-handle = <&tbi0>; phy-handle = <&phy1c>; linux,network-index = <0>; }; @@ -211,6 +227,7 @@ /* Vitesse 7385 isn't on the MDIO bus */ fixed-link = <1 1 1000 0 0>; linux,network-index = <1>; + tbi-handle = <&tbi1>; }; serial0: serial@4500 { diff --git a/arch/powerpc/boot/dts/mpc8349emitxgp.dts b/arch/powerpc/boot/dts/mpc8349emitxgp.dts index fa40647ee62e..c87a6015e165 100644 --- a/arch/powerpc/boot/dts/mpc8349emitxgp.dts +++ b/arch/powerpc/boot/dts/mpc8349emitxgp.dts @@ -83,7 +83,6 @@ dfsrr; rtc@68 { - device_type = "rtc"; compatible = "dallas,ds1339"; reg = <0x68>; interrupts = <18 0x8>; @@ -163,6 +162,10 @@ reg = <0x1c>; device_type = "ethernet-phy"; }; + tbi0: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; }; enet0: ethernet@24000 { @@ -174,6 +177,7 @@ local-mac-address = [ 00 00 00 00 00 00 ]; interrupts = <32 0x8 33 0x8 34 0x8>; interrupt-parent = <&ipic>; + tbi-handle = <&tbi0>; phy-handle = <&phy1c>; linux,network-index = <0>; }; diff --git a/arch/powerpc/boot/dts/mpc834x_mds.dts b/arch/powerpc/boot/dts/mpc834x_mds.dts index c986c541e9bb..d9adba01c09c 100644 --- a/arch/powerpc/boot/dts/mpc834x_mds.dts +++ b/arch/powerpc/boot/dts/mpc834x_mds.dts @@ -185,8 +185,25 @@ reg = <0x1>; device_type = "ethernet-phy"; }; + tbi0: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + + mdio@25520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x25520 0x20>; + + tbi1: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; }; + enet0: ethernet@24000 { cell-index = <0>; device_type = "network"; @@ -196,6 +213,7 @@ local-mac-address = [ 00 00 00 00 00 00 ]; interrupts = <32 0x8 33 0x8 34 0x8>; interrupt-parent = <&ipic>; + tbi-handle = <&tbi0>; phy-handle = <&phy0>; linux,network-index = <0>; }; @@ -209,6 +227,7 @@ local-mac-address = [ 00 00 00 00 00 00 ]; interrupts = <35 0x8 36 0x8 37 0x8>; interrupt-parent = <&ipic>; + tbi-handle = <&tbi1>; phy-handle = <&phy1>; linux,network-index = <1>; }; diff --git a/arch/powerpc/boot/dts/mpc8377_mds.dts 
b/arch/powerpc/boot/dts/mpc8377_mds.dts index 0484561bd2c0..1d14d7052e6d 100644 --- a/arch/powerpc/boot/dts/mpc8377_mds.dts +++ b/arch/powerpc/boot/dts/mpc8377_mds.dts @@ -193,8 +193,25 @@ reg = <0x3>; device_type = "ethernet-phy"; }; + tbi0: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + + mdio@25520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x25520 0x20>; + + tbi1: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; }; + enet0: ethernet@24000 { cell-index = <0>; device_type = "network"; @@ -205,6 +222,7 @@ interrupts = <32 0x8 33 0x8 34 0x8>; phy-connection-type = "mii"; interrupt-parent = <&ipic>; + tbi-handle = <&tbi0>; phy-handle = <&phy2>; }; @@ -218,6 +236,7 @@ interrupts = <35 0x8 36 0x8 37 0x8>; phy-connection-type = "mii"; interrupt-parent = <&ipic>; + tbi-handle = <&tbi1>; phy-handle = <&phy3>; }; diff --git a/arch/powerpc/boot/dts/mpc8377_rdb.dts b/arch/powerpc/boot/dts/mpc8377_rdb.dts index 435ef3dd022d..9413af3b9925 100644 --- a/arch/powerpc/boot/dts/mpc8377_rdb.dts +++ b/arch/powerpc/boot/dts/mpc8377_rdb.dts @@ -117,7 +117,6 @@ interrupt-parent = <&ipic>; dfsrr; rtc@68 { - device_type = "rtc"; compatible = "dallas,ds1339"; reg = <0x68>; }; @@ -211,8 +210,25 @@ reg = <0x2>; device_type = "ethernet-phy"; }; + tbi0: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + + mdio@25520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x25520 0x20>; + + tbi1: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; }; + enet0: ethernet@24000 { cell-index = <0>; device_type = "network"; @@ -223,6 +239,7 @@ interrupts = <32 0x8 33 0x8 34 0x8>; phy-connection-type = "mii"; interrupt-parent = <&ipic>; + tbi-handle = <&tbi0>; phy-handle = <&phy2>; }; @@ -237,6 +254,7 @@ phy-connection-type = "mii"; interrupt-parent = <&ipic>; fixed-link = <1 1 1000 0 0>; + tbi-handle = <&tbi1>; }; serial0: serial@4500 { diff --git a/arch/powerpc/boot/dts/mpc8378_mds.dts b/arch/powerpc/boot/dts/mpc8378_mds.dts index 67a08d2e2ff2..b85fc02682d2 100644 --- a/arch/powerpc/boot/dts/mpc8378_mds.dts +++ b/arch/powerpc/boot/dts/mpc8378_mds.dts @@ -232,8 +232,25 @@ reg = <0x3>; device_type = "ethernet-phy"; }; + tbi0: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + + mdio@25520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x25520 0x20>; + + tbi1: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; }; + enet0: ethernet@24000 { cell-index = <0>; device_type = "network"; @@ -244,6 +261,7 @@ interrupts = <32 0x8 33 0x8 34 0x8>; phy-connection-type = "mii"; interrupt-parent = <&ipic>; + tbi-handle = <&tbi0>; phy-handle = <&phy2>; }; @@ -257,6 +275,7 @@ interrupts = <35 0x8 36 0x8 37 0x8>; phy-connection-type = "mii"; interrupt-parent = <&ipic>; + tbi-handle = <&tbi1>; phy-handle = <&phy3>; }; diff --git a/arch/powerpc/boot/dts/mpc8378_rdb.dts b/arch/powerpc/boot/dts/mpc8378_rdb.dts index b11e68f56a06..23c10ce22c2c 100644 --- a/arch/powerpc/boot/dts/mpc8378_rdb.dts +++ b/arch/powerpc/boot/dts/mpc8378_rdb.dts @@ -117,7 +117,6 @@ interrupt-parent = <&ipic>; dfsrr; rtc@68 { - device_type = "rtc"; compatible = "dallas,ds1339"; reg = <0x68>; }; @@ -211,8 +210,25 @@ reg = <0x2>; device_type = "ethernet-phy"; }; + tbi0: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + + mdio@25520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x25520 0x20>; + + tbi1: 
tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; }; + enet0: ethernet@24000 { cell-index = <0>; device_type = "network"; diff --git a/arch/powerpc/boot/dts/mpc8379_mds.dts b/arch/powerpc/boot/dts/mpc8379_mds.dts index 323370a2b5ff..acf06c438dbf 100644 --- a/arch/powerpc/boot/dts/mpc8379_mds.dts +++ b/arch/powerpc/boot/dts/mpc8379_mds.dts @@ -232,6 +232,22 @@ reg = <0x3>; device_type = "ethernet-phy"; }; + tbi0: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + + mdio@25520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x25520 0x20>; + + tbi1: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; }; enet0: ethernet@24000 { @@ -244,6 +260,7 @@ interrupts = <32 0x8 33 0x8 34 0x8>; phy-connection-type = "mii"; interrupt-parent = <&ipic>; + tbi-handle = <&tbi0>; phy-handle = <&phy2>; }; @@ -257,6 +274,7 @@ interrupts = <35 0x8 36 0x8 37 0x8>; phy-connection-type = "mii"; interrupt-parent = <&ipic>; + tbi-handle = <&tbi1>; phy-handle = <&phy3>; }; diff --git a/arch/powerpc/boot/dts/mpc8379_rdb.dts b/arch/powerpc/boot/dts/mpc8379_rdb.dts index 337af6ea26d3..72cdc3c4c7e3 100644 --- a/arch/powerpc/boot/dts/mpc8379_rdb.dts +++ b/arch/powerpc/boot/dts/mpc8379_rdb.dts @@ -117,7 +117,6 @@ interrupt-parent = <&ipic>; dfsrr; rtc@68 { - device_type = "rtc"; compatible = "dallas,ds1339"; reg = <0x68>; }; @@ -211,6 +210,22 @@ reg = <0x2>; device_type = "ethernet-phy"; }; + tbi0: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + + mdio@25520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x25520 0x20>; + + tbi1: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; }; enet0: ethernet@24000 { @@ -223,6 +238,7 @@ interrupts = <32 0x8 33 0x8 34 0x8>; phy-connection-type = "mii"; interrupt-parent = <&ipic>; + tbi-handle = <&tbi0>; phy-handle = <&phy2>; }; @@ -237,6 +253,7 @@ phy-connection-type = "mii"; interrupt-parent = <&ipic>; fixed-link = <1 1 1000 0 0>; + tbi-handle = <&tbi1>; }; serial0: serial@4500 { diff --git a/arch/powerpc/boot/dts/mpc8536ds.dts b/arch/powerpc/boot/dts/mpc8536ds.dts index 35db1e5440c7..3c905df1812c 100644 --- a/arch/powerpc/boot/dts/mpc8536ds.dts +++ b/arch/powerpc/boot/dts/mpc8536ds.dts @@ -155,6 +155,22 @@ reg = <1>; device_type = "ethernet-phy"; }; + tbi0: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + + mdio@26520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x26520 0x20>; + + tbi1: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; }; usb@22000 { @@ -186,6 +202,7 @@ local-mac-address = [ 00 00 00 00 00 00 ]; interrupts = <29 2 30 2 34 2>; interrupt-parent = <&mpic>; + tbi-handle = <&tbi0>; phy-handle = <&phy1>; phy-connection-type = "rgmii-id"; }; @@ -199,6 +216,7 @@ local-mac-address = [ 00 00 00 00 00 00 ]; interrupts = <31 2 32 2 33 2>; interrupt-parent = <&mpic>; + tbi-handle = <&tbi1>; phy-handle = <&phy0>; phy-connection-type = "rgmii-id"; }; diff --git a/arch/powerpc/boot/dts/mpc8540ads.dts b/arch/powerpc/boot/dts/mpc8540ads.dts index 9568bfaff8f7..79570ffe41b9 100644 --- a/arch/powerpc/boot/dts/mpc8540ads.dts +++ b/arch/powerpc/boot/dts/mpc8540ads.dts @@ -150,6 +150,34 @@ reg = <0x3>; device_type = "ethernet-phy"; }; + tbi0: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + + mdio@25520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x25520 0x20>; + + tbi1: tbi-phy@11 { + reg = <0x11>; + 
device_type = "tbi-phy"; + }; + }; + + mdio@26520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x26520 0x20>; + + tbi2: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; }; enet0: ethernet@24000 { @@ -161,6 +189,7 @@ local-mac-address = [ 00 00 00 00 00 00 ]; interrupts = <29 2 30 2 34 2>; interrupt-parent = <&mpic>; + tbi-handle = <&tbi0>; phy-handle = <&phy0>; }; @@ -173,6 +202,7 @@ local-mac-address = [ 00 00 00 00 00 00 ]; interrupts = <35 2 36 2 40 2>; interrupt-parent = <&mpic>; + tbi-handle = <&tbi1>; phy-handle = <&phy1>; }; @@ -185,6 +215,7 @@ local-mac-address = [ 00 00 00 00 00 00 ]; interrupts = <41 2>; interrupt-parent = <&mpic>; + tbi-handle = <&tbi2>; phy-handle = <&phy3>; }; diff --git a/arch/powerpc/boot/dts/mpc8541cds.dts b/arch/powerpc/boot/dts/mpc8541cds.dts index 6480f4fd96e0..221036a8ce23 100644 --- a/arch/powerpc/boot/dts/mpc8541cds.dts +++ b/arch/powerpc/boot/dts/mpc8541cds.dts @@ -144,6 +144,22 @@ reg = <0x1>; device_type = "ethernet-phy"; }; + tbi0: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + + mdio@25520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x25520 0x20>; + + tbi1: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; }; enet0: ethernet@24000 { @@ -155,6 +171,7 @@ local-mac-address = [ 00 00 00 00 00 00 ]; interrupts = <29 2 30 2 34 2>; interrupt-parent = <&mpic>; + tbi-handle = <&tbi0>; phy-handle = <&phy0>; }; @@ -167,6 +184,7 @@ local-mac-address = [ 00 00 00 00 00 00 ]; interrupts = <35 2 36 2 40 2>; interrupt-parent = <&mpic>; + tbi-handle = <&tbi1>; phy-handle = <&phy1>; }; diff --git a/arch/powerpc/boot/dts/mpc8544ds.dts b/arch/powerpc/boot/dts/mpc8544ds.dts index f1fb20737e3e..b9da42105066 100644 --- a/arch/powerpc/boot/dts/mpc8544ds.dts +++ b/arch/powerpc/boot/dts/mpc8544ds.dts @@ -116,8 +116,26 @@ reg = <0x1>; device_type = "ethernet-phy"; }; + + tbi0: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; }; + mdio@26520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x26520 0x20>; + + tbi1: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + + dma@21300 { #address-cells = <1>; #size-cells = <1>; @@ -169,6 +187,7 @@ interrupts = <29 2 30 2 34 2>; interrupt-parent = <&mpic>; phy-handle = <&phy0>; + tbi-handle = <&tbi0>; phy-connection-type = "rgmii-id"; }; @@ -182,6 +201,7 @@ interrupts = <31 2 32 2 33 2>; interrupt-parent = <&mpic>; phy-handle = <&phy1>; + tbi-handle = <&tbi1>; phy-connection-type = "rgmii-id"; }; diff --git a/arch/powerpc/boot/dts/mpc8548cds.dts b/arch/powerpc/boot/dts/mpc8548cds.dts index 431b496270dc..df774a7088ff 100644 --- a/arch/powerpc/boot/dts/mpc8548cds.dts +++ b/arch/powerpc/boot/dts/mpc8548cds.dts @@ -172,6 +172,46 @@ reg = <0x3>; device_type = "ethernet-phy"; }; + tbi0: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + + mdio@25520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x25520 0x20>; + + tbi1: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + + mdio@26520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x26520 0x20>; + + tbi2: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + + mdio@27520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x27520 0x20>; + + tbi3: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; }; enet0: 
ethernet@24000 { @@ -183,6 +223,7 @@ local-mac-address = [ 00 00 00 00 00 00 ]; interrupts = <29 2 30 2 34 2>; interrupt-parent = <&mpic>; + tbi-handle = <&tbi0>; phy-handle = <&phy0>; }; @@ -195,6 +236,7 @@ local-mac-address = [ 00 00 00 00 00 00 ]; interrupts = <35 2 36 2 40 2>; interrupt-parent = <&mpic>; + tbi-handle = <&tbi1>; phy-handle = <&phy1>; }; @@ -208,6 +250,7 @@ local-mac-address = [ 00 00 00 00 00 00 ]; interrupts = <31 2 32 2 33 2>; interrupt-parent = <&mpic>; + tbi-handle = <&tbi2>; phy-handle = <&phy2>; }; @@ -220,6 +263,7 @@ local-mac-address = [ 00 00 00 00 00 00 ]; interrupts = <37 2 38 2 39 2>; interrupt-parent = <&mpic>; + tbi-handle = <&tbi3>; phy-handle = <&phy3>; }; */ diff --git a/arch/powerpc/boot/dts/mpc8555cds.dts b/arch/powerpc/boot/dts/mpc8555cds.dts index d833a5c4f476..053b01e1c93b 100644 --- a/arch/powerpc/boot/dts/mpc8555cds.dts +++ b/arch/powerpc/boot/dts/mpc8555cds.dts @@ -144,6 +144,22 @@ reg = <0x1>; device_type = "ethernet-phy"; }; + tbi0: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + + mdio@25520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x25520 0x20>; + + tbi1: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; }; enet0: ethernet@24000 { @@ -155,6 +171,7 @@ local-mac-address = [ 00 00 00 00 00 00 ]; interrupts = <29 2 30 2 34 2>; interrupt-parent = <&mpic>; + tbi-handle = <&tbi0>; phy-handle = <&phy0>; }; @@ -167,6 +184,7 @@ local-mac-address = [ 00 00 00 00 00 00 ]; interrupts = <35 2 36 2 40 2>; interrupt-parent = <&mpic>; + tbi-handle = <&tbi1>; phy-handle = <&phy1>; }; diff --git a/arch/powerpc/boot/dts/mpc8560ads.dts b/arch/powerpc/boot/dts/mpc8560ads.dts index 4d1f2f284094..11b1bcbe14ce 100644 --- a/arch/powerpc/boot/dts/mpc8560ads.dts +++ b/arch/powerpc/boot/dts/mpc8560ads.dts @@ -145,6 +145,22 @@ reg = <0x3>; device_type = "ethernet-phy"; }; + tbi0: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + + mdio@25520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x25520 0x20>; + + tbi1: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; }; enet0: ethernet@24000 { @@ -156,6 +172,7 @@ local-mac-address = [ 00 00 00 00 00 00 ]; interrupts = <29 2 30 2 34 2>; interrupt-parent = <&mpic>; + tbi-handle = <&tbi0>; phy-handle = <&phy0>; }; @@ -168,6 +185,7 @@ local-mac-address = [ 00 00 00 00 00 00 ]; interrupts = <35 2 36 2 40 2>; interrupt-parent = <&mpic>; + tbi-handle = <&tbi1>; phy-handle = <&phy1>; }; diff --git a/arch/powerpc/boot/dts/mpc8568mds.dts b/arch/powerpc/boot/dts/mpc8568mds.dts index c80158f7741d..1955bd9e113d 100644 --- a/arch/powerpc/boot/dts/mpc8568mds.dts +++ b/arch/powerpc/boot/dts/mpc8568mds.dts @@ -179,6 +179,22 @@ reg = <0x3>; device_type = "ethernet-phy"; }; + tbi0: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + + mdio@25520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x25520 0x20>; + + tbi1: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; }; enet0: ethernet@24000 { @@ -190,6 +206,7 @@ local-mac-address = [ 00 00 00 00 00 00 ]; interrupts = <29 2 30 2 34 2>; interrupt-parent = <&mpic>; + tbi-handle = <&tbi0>; phy-handle = <&phy2>; }; @@ -202,6 +219,7 @@ local-mac-address = [ 00 00 00 00 00 00 ]; interrupts = <35 2 36 2 40 2>; interrupt-parent = <&mpic>; + tbi-handle = <&tbi1>; phy-handle = <&phy3>; }; diff --git a/arch/powerpc/boot/dts/mpc8572ds.dts b/arch/powerpc/boot/dts/mpc8572ds.dts index 
5c69b2fafd32..21459e161d02 100644 --- a/arch/powerpc/boot/dts/mpc8572ds.dts +++ b/arch/powerpc/boot/dts/mpc8572ds.dts @@ -63,6 +63,119 @@ device_type = "memory"; }; + localbus@ffe05000 { + #address-cells = <2>; + #size-cells = <1>; + compatible = "fsl,mpc8572-elbc", "fsl,elbc", "simple-bus"; + reg = <0 0xffe05000 0 0x1000>; + interrupts = <19 2>; + interrupt-parent = <&mpic>; + + ranges = <0x0 0x0 0x0 0xe8000000 0x08000000 + 0x1 0x0 0x0 0xe0000000 0x08000000 + 0x2 0x0 0x0 0xffa00000 0x00040000 + 0x3 0x0 0x0 0xffdf0000 0x00008000 + 0x4 0x0 0x0 0xffa40000 0x00040000 + 0x5 0x0 0x0 0xffa80000 0x00040000 + 0x6 0x0 0x0 0xffac0000 0x00040000>; + + nor@0,0 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "cfi-flash"; + reg = <0x0 0x0 0x8000000>; + bank-width = <2>; + device-width = <1>; + + ramdisk@0 { + reg = <0x0 0x03000000>; + readl-only; + }; + + diagnostic@3000000 { + reg = <0x03000000 0x00e00000>; + read-only; + }; + + dink@3e00000 { + reg = <0x03e00000 0x00200000>; + read-only; + }; + + kernel@4000000 { + reg = <0x04000000 0x00400000>; + read-only; + }; + + jffs2@4400000 { + reg = <0x04400000 0x03b00000>; + }; + + dtb@7f00000 { + reg = <0x07f00000 0x00080000>; + read-only; + }; + + u-boot@7f80000 { + reg = <0x07f80000 0x00080000>; + read-only; + }; + }; + + nand@2,0 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,mpc8572-fcm-nand", + "fsl,elbc-fcm-nand"; + reg = <0x2 0x0 0x40000>; + + u-boot@0 { + reg = <0x0 0x02000000>; + read-only; + }; + + jffs2@2000000 { + reg = <0x02000000 0x10000000>; + }; + + ramdisk@12000000 { + reg = <0x12000000 0x08000000>; + read-only; + }; + + kernel@1a000000 { + reg = <0x1a000000 0x04000000>; + }; + + dtb@1e000000 { + reg = <0x1e000000 0x01000000>; + read-only; + }; + + empty@1f000000 { + reg = <0x1f000000 0x21000000>; + }; + }; + + nand@4,0 { + compatible = "fsl,mpc8572-fcm-nand", + "fsl,elbc-fcm-nand"; + reg = <0x4 0x0 0x40000>; + }; + + nand@5,0 { + compatible = "fsl,mpc8572-fcm-nand", + "fsl,elbc-fcm-nand"; + reg = <0x5 0x0 0x40000>; + }; + + nand@6,0 { + compatible = "fsl,mpc8572-fcm-nand", + "fsl,elbc-fcm-nand"; + reg = <0x6 0x0 0x40000>; + }; + }; + soc8572@ffe00000 { #address-cells = <1>; #size-cells = <1>; @@ -225,6 +338,47 @@ interrupts = <10 1>; reg = <0x3>; }; + + tbi0: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + + mdio@25520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x25520 0x20>; + + tbi1: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + + mdio@26520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x26520 0x20>; + + tbi2: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + + mdio@27520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x27520 0x20>; + + tbi3: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; }; enet0: ethernet@24000 { @@ -236,6 +390,7 @@ local-mac-address = [ 00 00 00 00 00 00 ]; interrupts = <29 2 30 2 34 2>; interrupt-parent = <&mpic>; + tbi-handle = <&tbi0>; phy-handle = <&phy0>; phy-connection-type = "rgmii-id"; }; @@ -249,6 +404,7 @@ local-mac-address = [ 00 00 00 00 00 00 ]; interrupts = <35 2 36 2 40 2>; interrupt-parent = <&mpic>; + tbi-handle = <&tbi1>; phy-handle = <&phy1>; phy-connection-type = "rgmii-id"; }; @@ -262,6 +418,7 @@ local-mac-address = [ 00 00 00 00 00 00 ]; interrupts = <31 2 32 2 33 2>; interrupt-parent = <&mpic>; + tbi-handle = <&tbi2>; phy-handle = <&phy2>; 
phy-connection-type = "rgmii-id"; }; @@ -275,6 +432,7 @@ local-mac-address = [ 00 00 00 00 00 00 ]; interrupts = <37 2 38 2 39 2>; interrupt-parent = <&mpic>; + tbi-handle = <&tbi3>; phy-handle = <&phy3>; phy-connection-type = "rgmii-id"; }; diff --git a/arch/powerpc/boot/dts/mpc8572ds_camp_core0.dts b/arch/powerpc/boot/dts/mpc8572ds_camp_core0.dts new file mode 100644 index 000000000000..c114c4ee9931 --- /dev/null +++ b/arch/powerpc/boot/dts/mpc8572ds_camp_core0.dts @@ -0,0 +1,483 @@ +/* + * MPC8572 DS Core0 Device Tree Source in CAMP mode. + * + * In CAMP mode, each core needs to have its own dts. Only mpic and L2 cache + * can be shared, all the other devices must be assigned to one core only. + * This dts file allows core0 to have memory, l2, i2c, dma1, global-util, eth0, + * eth1, crypto, pci0, pci1. + * + * Copyright 2007, 2008 Freescale Semiconductor Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +/dts-v1/; +/ { + model = "fsl,MPC8572DS"; + compatible = "fsl,MPC8572DS", "fsl,MPC8572DS-CAMP"; + #address-cells = <1>; + #size-cells = <1>; + + aliases { + ethernet0 = &enet0; + ethernet1 = &enet1; + serial0 = &serial0; + pci0 = &pci0; + pci1 = &pci1; + }; + + cpus { + #address-cells = <1>; + #size-cells = <0>; + + PowerPC,8572@0 { + device_type = "cpu"; + reg = <0x0>; + d-cache-line-size = <32>; // 32 bytes + i-cache-line-size = <32>; // 32 bytes + d-cache-size = <0x8000>; // L1, 32K + i-cache-size = <0x8000>; // L1, 32K + timebase-frequency = <0>; + bus-frequency = <0>; + clock-frequency = <0>; + next-level-cache = <&L2>; + }; + + }; + + memory { + device_type = "memory"; + reg = <0x0 0x0>; // Filled by U-Boot + }; + + soc8572@ffe00000 { + #address-cells = <1>; + #size-cells = <1>; + device_type = "soc"; + compatible = "simple-bus"; + ranges = <0x0 0xffe00000 0x100000>; + reg = <0xffe00000 0x1000>; // CCSRBAR & soc regs, remove once parse code for immrbase fixed + bus-frequency = <0>; // Filled out by uboot. 
+ + memory-controller@2000 { + compatible = "fsl,mpc8572-memory-controller"; + reg = <0x2000 0x1000>; + interrupt-parent = <&mpic>; + interrupts = <18 2>; + }; + + memory-controller@6000 { + compatible = "fsl,mpc8572-memory-controller"; + reg = <0x6000 0x1000>; + interrupt-parent = <&mpic>; + interrupts = <18 2>; + }; + + L2: l2-cache-controller@20000 { + compatible = "fsl,mpc8572-l2-cache-controller"; + reg = <0x20000 0x1000>; + cache-line-size = <32>; // 32 bytes + cache-size = <0x80000>; // L2, 512K + interrupt-parent = <&mpic>; + interrupts = <16 2>; + }; + + i2c@3000 { + #address-cells = <1>; + #size-cells = <0>; + cell-index = <0>; + compatible = "fsl-i2c"; + reg = <0x3000 0x100>; + interrupts = <43 2>; + interrupt-parent = <&mpic>; + dfsrr; + }; + + i2c@3100 { + #address-cells = <1>; + #size-cells = <0>; + cell-index = <1>; + compatible = "fsl-i2c"; + reg = <0x3100 0x100>; + interrupts = <43 2>; + interrupt-parent = <&mpic>; + dfsrr; + }; + + dma@21300 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,mpc8572-dma", "fsl,eloplus-dma"; + reg = <0x21300 0x4>; + ranges = <0x0 0x21100 0x200>; + cell-index = <0>; + dma-channel@0 { + compatible = "fsl,mpc8572-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x0 0x80>; + cell-index = <0>; + interrupt-parent = <&mpic>; + interrupts = <20 2>; + }; + dma-channel@80 { + compatible = "fsl,mpc8572-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x80 0x80>; + cell-index = <1>; + interrupt-parent = <&mpic>; + interrupts = <21 2>; + }; + dma-channel@100 { + compatible = "fsl,mpc8572-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x100 0x80>; + cell-index = <2>; + interrupt-parent = <&mpic>; + interrupts = <22 2>; + }; + dma-channel@180 { + compatible = "fsl,mpc8572-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x180 0x80>; + cell-index = <3>; + interrupt-parent = <&mpic>; + interrupts = <23 2>; + }; + }; + + mdio@24520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-mdio"; + reg = <0x24520 0x20>; + + phy0: ethernet-phy@0 { + interrupt-parent = <&mpic>; + interrupts = <10 1>; + reg = <0x0>; + }; + phy1: ethernet-phy@1 { + interrupt-parent = <&mpic>; + interrupts = <10 1>; + reg = <0x1>; + }; + }; + + enet0: ethernet@24000 { + cell-index = <0>; + device_type = "network"; + model = "eTSEC"; + compatible = "gianfar"; + reg = <0x24000 0x1000>; + local-mac-address = [ 00 00 00 00 00 00 ]; + interrupts = <29 2 30 2 34 2>; + interrupt-parent = <&mpic>; + phy-handle = <&phy0>; + phy-connection-type = "rgmii-id"; + }; + + enet1: ethernet@25000 { + cell-index = <1>; + device_type = "network"; + model = "eTSEC"; + compatible = "gianfar"; + reg = <0x25000 0x1000>; + local-mac-address = [ 00 00 00 00 00 00 ]; + interrupts = <35 2 36 2 40 2>; + interrupt-parent = <&mpic>; + phy-handle = <&phy1>; + phy-connection-type = "rgmii-id"; + }; + + serial0: serial@4500 { + cell-index = <0>; + device_type = "serial"; + compatible = "ns16550"; + reg = <0x4500 0x100>; + clock-frequency = <0>; + }; + + global-utilities@e0000 { //global utilities block + compatible = "fsl,mpc8572-guts"; + reg = <0xe0000 0x1000>; + fsl,has-rstcr; + }; + + crypto@30000 { + compatible = "fsl,sec3.0", "fsl,sec2.4", "fsl,sec2.2", + "fsl,sec2.1", "fsl,sec2.0"; + reg = <0x30000 0x10000>; + interrupts = <45 2 58 2>; + interrupt-parent = <&mpic>; + fsl,num-channels = <4>; + fsl,channel-fifo-len = <24>; + fsl,exec-units-mask = <0x9fe>; + fsl,descriptor-types-mask = <0x3ab0ebf>; + }; + + mpic: pic@40000 { + interrupt-controller; + 
#address-cells = <0>; + #interrupt-cells = <2>; + reg = <0x40000 0x40000>; + compatible = "chrp,open-pic"; + device_type = "open-pic"; + protected-sources = < + 31 32 33 37 38 39 /* enet2 enet3 */ + 76 77 78 79 27 42 /* dma2 pci2 serial*/ + 0xe0 0xe1 0xe2 0xe3 /* msi */ + 0xe4 0xe5 0xe6 0xe7 + >; + }; + }; + + pci0: pcie@ffe08000 { + cell-index = <0>; + compatible = "fsl,mpc8548-pcie"; + device_type = "pci"; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; + reg = <0xffe08000 0x1000>; + bus-range = <0 255>; + ranges = <0x2000000 0x0 0x80000000 0x80000000 0x0 0x20000000 + 0x1000000 0x0 0x0 0xffc00000 0x0 0x10000>; + clock-frequency = <33333333>; + interrupt-parent = <&mpic>; + interrupts = <24 2>; + interrupt-map-mask = <0xff00 0x0 0x0 0x7>; + interrupt-map = < + /* IDSEL 0x11 func 0 - PCI slot 1 */ + 0x8800 0x0 0x0 0x1 &mpic 0x2 0x1 + 0x8800 0x0 0x0 0x2 &mpic 0x3 0x1 + 0x8800 0x0 0x0 0x3 &mpic 0x4 0x1 + 0x8800 0x0 0x0 0x4 &mpic 0x1 0x1 + + /* IDSEL 0x11 func 1 - PCI slot 1 */ + 0x8900 0x0 0x0 0x1 &mpic 0x2 0x1 + 0x8900 0x0 0x0 0x2 &mpic 0x3 0x1 + 0x8900 0x0 0x0 0x3 &mpic 0x4 0x1 + 0x8900 0x0 0x0 0x4 &mpic 0x1 0x1 + + /* IDSEL 0x11 func 2 - PCI slot 1 */ + 0x8a00 0x0 0x0 0x1 &mpic 0x2 0x1 + 0x8a00 0x0 0x0 0x2 &mpic 0x3 0x1 + 0x8a00 0x0 0x0 0x3 &mpic 0x4 0x1 + 0x8a00 0x0 0x0 0x4 &mpic 0x1 0x1 + + /* IDSEL 0x11 func 3 - PCI slot 1 */ + 0x8b00 0x0 0x0 0x1 &mpic 0x2 0x1 + 0x8b00 0x0 0x0 0x2 &mpic 0x3 0x1 + 0x8b00 0x0 0x0 0x3 &mpic 0x4 0x1 + 0x8b00 0x0 0x0 0x4 &mpic 0x1 0x1 + + /* IDSEL 0x11 func 4 - PCI slot 1 */ + 0x8c00 0x0 0x0 0x1 &mpic 0x2 0x1 + 0x8c00 0x0 0x0 0x2 &mpic 0x3 0x1 + 0x8c00 0x0 0x0 0x3 &mpic 0x4 0x1 + 0x8c00 0x0 0x0 0x4 &mpic 0x1 0x1 + + /* IDSEL 0x11 func 5 - PCI slot 1 */ + 0x8d00 0x0 0x0 0x1 &mpic 0x2 0x1 + 0x8d00 0x0 0x0 0x2 &mpic 0x3 0x1 + 0x8d00 0x0 0x0 0x3 &mpic 0x4 0x1 + 0x8d00 0x0 0x0 0x4 &mpic 0x1 0x1 + + /* IDSEL 0x11 func 6 - PCI slot 1 */ + 0x8e00 0x0 0x0 0x1 &mpic 0x2 0x1 + 0x8e00 0x0 0x0 0x2 &mpic 0x3 0x1 + 0x8e00 0x0 0x0 0x3 &mpic 0x4 0x1 + 0x8e00 0x0 0x0 0x4 &mpic 0x1 0x1 + + /* IDSEL 0x11 func 7 - PCI slot 1 */ + 0x8f00 0x0 0x0 0x1 &mpic 0x2 0x1 + 0x8f00 0x0 0x0 0x2 &mpic 0x3 0x1 + 0x8f00 0x0 0x0 0x3 &mpic 0x4 0x1 + 0x8f00 0x0 0x0 0x4 &mpic 0x1 0x1 + + /* IDSEL 0x12 func 0 - PCI slot 2 */ + 0x9000 0x0 0x0 0x1 &mpic 0x3 0x1 + 0x9000 0x0 0x0 0x2 &mpic 0x4 0x1 + 0x9000 0x0 0x0 0x3 &mpic 0x1 0x1 + 0x9000 0x0 0x0 0x4 &mpic 0x2 0x1 + + /* IDSEL 0x12 func 1 - PCI slot 2 */ + 0x9100 0x0 0x0 0x1 &mpic 0x3 0x1 + 0x9100 0x0 0x0 0x2 &mpic 0x4 0x1 + 0x9100 0x0 0x0 0x3 &mpic 0x1 0x1 + 0x9100 0x0 0x0 0x4 &mpic 0x2 0x1 + + /* IDSEL 0x12 func 2 - PCI slot 2 */ + 0x9200 0x0 0x0 0x1 &mpic 0x3 0x1 + 0x9200 0x0 0x0 0x2 &mpic 0x4 0x1 + 0x9200 0x0 0x0 0x3 &mpic 0x1 0x1 + 0x9200 0x0 0x0 0x4 &mpic 0x2 0x1 + + /* IDSEL 0x12 func 3 - PCI slot 2 */ + 0x9300 0x0 0x0 0x1 &mpic 0x3 0x1 + 0x9300 0x0 0x0 0x2 &mpic 0x4 0x1 + 0x9300 0x0 0x0 0x3 &mpic 0x1 0x1 + 0x9300 0x0 0x0 0x4 &mpic 0x2 0x1 + + /* IDSEL 0x12 func 4 - PCI slot 2 */ + 0x9400 0x0 0x0 0x1 &mpic 0x3 0x1 + 0x9400 0x0 0x0 0x2 &mpic 0x4 0x1 + 0x9400 0x0 0x0 0x3 &mpic 0x1 0x1 + 0x9400 0x0 0x0 0x4 &mpic 0x2 0x1 + + /* IDSEL 0x12 func 5 - PCI slot 2 */ + 0x9500 0x0 0x0 0x1 &mpic 0x3 0x1 + 0x9500 0x0 0x0 0x2 &mpic 0x4 0x1 + 0x9500 0x0 0x0 0x3 &mpic 0x1 0x1 + 0x9500 0x0 0x0 0x4 &mpic 0x2 0x1 + + /* IDSEL 0x12 func 6 - PCI slot 2 */ + 0x9600 0x0 0x0 0x1 &mpic 0x3 0x1 + 0x9600 0x0 0x0 0x2 &mpic 0x4 0x1 + 0x9600 0x0 0x0 0x3 &mpic 0x1 0x1 + 0x9600 0x0 0x0 0x4 &mpic 0x2 0x1 + + /* IDSEL 0x12 func 7 - PCI slot 2 */ + 
0x9700 0x0 0x0 0x1 &mpic 0x3 0x1 + 0x9700 0x0 0x0 0x2 &mpic 0x4 0x1 + 0x9700 0x0 0x0 0x3 &mpic 0x1 0x1 + 0x9700 0x0 0x0 0x4 &mpic 0x2 0x1 + + // IDSEL 0x1c USB + 0xe000 0x0 0x0 0x1 &i8259 0xc 0x2 + 0xe100 0x0 0x0 0x2 &i8259 0x9 0x2 + 0xe200 0x0 0x0 0x3 &i8259 0xa 0x2 + 0xe300 0x0 0x0 0x4 &i8259 0xb 0x2 + + // IDSEL 0x1d Audio + 0xe800 0x0 0x0 0x1 &i8259 0x6 0x2 + + // IDSEL 0x1e Legacy + 0xf000 0x0 0x0 0x1 &i8259 0x7 0x2 + 0xf100 0x0 0x0 0x1 &i8259 0x7 0x2 + + // IDSEL 0x1f IDE/SATA + 0xf800 0x0 0x0 0x1 &i8259 0xe 0x2 + 0xf900 0x0 0x0 0x1 &i8259 0x5 0x2 + + >; + + pcie@0 { + reg = <0x0 0x0 0x0 0x0 0x0>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; + ranges = <0x2000000 0x0 0x80000000 + 0x2000000 0x0 0x80000000 + 0x0 0x20000000 + + 0x1000000 0x0 0x0 + 0x1000000 0x0 0x0 + 0x0 0x100000>; + uli1575@0 { + reg = <0x0 0x0 0x0 0x0 0x0>; + #size-cells = <2>; + #address-cells = <3>; + ranges = <0x2000000 0x0 0x80000000 + 0x2000000 0x0 0x80000000 + 0x0 0x20000000 + + 0x1000000 0x0 0x0 + 0x1000000 0x0 0x0 + 0x0 0x100000>; + isa@1e { + device_type = "isa"; + #interrupt-cells = <2>; + #size-cells = <1>; + #address-cells = <2>; + reg = <0xf000 0x0 0x0 0x0 0x0>; + ranges = <0x1 0x0 0x1000000 0x0 0x0 + 0x1000>; + interrupt-parent = <&i8259>; + + i8259: interrupt-controller@20 { + reg = <0x1 0x20 0x2 + 0x1 0xa0 0x2 + 0x1 0x4d0 0x2>; + interrupt-controller; + device_type = "interrupt-controller"; + #address-cells = <0>; + #interrupt-cells = <2>; + compatible = "chrp,iic"; + interrupts = <9 2>; + interrupt-parent = <&mpic>; + }; + + i8042@60 { + #size-cells = <0>; + #address-cells = <1>; + reg = <0x1 0x60 0x1 0x1 0x64 0x1>; + interrupts = <1 3 12 3>; + interrupt-parent = + <&i8259>; + + keyboard@0 { + reg = <0x0>; + compatible = "pnpPNP,303"; + }; + + mouse@1 { + reg = <0x1>; + compatible = "pnpPNP,f03"; + }; + }; + + rtc@70 { + compatible = "pnpPNP,b00"; + reg = <0x1 0x70 0x2>; + }; + + gpio@400 { + reg = <0x1 0x400 0x80>; + }; + }; + }; + }; + + }; + + pci1: pcie@ffe09000 { + cell-index = <1>; + compatible = "fsl,mpc8548-pcie"; + device_type = "pci"; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; + reg = <0xffe09000 0x1000>; + bus-range = <0 255>; + ranges = <0x2000000 0x0 0xa0000000 0xa0000000 0x0 0x20000000 + 0x1000000 0x0 0x0 0xffc10000 0x0 0x10000>; + clock-frequency = <33333333>; + interrupt-parent = <&mpic>; + interrupts = <26 2>; + interrupt-map-mask = <0xf800 0x0 0x0 0x7>; + interrupt-map = < + /* IDSEL 0x0 */ + 0000 0x0 0x0 0x1 &mpic 0x4 0x1 + 0000 0x0 0x0 0x2 &mpic 0x5 0x1 + 0000 0x0 0x0 0x3 &mpic 0x6 0x1 + 0000 0x0 0x0 0x4 &mpic 0x7 0x1 + >; + pcie@0 { + reg = <0x0 0x0 0x0 0x0 0x0>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; + ranges = <0x2000000 0x0 0xa0000000 + 0x2000000 0x0 0xa0000000 + 0x0 0x20000000 + + 0x1000000 0x0 0x0 + 0x1000000 0x0 0x0 + 0x0 0x100000>; + }; + }; +}; diff --git a/arch/powerpc/boot/dts/mpc8572ds_camp_core1.dts b/arch/powerpc/boot/dts/mpc8572ds_camp_core1.dts new file mode 100644 index 000000000000..04ecda18d206 --- /dev/null +++ b/arch/powerpc/boot/dts/mpc8572ds_camp_core1.dts @@ -0,0 +1,234 @@ +/* + * MPC8572 DS Core1 Device Tree Source in CAMP mode. + * + * In CAMP mode, each core needs to have its own dts. Only mpic and L2 cache + * can be shared, all the other devices must be assigned to one core only. + * This dts allows core1 to have l2, dma2, eth2, eth3, pci2, msi. + * + * Please note to add "-b 1" for core1's dts compiling. + * + * Copyright 2007, 2008 Freescale Semiconductor Inc. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +/dts-v1/; +/ { + model = "fsl,MPC8572DS"; + compatible = "fsl,MPC8572DS", "fsl,MPC8572DS-CAMP"; + #address-cells = <1>; + #size-cells = <1>; + + aliases { + ethernet2 = &enet2; + ethernet3 = &enet3; + serial0 = &serial0; + pci2 = &pci2; + }; + + cpus { + #address-cells = <1>; + #size-cells = <0>; + + PowerPC,8572@1 { + device_type = "cpu"; + reg = <0x1>; + d-cache-line-size = <32>; // 32 bytes + i-cache-line-size = <32>; // 32 bytes + d-cache-size = <0x8000>; // L1, 32K + i-cache-size = <0x8000>; // L1, 32K + timebase-frequency = <0>; + bus-frequency = <0>; + clock-frequency = <0>; + next-level-cache = <&L2>; + }; + }; + + memory { + device_type = "memory"; + reg = <0x0 0x0>; // Filled by U-Boot + }; + + soc8572@ffe00000 { + #address-cells = <1>; + #size-cells = <1>; + device_type = "soc"; + compatible = "simple-bus"; + ranges = <0x0 0xffe00000 0x100000>; + reg = <0xffe00000 0x1000>; // CCSRBAR & soc regs, remove once parse code for immrbase fixed + bus-frequency = <0>; // Filled out by uboot. + + L2: l2-cache-controller@20000 { + compatible = "fsl,mpc8572-l2-cache-controller"; + reg = <0x20000 0x1000>; + cache-line-size = <32>; // 32 bytes + cache-size = <0x80000>; // L2, 512K + interrupt-parent = <&mpic>; + }; + + dma@c300 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,mpc8572-dma", "fsl,eloplus-dma"; + reg = <0xc300 0x4>; + ranges = <0x0 0xc100 0x200>; + cell-index = <0>; + dma-channel@0 { + compatible = "fsl,mpc8572-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x0 0x80>; + cell-index = <0>; + interrupt-parent = <&mpic>; + interrupts = <76 2>; + }; + dma-channel@80 { + compatible = "fsl,mpc8572-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x80 0x80>; + cell-index = <1>; + interrupt-parent = <&mpic>; + interrupts = <77 2>; + }; + dma-channel@100 { + compatible = "fsl,mpc8572-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x100 0x80>; + cell-index = <2>; + interrupt-parent = <&mpic>; + interrupts = <78 2>; + }; + dma-channel@180 { + compatible = "fsl,mpc8572-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x180 0x80>; + cell-index = <3>; + interrupt-parent = <&mpic>; + interrupts = <79 2>; + }; + }; + + mdio@24520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-mdio"; + reg = <0x24520 0x20>; + + phy2: ethernet-phy@2 { + interrupt-parent = <&mpic>; + reg = <0x2>; + }; + phy3: ethernet-phy@3 { + interrupt-parent = <&mpic>; + reg = <0x3>; + }; + }; + + enet2: ethernet@26000 { + cell-index = <2>; + device_type = "network"; + model = "eTSEC"; + compatible = "gianfar"; + reg = <0x26000 0x1000>; + local-mac-address = [ 00 00 00 00 00 00 ]; + interrupts = <31 2 32 2 33 2>; + interrupt-parent = <&mpic>; + phy-handle = <&phy2>; + phy-connection-type = "rgmii-id"; + }; + + enet3: ethernet@27000 { + cell-index = <3>; + device_type = "network"; + model = "eTSEC"; + compatible = "gianfar"; + reg = <0x27000 0x1000>; + local-mac-address = [ 00 00 00 00 00 00 ]; + interrupts = <37 2 38 2 39 2>; + interrupt-parent = <&mpic>; + phy-handle = <&phy3>; + phy-connection-type = "rgmii-id"; + }; + + msi@41600 { + compatible = "fsl,mpc8572-msi", "fsl,mpic-msi"; + reg = <0x41600 0x80>; + msi-available-ranges = <0 0x100>; + interrupts = < + 0xe0 0 + 0xe1 0 + 0xe2 0 + 0xe3 0 + 0xe4 0 + 
0xe5 0 + 0xe6 0 + 0xe7 0>; + interrupt-parent = <&mpic>; + }; + + serial0: serial@4600 { + cell-index = <1>; + device_type = "serial"; + compatible = "ns16550"; + reg = <0x4600 0x100>; + clock-frequency = <0>; + }; + + mpic: pic@40000 { + interrupt-controller; + #address-cells = <0>; + #interrupt-cells = <2>; + reg = <0x40000 0x40000>; + compatible = "chrp,open-pic"; + device_type = "open-pic"; + protected-sources = < + 18 16 10 42 45 58 /* MEM L2 mdio serial crypto */ + 29 30 34 35 36 40 /* enet0 enet1 */ + 24 26 20 21 22 23 /* pcie0 pcie1 dma1 */ + 43 /* i2c */ + 0x1 0x2 0x3 0x4 /* pci slot */ + 0x9 0xa 0xb 0xc /* usb */ + 0x6 0x7 0xe 0x5 /* Audio elgacy SATA */ + >; + }; + }; + + pci2: pcie@ffe0a000 { + cell-index = <2>; + compatible = "fsl,mpc8548-pcie"; + device_type = "pci"; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; + reg = <0xffe0a000 0x1000>; + bus-range = <0 255>; + ranges = <0x2000000 0x0 0xc0000000 0xc0000000 0x0 0x20000000 + 0x1000000 0x0 0x0 0xffc20000 0x0 0x10000>; + clock-frequency = <33333333>; + interrupt-parent = <&mpic>; + interrupts = <27 2>; + interrupt-map-mask = <0xf800 0x0 0x0 0x7>; + interrupt-map = < + /* IDSEL 0x0 */ + 0000 0x0 0x0 0x1 &mpic 0x0 0x1 + 0000 0x0 0x0 0x2 &mpic 0x1 0x1 + 0000 0x0 0x0 0x3 &mpic 0x2 0x1 + 0000 0x0 0x0 0x4 &mpic 0x3 0x1 + >; + pcie@0 { + reg = <0x0 0x0 0x0 0x0 0x0>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; + ranges = <0x2000000 0x0 0xc0000000 + 0x2000000 0x0 0xc0000000 + 0x0 0x20000000 + + 0x1000000 0x0 0x0 + 0x1000000 0x0 0x0 + 0x0 0x100000>; + }; + }; +}; diff --git a/arch/powerpc/boot/dts/mpc8641_hpcn.dts b/arch/powerpc/boot/dts/mpc8641_hpcn.dts index d665e767822a..35d5e248ccd7 100644 --- a/arch/powerpc/boot/dts/mpc8641_hpcn.dts +++ b/arch/powerpc/boot/dts/mpc8641_hpcn.dts @@ -205,8 +205,49 @@ reg = <3>; device_type = "ethernet-phy"; }; + tbi0: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + + mdio@25520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x25520 0x20>; + + tbi1: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + + mdio@26520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x26520 0x20>; + + tbi2: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + + mdio@27520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x27520 0x20>; + + tbi3: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; }; + enet0: ethernet@24000 { cell-index = <0>; device_type = "network"; @@ -216,6 +257,7 @@ local-mac-address = [ 00 00 00 00 00 00 ]; interrupts = <29 2 30 2 34 2>; interrupt-parent = <&mpic>; + tbi-handle = <&tbi0>; phy-handle = <&phy0>; phy-connection-type = "rgmii-id"; }; @@ -229,6 +271,7 @@ local-mac-address = [ 00 00 00 00 00 00 ]; interrupts = <35 2 36 2 40 2>; interrupt-parent = <&mpic>; + tbi-handle = <&tbi1>; phy-handle = <&phy1>; phy-connection-type = "rgmii-id"; }; @@ -242,6 +285,7 @@ local-mac-address = [ 00 00 00 00 00 00 ]; interrupts = <31 2 32 2 33 2>; interrupt-parent = <&mpic>; + tbi-handle = <&tbi2>; phy-handle = <&phy2>; phy-connection-type = "rgmii-id"; }; @@ -255,6 +299,7 @@ local-mac-address = [ 00 00 00 00 00 00 ]; interrupts = <37 2 38 2 39 2>; interrupt-parent = <&mpic>; + tbi-handle = <&tbi3>; phy-handle = <&phy3>; phy-connection-type = "rgmii-id"; }; diff --git a/arch/powerpc/boot/dts/pcm030.dts b/arch/powerpc/boot/dts/pcm030.dts index 7c1bb952360c..be2c11ca0594 100644 --- 
a/arch/powerpc/boot/dts/pcm030.dts +++ b/arch/powerpc/boot/dts/pcm030.dts @@ -143,7 +143,6 @@ rtc@800 { // Real time clock compatible = "fsl,mpc5200b-rtc","fsl,mpc5200-rtc"; - device_type = "rtc"; reg = <0x800 0x100>; interrupts = <0x1 0x5 0x0 0x1 0x6 0x0>; interrupt-parent = <&mpc5200_pic>; @@ -301,7 +300,6 @@ interrupt-parent = <&mpc5200_pic>; fsl5200-clocking; rtc@51 { - device_type = "rtc"; compatible = "nxp,pcf8563"; reg = <0x51>; }; diff --git a/arch/powerpc/boot/dts/sbc8349.dts b/arch/powerpc/boot/dts/sbc8349.dts index 0f941f310e44..8d365a57ebc1 100644 --- a/arch/powerpc/boot/dts/sbc8349.dts +++ b/arch/powerpc/boot/dts/sbc8349.dts @@ -177,6 +177,22 @@ reg = <0x1a>; device_type = "ethernet-phy"; }; + tbi0: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + + mdio@25520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x25520 0x20>; + + tbi1: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; }; enet0: ethernet@24000 { @@ -188,6 +204,7 @@ local-mac-address = [ 00 00 00 00 00 00 ]; interrupts = <32 0x8 33 0x8 34 0x8>; interrupt-parent = <&ipic>; + tbi-handle = <&tbi0>; phy-handle = <&phy0>; linux,network-index = <0>; }; @@ -201,6 +218,7 @@ local-mac-address = [ 00 00 00 00 00 00 ]; interrupts = <35 0x8 36 0x8 37 0x8>; interrupt-parent = <&ipic>; + tbi-handle = <&tbi1>; phy-handle = <&phy1>; linux,network-index = <1>; }; diff --git a/arch/powerpc/boot/dts/sbc8548.dts b/arch/powerpc/boot/dts/sbc8548.dts index 333552b4e90d..2baf4a51f224 100644 --- a/arch/powerpc/boot/dts/sbc8548.dts +++ b/arch/powerpc/boot/dts/sbc8548.dts @@ -252,6 +252,22 @@ reg = <0x1a>; device_type = "ethernet-phy"; }; + tbi0: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + + mdio@25520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x25520 0x20>; + + tbi1: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; }; enet0: ethernet@24000 { @@ -263,6 +279,7 @@ local-mac-address = [ 00 00 00 00 00 00 ]; interrupts = <0x1d 0x2 0x1e 0x2 0x22 0x2>; interrupt-parent = <&mpic>; + tbi-handle = <&tbi0>; phy-handle = <&phy0>; }; @@ -275,6 +292,7 @@ local-mac-address = [ 00 00 00 00 00 00 ]; interrupts = <0x23 0x2 0x24 0x2 0x28 0x2>; interrupt-parent = <&mpic>; + tbi-handle = <&tbi1>; phy-handle = <&phy1>; }; diff --git a/arch/powerpc/boot/dts/sbc8560.dts b/arch/powerpc/boot/dts/sbc8560.dts index db3632ef9888..01542f7062ab 100644 --- a/arch/powerpc/boot/dts/sbc8560.dts +++ b/arch/powerpc/boot/dts/sbc8560.dts @@ -168,6 +168,22 @@ reg = <0x1c>; device_type = "ethernet-phy"; }; + tbi0: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + + mdio@25520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x25520 0x20>; + + tbi1: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; }; enet0: ethernet@24000 { @@ -179,6 +195,7 @@ local-mac-address = [ 00 00 00 00 00 00 ]; interrupts = <0x1d 0x2 0x1e 0x2 0x22 0x2>; interrupt-parent = <&mpic>; + tbi-handle = <&tbi0>; phy-handle = <&phy0>; }; @@ -191,6 +208,7 @@ local-mac-address = [ 00 00 00 00 00 00 ]; interrupts = <0x23 0x2 0x24 0x2 0x28 0x2>; interrupt-parent = <&mpic>; + tbi-handle = <&tbi1>; phy-handle = <&phy1>; }; diff --git a/arch/powerpc/boot/dts/sbc8641d.dts b/arch/powerpc/boot/dts/sbc8641d.dts index 9652456158fb..36db981548e4 100644 --- a/arch/powerpc/boot/dts/sbc8641d.dts +++ b/arch/powerpc/boot/dts/sbc8641d.dts @@ -222,6 +222,46 @@ reg = <2>; device_type = "ethernet-phy"; }; + tbi0: 
tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + + mdio@25520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x25520 0x20>; + + tbi1: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + + mdio@26520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x26520 0x20>; + + tbi2: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + + mdio@27520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x27520 0x20>; + + tbi3: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; }; enet0: ethernet@24000 { @@ -233,6 +273,7 @@ local-mac-address = [ 00 00 00 00 00 00 ]; interrupts = <29 2 30 2 34 2>; interrupt-parent = <&mpic>; + tbi-handle = <&tbi0>; phy-handle = <&phy0>; phy-connection-type = "rgmii-id"; }; @@ -246,6 +287,7 @@ local-mac-address = [ 00 00 00 00 00 00 ]; interrupts = <35 2 36 2 40 2>; interrupt-parent = <&mpic>; + tbi-handle = <&tbi1>; phy-handle = <&phy1>; phy-connection-type = "rgmii-id"; }; @@ -259,6 +301,7 @@ local-mac-address = [ 00 00 00 00 00 00 ]; interrupts = <31 2 32 2 33 2>; interrupt-parent = <&mpic>; + tbi-handle = <&tbi2>; phy-handle = <&phy2>; phy-connection-type = "rgmii-id"; }; @@ -272,6 +315,7 @@ local-mac-address = [ 00 00 00 00 00 00 ]; interrupts = <37 2 38 2 39 2>; interrupt-parent = <&mpic>; + tbi-handle = <&tbi3>; phy-handle = <&phy3>; phy-connection-type = "rgmii-id"; }; diff --git a/arch/powerpc/boot/dts/stx_gp3_8560.dts b/arch/powerpc/boot/dts/stx_gp3_8560.dts index fcd1db6ca0a8..fff33fe6efc6 100644 --- a/arch/powerpc/boot/dts/stx_gp3_8560.dts +++ b/arch/powerpc/boot/dts/stx_gp3_8560.dts @@ -142,6 +142,22 @@ reg = <4>; device_type = "ethernet-phy"; }; + tbi0: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + + mdio@25520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x25520 0x20>; + + tbi1: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; }; enet0: ethernet@24000 { @@ -153,6 +169,7 @@ local-mac-address = [ 00 00 00 00 00 00 ]; interrupts = <29 2 30 2 34 2>; interrupt-parent = <&mpic>; + tbi-handle = <&tbi0>; phy-handle = <&phy2>; }; @@ -165,6 +182,7 @@ local-mac-address = [ 00 00 00 00 00 00 ]; interrupts = <35 2 36 2 40 2>; interrupt-parent = <&mpic>; + tbi-handle = <&tbi1>; phy-handle = <&phy4>; }; diff --git a/arch/powerpc/boot/dts/tqm5200.dts b/arch/powerpc/boot/dts/tqm5200.dts index 3008bf8830c1..906302e26a62 100644 --- a/arch/powerpc/boot/dts/tqm5200.dts +++ b/arch/powerpc/boot/dts/tqm5200.dts @@ -181,7 +181,6 @@ fsl5200-clocking; rtc@68 { - device_type = "rtc"; compatible = "dallas,ds1307"; reg = <0x68>; }; diff --git a/arch/powerpc/boot/dts/tqm8540.dts b/arch/powerpc/boot/dts/tqm8540.dts index e1d260b9085e..a693f01c21aa 100644 --- a/arch/powerpc/boot/dts/tqm8540.dts +++ b/arch/powerpc/boot/dts/tqm8540.dts @@ -155,6 +155,34 @@ reg = <3>; device_type = "ethernet-phy"; }; + tbi0: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + + mdio@25520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x25520 0x20>; + + tbi1: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + + mdio@26520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x26520 0x20>; + + tbi2: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; }; enet0: ethernet@24000 { diff --git a/arch/powerpc/boot/dts/tqm8541.dts 
b/arch/powerpc/boot/dts/tqm8541.dts index d76441ec5dc7..9e3f5f0dde20 100644 --- a/arch/powerpc/boot/dts/tqm8541.dts +++ b/arch/powerpc/boot/dts/tqm8541.dts @@ -154,6 +154,22 @@ reg = <3>; device_type = "ethernet-phy"; }; + tbi0: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + + mdio@25520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x25520 0x20>; + + tbi1: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; }; enet0: ethernet@24000 { @@ -165,6 +181,7 @@ local-mac-address = [ 00 00 00 00 00 00 ]; interrupts = <29 2 30 2 34 2>; interrupt-parent = <&mpic>; + tbi-handle = <&tbi0>; phy-handle = <&phy2>; }; @@ -177,6 +194,7 @@ local-mac-address = [ 00 00 00 00 00 00 ]; interrupts = <35 2 36 2 40 2>; interrupt-parent = <&mpic>; + tbi-handle = <&tbi1>; phy-handle = <&phy1>; }; diff --git a/arch/powerpc/boot/dts/tqm8548-bigflash.dts b/arch/powerpc/boot/dts/tqm8548-bigflash.dts index 4199e89b4e50..15086eb65c50 100644 --- a/arch/powerpc/boot/dts/tqm8548-bigflash.dts +++ b/arch/powerpc/boot/dts/tqm8548-bigflash.dts @@ -179,6 +179,46 @@ reg = <5>; device_type = "ethernet-phy"; }; + tbi0: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + + mdio@25520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x25520 0x20>; + + tbi1: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + + mdio@26520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x26520 0x20>; + + tbi2: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + + mdio@27520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x27520 0x20>; + + tbi3: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; }; enet0: ethernet@24000 { @@ -190,6 +230,7 @@ local-mac-address = [ 00 00 00 00 00 00 ]; interrupts = <29 2 30 2 34 2>; interrupt-parent = <&mpic>; + tbi-handle = <&tbi0>; phy-handle = <&phy2>; }; @@ -202,6 +243,7 @@ local-mac-address = [ 00 00 00 00 00 00 ]; interrupts = <35 2 36 2 40 2>; interrupt-parent = <&mpic>; + tbi-handle = <&tbi1>; phy-handle = <&phy1>; }; @@ -214,6 +256,7 @@ local-mac-address = [ 00 00 00 00 00 00 ]; interrupts = <31 2 32 2 33 2>; interrupt-parent = <&mpic>; + tbi-handle = <&tbi2>; phy-handle = <&phy3>; }; @@ -226,6 +269,7 @@ local-mac-address = [ 00 00 00 00 00 00 ]; interrupts = <37 2 38 2 39 2>; interrupt-parent = <&mpic>; + tbi-handle = <&tbi3>; phy-handle = <&phy4>; }; diff --git a/arch/powerpc/boot/dts/tqm8548.dts b/arch/powerpc/boot/dts/tqm8548.dts index 58ee4185454b..b7b65f5e79b6 100644 --- a/arch/powerpc/boot/dts/tqm8548.dts +++ b/arch/powerpc/boot/dts/tqm8548.dts @@ -179,6 +179,46 @@ reg = <5>; device_type = "ethernet-phy"; }; + tbi0: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + + mdio@25520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x25520 0x20>; + + tbi1: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + + mdio@26520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x26520 0x20>; + + tbi2: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + + mdio@27520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x27520 0x20>; + + tbi3: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; }; enet0: ethernet@24000 { @@ -190,6 +230,7 @@ local-mac-address = [ 00 00 00 00 00 00 ]; interrupts = 
<29 2 30 2 34 2>; interrupt-parent = <&mpic>; + tbi-handle = <&tbi0>; phy-handle = <&phy2>; }; @@ -202,6 +243,7 @@ local-mac-address = [ 00 00 00 00 00 00 ]; interrupts = <35 2 36 2 40 2>; interrupt-parent = <&mpic>; + tbi-handle = <&tbi1>; phy-handle = <&phy1>; }; @@ -214,6 +256,7 @@ local-mac-address = [ 00 00 00 00 00 00 ]; interrupts = <31 2 32 2 33 2>; interrupt-parent = <&mpic>; + tbi-handle = <&tbi2>; phy-handle = <&phy3>; }; @@ -226,6 +269,7 @@ local-mac-address = [ 00 00 00 00 00 00 ]; interrupts = <37 2 38 2 39 2>; interrupt-parent = <&mpic>; + tbi-handle = <&tbi3>; phy-handle = <&phy4>; }; diff --git a/arch/powerpc/boot/dts/tqm8555.dts b/arch/powerpc/boot/dts/tqm8555.dts index 6f7ea59c4846..cf92b4e7945e 100644 --- a/arch/powerpc/boot/dts/tqm8555.dts +++ b/arch/powerpc/boot/dts/tqm8555.dts @@ -154,6 +154,22 @@ reg = <3>; device_type = "ethernet-phy"; }; + tbi0: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + + mdio@25520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x25520 0x20>; + + tbi1: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; }; enet0: ethernet@24000 { @@ -165,6 +181,7 @@ local-mac-address = [ 00 00 00 00 00 00 ]; interrupts = <29 2 30 2 34 2>; interrupt-parent = <&mpic>; + tbi-handle = <&tbi0>; phy-handle = <&phy2>; }; @@ -177,6 +194,7 @@ local-mac-address = [ 00 00 00 00 00 00 ]; interrupts = <35 2 36 2 40 2>; interrupt-parent = <&mpic>; + tbi-handle = <&tbi1>; phy-handle = <&phy1>; }; diff --git a/arch/powerpc/boot/dts/tqm8560.dts b/arch/powerpc/boot/dts/tqm8560.dts index 3fe35208907b..9e1ab2d2f669 100644 --- a/arch/powerpc/boot/dts/tqm8560.dts +++ b/arch/powerpc/boot/dts/tqm8560.dts @@ -156,6 +156,22 @@ reg = <3>; device_type = "ethernet-phy"; }; + tbi0: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + + mdio@25520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x25520 0x20>; + + tbi1: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; }; enet0: ethernet@24000 { @@ -167,6 +183,7 @@ local-mac-address = [ 00 00 00 00 00 00 ]; interrupts = <29 2 30 2 34 2>; interrupt-parent = <&mpic>; + tbi-handle = <&tbi0>; phy-handle = <&phy2>; }; @@ -179,6 +196,7 @@ local-mac-address = [ 00 00 00 00 00 00 ]; interrupts = <35 2 36 2 40 2>; interrupt-parent = <&mpic>; + tbi-handle = <&tbi1>; phy-handle = <&phy1>; }; diff --git a/arch/powerpc/boot/libfdt-wrapper.c b/arch/powerpc/boot/libfdt-wrapper.c index 9276327bc2bb..bb8b9b3505ee 100644 --- a/arch/powerpc/boot/libfdt-wrapper.c +++ b/arch/powerpc/boot/libfdt-wrapper.c @@ -185,7 +185,7 @@ void fdt_init(void *blob) /* Make sure the dt blob is the right version and so forth */ fdt = blob; - bufsize = fdt_totalsize(fdt) + 4; + bufsize = fdt_totalsize(fdt) + EXPAND_GRANULARITY; buf = malloc(bufsize); if(!buf) fatal("malloc failed. 
can't relocate the device tree\n\r"); diff --git a/arch/powerpc/configs/86xx/gef_sbc610_defconfig b/arch/powerpc/configs/86xx/gef_sbc610_defconfig index 07ccaf89f379..cd1ffa449327 100644 --- a/arch/powerpc/configs/86xx/gef_sbc610_defconfig +++ b/arch/powerpc/configs/86xx/gef_sbc610_defconfig @@ -1397,8 +1397,11 @@ CONFIG_USB_STORAGE=y # CONFIG_ACCESSIBILITY is not set # CONFIG_INFINIBAND is not set # CONFIG_EDAC is not set -CONFIG_RTC_LIB=m -CONFIG_RTC_CLASS=m +CONFIG_RTC_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +# CONFIG_RTC_DEBUG is not set # # RTC interfaces @@ -1424,6 +1427,7 @@ CONFIG_RTC_INTF_DEV=y # CONFIG_RTC_DRV_M41T80 is not set # CONFIG_RTC_DRV_S35390A is not set # CONFIG_RTC_DRV_FM3130 is not set +CONFIG_RTC_DRV_RX8581=y # # SPI RTC drivers diff --git a/arch/powerpc/configs/ppc44x_defconfig b/arch/powerpc/configs/ppc44x_defconfig index cfc94cfcf4cb..034a1fbdc887 100644 --- a/arch/powerpc/configs/ppc44x_defconfig +++ b/arch/powerpc/configs/ppc44x_defconfig @@ -267,7 +267,7 @@ CONFIG_PCI_SYSCALL=y # CONFIG_PCIEPORTBUS is not set CONFIG_ARCH_SUPPORTS_MSI=y # CONFIG_PCI_MSI is not set -CONFIG_PCI_LEGACY=y +# CONFIG_PCI_LEGACY is not set # CONFIG_PCI_DEBUG is not set # CONFIG_PCCARD is not set # CONFIG_HOTPLUG_PCI is not set @@ -354,7 +354,7 @@ CONFIG_IPV6_NDISC_NODETYPE=y # CONFIG_IP_SCTP is not set # CONFIG_TIPC is not set # CONFIG_ATM is not set -# CONFIG_BRIDGE is not set +CONFIG_BRIDGE=m # CONFIG_NET_DSA is not set # CONFIG_VLAN_8021Q is not set # CONFIG_DECNET is not set @@ -579,7 +579,7 @@ CONFIG_NETDEVICES=y # CONFIG_BONDING is not set # CONFIG_MACVLAN is not set # CONFIG_EQUALIZER is not set -# CONFIG_TUN is not set +CONFIG_TUN=m # CONFIG_VETH is not set # CONFIG_ARCNET is not set # CONFIG_PHYLIB is not set @@ -1001,11 +1001,11 @@ CONFIG_USB_OHCI_LITTLE_ENDIAN=y # CONFIG_USB_TMC is not set # -# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed; # # -# may also be needed; see USB_STORAGE Help for more information +# see USB_STORAGE Help for more information # CONFIG_USB_STORAGE=m # CONFIG_USB_STORAGE_DEBUG is not set @@ -1418,6 +1418,6 @@ CONFIG_CRYPTO_LZO=m # CONFIG_PPC_CLOCK is not set CONFIG_VIRTUALIZATION=y CONFIG_KVM=y -CONFIG_KVM_BOOKE_HOST=y +CONFIG_KVM_440=y # CONFIG_VIRTIO_PCI is not set # CONFIG_VIRTIO_BALLOON is not set diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h index f3fc733758f5..499be5bdd6fa 100644 --- a/arch/powerpc/include/asm/atomic.h +++ b/arch/powerpc/include/asm/atomic.h @@ -111,7 +111,7 @@ static __inline__ void atomic_inc(atomic_t *v) bne- 1b" : "=&r" (t), "+m" (v->counter) : "r" (&v->counter) - : "cc"); + : "cc", "xer"); } static __inline__ int atomic_inc_return(atomic_t *v) @@ -128,7 +128,7 @@ static __inline__ int atomic_inc_return(atomic_t *v) ISYNC_ON_SMP : "=&r" (t) : "r" (&v->counter) - : "cc", "memory"); + : "cc", "xer", "memory"); return t; } @@ -155,7 +155,7 @@ static __inline__ void atomic_dec(atomic_t *v) bne- 1b" : "=&r" (t), "+m" (v->counter) : "r" (&v->counter) - : "cc"); + : "cc", "xer"); } static __inline__ int atomic_dec_return(atomic_t *v) @@ -172,7 +172,7 @@ static __inline__ int atomic_dec_return(atomic_t *v) ISYNC_ON_SMP : "=&r" (t) : "r" (&v->counter) - : "cc", "memory"); + : "cc", "xer", "memory"); return t; } @@ -346,7 +346,7 @@ static __inline__ void atomic64_inc(atomic64_t *v) bne- 1b" : "=&r" (t), "+m" (v->counter) : "r" (&v->counter) - : "cc"); + : "cc", "xer"); } 
static __inline__ long atomic64_inc_return(atomic64_t *v) @@ -362,7 +362,7 @@ static __inline__ long atomic64_inc_return(atomic64_t *v) ISYNC_ON_SMP : "=&r" (t) : "r" (&v->counter) - : "cc", "memory"); + : "cc", "xer", "memory"); return t; } @@ -388,7 +388,7 @@ static __inline__ void atomic64_dec(atomic64_t *v) bne- 1b" : "=&r" (t), "+m" (v->counter) : "r" (&v->counter) - : "cc"); + : "cc", "xer"); } static __inline__ long atomic64_dec_return(atomic64_t *v) @@ -404,7 +404,7 @@ static __inline__ long atomic64_dec_return(atomic64_t *v) ISYNC_ON_SMP : "=&r" (t) : "r" (&v->counter) - : "cc", "memory"); + : "cc", "xer", "memory"); return t; } @@ -431,7 +431,7 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v) "\n\ 2:" : "=&r" (t) : "r" (&v->counter) - : "cc", "memory"); + : "cc", "xer", "memory"); return t; } diff --git a/arch/powerpc/include/asm/bug.h b/arch/powerpc/include/asm/bug.h index e55d1f66b86f..64e1fdca233e 100644 --- a/arch/powerpc/include/asm/bug.h +++ b/arch/powerpc/include/asm/bug.h @@ -3,6 +3,7 @@ #ifdef __KERNEL__ #include <asm/asm-compat.h> + /* * Define an illegal instr to trap on the bug. * We don't use 0 because that marks the end of a function @@ -14,6 +15,7 @@ #ifdef CONFIG_BUG #ifdef __ASSEMBLY__ +#include <asm/asm-offsets.h> #ifdef CONFIG_DEBUG_BUGVERBOSE .macro EMIT_BUG_ENTRY addr,file,line,flags .section __bug_table,"a" @@ -26,7 +28,7 @@ .previous .endm #else - .macro EMIT_BUG_ENTRY addr,file,line,flags +.macro EMIT_BUG_ENTRY addr,file,line,flags .section __bug_table,"a" 5001: PPC_LONG \addr .short \flags @@ -113,6 +115,13 @@ #define HAVE_ARCH_BUG_ON #define HAVE_ARCH_WARN_ON #endif /* __ASSEMBLY __ */ +#else +#ifdef __ASSEMBLY__ +.macro EMIT_BUG_ENTRY addr,file,line,flags +.endm +#else /* !__ASSEMBLY__ */ +#define _EMIT_BUG_ENTRY +#endif #endif /* CONFIG_BUG */ #include <asm-generic/bug.h> diff --git a/arch/powerpc/include/asm/byteorder.h b/arch/powerpc/include/asm/byteorder.h index b37752214a16..d5de325472e9 100644 --- a/arch/powerpc/include/asm/byteorder.h +++ b/arch/powerpc/include/asm/byteorder.h @@ -11,6 +11,8 @@ #include <asm/types.h> #include <linux/compiler.h> +#define __BIG_ENDIAN + #ifdef __GNUC__ #ifdef __KERNEL__ @@ -21,12 +23,19 @@ static __inline__ __u16 ld_le16(const volatile __u16 *addr) __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr)); return val; } +#define __arch_swab16p ld_le16 static __inline__ void st_le16(volatile __u16 *addr, const __u16 val) { __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr)); } +static inline void __arch_swab16s(__u16 *addr) +{ + st_le16(addr, *addr); +} +#define __arch_swab16s __arch_swab16s + static __inline__ __u32 ld_le32(const volatile __u32 *addr) { __u32 val; @@ -34,13 +43,20 @@ static __inline__ __u32 ld_le32(const volatile __u32 *addr) __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr)); return val; } +#define __arch_swab32p ld_le32 static __inline__ void st_le32(volatile __u32 *addr, const __u32 val) { __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr)); } -static __inline__ __attribute_const__ __u16 ___arch__swab16(__u16 value) +static inline void __arch_swab32s(__u32 *addr) +{ + st_le32(addr, *addr); +} +#define __arch_swab32s __arch_swab32s + +static inline __attribute_const__ __u16 __arch_swab16(__u16 value) { __u16 result; @@ -49,8 +65,9 @@ static __inline__ __attribute_const__ __u16 ___arch__swab16(__u16 value) : "r" (value), "0" (value >> 8)); return result; } +#define __arch_swab16 
__arch_swab16 -static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 value) +static inline __attribute_const__ __u32 __arch_swab32(__u32 value) { __u32 result; @@ -61,29 +78,16 @@ static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 value) : "r" (value), "0" (value >> 24)); return result; } - -#define __arch__swab16(x) ___arch__swab16(x) -#define __arch__swab32(x) ___arch__swab32(x) - -/* The same, but returns converted value from the location pointer by addr. */ -#define __arch__swab16p(addr) ld_le16(addr) -#define __arch__swab32p(addr) ld_le32(addr) - -/* The same, but do the conversion in situ, ie. put the value back to addr. */ -#define __arch__swab16s(addr) st_le16(addr,*addr) -#define __arch__swab32s(addr) st_le32(addr,*addr) +#define __arch_swab32 __arch_swab32 #endif /* __KERNEL__ */ -#ifndef __STRICT_ANSI__ -#define __BYTEORDER_HAS_U64__ #ifndef __powerpc64__ #define __SWAB_64_THRU_32__ #endif /* __powerpc64__ */ -#endif /* __STRICT_ANSI__ */ #endif /* __GNUC__ */ -#include <linux/byteorder/big_endian.h> +#include <linux/byteorder.h> #endif /* _ASM_POWERPC_BYTEORDER_H */ diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index 1e94b07a020e..4911104791c3 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -82,6 +82,7 @@ struct cpu_spec { char *cpu_name; unsigned long cpu_features; /* Kernel features */ unsigned int cpu_user_features; /* Userland features */ + unsigned int mmu_features; /* MMU features */ /* cache line sizes */ unsigned int icache_bsize; @@ -144,17 +145,14 @@ extern const char *powerpc_base_platform; #define CPU_FTR_USE_TB ASM_CONST(0x0000000000000040) #define CPU_FTR_L2CSR ASM_CONST(0x0000000000000080) #define CPU_FTR_601 ASM_CONST(0x0000000000000100) -#define CPU_FTR_HPTE_TABLE ASM_CONST(0x0000000000000200) #define CPU_FTR_CAN_NAP ASM_CONST(0x0000000000000400) #define CPU_FTR_L3CR ASM_CONST(0x0000000000000800) #define CPU_FTR_L3_DISABLE_NAP ASM_CONST(0x0000000000001000) #define CPU_FTR_NAP_DISABLE_L2_PR ASM_CONST(0x0000000000002000) #define CPU_FTR_DUAL_PLL_750FX ASM_CONST(0x0000000000004000) #define CPU_FTR_NO_DPM ASM_CONST(0x0000000000008000) -#define CPU_FTR_HAS_HIGH_BATS ASM_CONST(0x0000000000010000) #define CPU_FTR_NEED_COHERENT ASM_CONST(0x0000000000020000) #define CPU_FTR_NO_BTIC ASM_CONST(0x0000000000040000) -#define CPU_FTR_BIG_PHYS ASM_CONST(0x0000000000080000) #define CPU_FTR_NODSISRALIGN ASM_CONST(0x0000000000100000) #define CPU_FTR_PPC_LE ASM_CONST(0x0000000000200000) #define CPU_FTR_REAL_LE ASM_CONST(0x0000000000400000) @@ -163,6 +161,8 @@ extern const char *powerpc_base_platform; #define CPU_FTR_SPE ASM_CONST(0x0000000002000000) #define CPU_FTR_NEED_PAIRED_STWCX ASM_CONST(0x0000000004000000) #define CPU_FTR_LWSYNC ASM_CONST(0x0000000008000000) +#define CPU_FTR_NOEXECUTE ASM_CONST(0x0000000010000000) +#define CPU_FTR_INDEXED_DCR ASM_CONST(0x0000000020000000) /* * Add the 64-bit processor unique features in the top half of the word; @@ -177,7 +177,6 @@ extern const char *powerpc_base_platform; #define CPU_FTR_SLB LONG_ASM_CONST(0x0000000100000000) #define CPU_FTR_16M_PAGE LONG_ASM_CONST(0x0000000200000000) #define CPU_FTR_TLBIEL LONG_ASM_CONST(0x0000000400000000) -#define CPU_FTR_NOEXECUTE LONG_ASM_CONST(0x0000000800000000) #define CPU_FTR_IABR LONG_ASM_CONST(0x0000002000000000) #define CPU_FTR_MMCRA LONG_ASM_CONST(0x0000004000000000) #define CPU_FTR_CTRL LONG_ASM_CONST(0x0000008000000000) @@ -194,6 +193,7 @@ extern const char *powerpc_base_platform; 
#define CPU_FTR_VSX LONG_ASM_CONST(0x0010000000000000) #define CPU_FTR_SAO LONG_ASM_CONST(0x0020000000000000) #define CPU_FTR_CP_USE_DCBTZ LONG_ASM_CONST(0x0040000000000000) +#define CPU_FTR_UNALIGNED_LD_STD LONG_ASM_CONST(0x0080000000000000) #ifndef __ASSEMBLY__ @@ -264,164 +264,159 @@ extern const char *powerpc_base_platform; !defined(CONFIG_POWER3) && !defined(CONFIG_POWER4) && \ !defined(CONFIG_BOOKE)) -#define CPU_FTRS_PPC601 (CPU_FTR_COMMON | CPU_FTR_601 | CPU_FTR_HPTE_TABLE | \ +#define CPU_FTRS_PPC601 (CPU_FTR_COMMON | CPU_FTR_601 | \ CPU_FTR_COHERENT_ICACHE | CPU_FTR_UNIFIED_ID_CACHE) #define CPU_FTRS_603 (CPU_FTR_COMMON | \ CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE) #define CPU_FTRS_604 (CPU_FTR_COMMON | \ - CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_PPC_LE) + CPU_FTR_USE_TB | CPU_FTR_PPC_LE) #define CPU_FTRS_740_NOTAU (CPU_FTR_COMMON | \ CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \ - CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE) + CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE) #define CPU_FTRS_740 (CPU_FTR_COMMON | \ CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \ - CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | \ + CPU_FTR_TAU | CPU_FTR_MAYBE_CAN_NAP | \ CPU_FTR_PPC_LE) #define CPU_FTRS_750 (CPU_FTR_COMMON | \ CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \ - CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | \ + CPU_FTR_TAU | CPU_FTR_MAYBE_CAN_NAP | \ CPU_FTR_PPC_LE) -#define CPU_FTRS_750CL (CPU_FTRS_750 | CPU_FTR_HAS_HIGH_BATS) +#define CPU_FTRS_750CL (CPU_FTRS_750) #define CPU_FTRS_750FX1 (CPU_FTRS_750 | CPU_FTR_DUAL_PLL_750FX | CPU_FTR_NO_DPM) #define CPU_FTRS_750FX2 (CPU_FTRS_750 | CPU_FTR_NO_DPM) -#define CPU_FTRS_750FX (CPU_FTRS_750 | CPU_FTR_DUAL_PLL_750FX | \ - CPU_FTR_HAS_HIGH_BATS) +#define CPU_FTRS_750FX (CPU_FTRS_750 | CPU_FTR_DUAL_PLL_750FX) #define CPU_FTRS_750GX (CPU_FTRS_750FX) #define CPU_FTRS_7400_NOTAU (CPU_FTR_COMMON | \ CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \ - CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE | \ + CPU_FTR_ALTIVEC_COMP | \ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE) #define CPU_FTRS_7400 (CPU_FTR_COMMON | \ CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \ - CPU_FTR_TAU | CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE | \ + CPU_FTR_TAU | CPU_FTR_ALTIVEC_COMP | \ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE) #define CPU_FTRS_7450_20 (CPU_FTR_COMMON | \ CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \ - CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \ + CPU_FTR_L3CR | CPU_FTR_SPEC7450 | \ CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX) #define CPU_FTRS_7450_21 (CPU_FTR_COMMON | \ CPU_FTR_USE_TB | \ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \ - CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \ + CPU_FTR_L3CR | CPU_FTR_SPEC7450 | \ CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP | \ CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX) #define CPU_FTRS_7450_23 (CPU_FTR_COMMON | \ CPU_FTR_USE_TB | CPU_FTR_NEED_PAIRED_STWCX | \ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \ - CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \ + CPU_FTR_L3CR | CPU_FTR_SPEC7450 | \ CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE) #define CPU_FTRS_7455_1 (CPU_FTR_COMMON | \ CPU_FTR_USE_TB | CPU_FTR_NEED_PAIRED_STWCX | \ CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR | \ - CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | 
CPU_FTR_HAS_HIGH_BATS | \ - CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE) + CPU_FTR_SPEC7450 | CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE) #define CPU_FTRS_7455_20 (CPU_FTR_COMMON | \ CPU_FTR_USE_TB | CPU_FTR_NEED_PAIRED_STWCX | \ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \ - CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \ + CPU_FTR_L3CR | CPU_FTR_SPEC7450 | \ CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP | \ - CPU_FTR_NEED_COHERENT | CPU_FTR_HAS_HIGH_BATS | CPU_FTR_PPC_LE) + CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE) #define CPU_FTRS_7455 (CPU_FTR_COMMON | \ CPU_FTR_USE_TB | \ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \ - CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \ - CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | \ + CPU_FTR_L3CR | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR | \ CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX) #define CPU_FTRS_7447_10 (CPU_FTR_COMMON | \ CPU_FTR_USE_TB | \ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \ - CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \ - CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | \ + CPU_FTR_L3CR | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR | \ CPU_FTR_NEED_COHERENT | CPU_FTR_NO_BTIC | CPU_FTR_PPC_LE | \ CPU_FTR_NEED_PAIRED_STWCX) #define CPU_FTRS_7447 (CPU_FTR_COMMON | \ CPU_FTR_USE_TB | \ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \ - CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \ - CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | \ + CPU_FTR_L3CR | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR | \ CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX) #define CPU_FTRS_7447A (CPU_FTR_COMMON | \ CPU_FTR_USE_TB | \ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \ - CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \ - CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | \ + CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR | \ CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX) #define CPU_FTRS_7448 (CPU_FTR_COMMON | \ CPU_FTR_USE_TB | \ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \ - CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \ - CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | \ + CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR | \ CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX) #define CPU_FTRS_82XX (CPU_FTR_COMMON | \ CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB) #define CPU_FTRS_G2_LE (CPU_FTR_COMMON | CPU_FTR_MAYBE_CAN_DOZE | \ - CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS) + CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP) #define CPU_FTRS_E300 (CPU_FTR_MAYBE_CAN_DOZE | \ - CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS | \ + CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | \ CPU_FTR_COMMON) #define CPU_FTRS_E300C2 (CPU_FTR_MAYBE_CAN_DOZE | \ - CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS | \ + CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | \ CPU_FTR_COMMON | CPU_FTR_FPU_UNAVAILABLE) -#define CPU_FTRS_CLASSIC32 (CPU_FTR_COMMON | \ - CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE) +#define CPU_FTRS_CLASSIC32 (CPU_FTR_COMMON | CPU_FTR_USE_TB) #define CPU_FTRS_8XX (CPU_FTR_USE_TB) -#define CPU_FTRS_40X (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN) -#define CPU_FTRS_44X (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN) +#define CPU_FTRS_40X (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE) +#define CPU_FTRS_44X (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE) +#define CPU_FTRS_440x6 (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | 
CPU_FTR_NOEXECUTE | \ + CPU_FTR_INDEXED_DCR) #define CPU_FTRS_E200 (CPU_FTR_USE_TB | CPU_FTR_SPE_COMP | \ CPU_FTR_NODSISRALIGN | CPU_FTR_COHERENT_ICACHE | \ - CPU_FTR_UNIFIED_ID_CACHE) + CPU_FTR_UNIFIED_ID_CACHE | CPU_FTR_NOEXECUTE) #define CPU_FTRS_E500 (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \ - CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN) + CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN | \ + CPU_FTR_NOEXECUTE) #define CPU_FTRS_E500_2 (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \ - CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_BIG_PHYS | \ - CPU_FTR_NODSISRALIGN) + CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | \ + CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE) #define CPU_FTRS_E500MC (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \ - CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_BIG_PHYS | CPU_FTR_NODSISRALIGN | \ - CPU_FTR_L2CSR | CPU_FTR_LWSYNC) + CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN | \ + CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE) #define CPU_FTRS_GENERIC_32 (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN) /* 64-bit CPUs */ #define CPU_FTRS_POWER3 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ - CPU_FTR_HPTE_TABLE | CPU_FTR_IABR | CPU_FTR_PPC_LE) + CPU_FTR_IABR | CPU_FTR_PPC_LE) #define CPU_FTRS_RS64 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ - CPU_FTR_HPTE_TABLE | CPU_FTR_IABR | \ + CPU_FTR_IABR | \ CPU_FTR_MMCRA | CPU_FTR_CTRL) #define CPU_FTRS_POWER4 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ - CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ + CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ CPU_FTR_MMCRA | CPU_FTR_CP_USE_DCBTZ) #define CPU_FTRS_PPC970 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ - CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ + CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA | \ CPU_FTR_CP_USE_DCBTZ) #define CPU_FTRS_POWER5 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ - CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ + CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ CPU_FTR_MMCRA | CPU_FTR_SMT | \ CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \ CPU_FTR_PURR) #define CPU_FTRS_POWER6 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ - CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ + CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ CPU_FTR_MMCRA | CPU_FTR_SMT | \ CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \ CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ - CPU_FTR_DSCR) + CPU_FTR_DSCR | CPU_FTR_UNALIGNED_LD_STD) #define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ - CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ + CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ CPU_FTR_MMCRA | CPU_FTR_SMT | \ CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \ CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ CPU_FTR_DSCR | CPU_FTR_SAO) #define CPU_FTRS_CELL (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ - CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ + CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \ CPU_FTR_PAUSE_ZERO | CPU_FTR_CI_LARGE_PAGE | \ - CPU_FTR_CELL_TB_BUG | CPU_FTR_CP_USE_DCBTZ) + CPU_FTR_CELL_TB_BUG | CPU_FTR_CP_USE_DCBTZ | \ + CPU_FTR_UNALIGNED_LD_STD) #define CPU_FTRS_PA6T (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ - CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | \ + CPU_FTR_PPCAS_ARCH_V2 | \ CPU_FTR_ALTIVEC_COMP | CPU_FTR_CI_LARGE_PAGE | \ CPU_FTR_PURR | CPU_FTR_REAL_LE | CPU_FTR_NO_SLBIE_B) -#define CPU_FTRS_COMPATIBLE (CPU_FTR_USE_TB | \ - CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2) +#define CPU_FTRS_COMPATIBLE (CPU_FTR_USE_TB 
| CPU_FTR_PPCAS_ARCH_V2) #ifdef __powerpc64__ #define CPU_FTRS_POSSIBLE \ @@ -452,7 +447,7 @@ enum { CPU_FTRS_40X | #endif #ifdef CONFIG_44x - CPU_FTRS_44X | + CPU_FTRS_44X | CPU_FTRS_440x6 | #endif #ifdef CONFIG_E200 CPU_FTRS_E200 | @@ -492,7 +487,7 @@ enum { CPU_FTRS_40X & #endif #ifdef CONFIG_44x - CPU_FTRS_44X & + CPU_FTRS_44X & CPU_FTRS_440x6 & #endif #ifdef CONFIG_E200 CPU_FTRS_E200 & diff --git a/arch/powerpc/include/asm/dcr-native.h b/arch/powerpc/include/asm/dcr-native.h index 72d2b72c7390..7d2e6235726d 100644 --- a/arch/powerpc/include/asm/dcr-native.h +++ b/arch/powerpc/include/asm/dcr-native.h @@ -23,6 +23,7 @@ #ifndef __ASSEMBLY__ #include <linux/spinlock.h> +#include <asm/cputable.h> typedef struct { unsigned int base; @@ -39,23 +40,45 @@ static inline bool dcr_map_ok_native(dcr_host_native_t host) #define dcr_read_native(host, dcr_n) mfdcr(dcr_n + host.base) #define dcr_write_native(host, dcr_n, value) mtdcr(dcr_n + host.base, value) -/* Device Control Registers */ -void __mtdcr(int reg, unsigned int val); -unsigned int __mfdcr(int reg); +/* Table based DCR accessors */ +extern void __mtdcr(unsigned int reg, unsigned int val); +extern unsigned int __mfdcr(unsigned int reg); + +/* mfdcrx/mtdcrx instruction based accessors. We hand code + * the opcodes in order not to depend on newer binutils + */ +static inline unsigned int mfdcrx(unsigned int reg) +{ + unsigned int ret; + asm volatile(".long 0x7c000206 | (%0 << 21) | (%1 << 16)" + : "=r" (ret) : "r" (reg)); + return ret; +} + +static inline void mtdcrx(unsigned int reg, unsigned int val) +{ + asm volatile(".long 0x7c000306 | (%0 << 21) | (%1 << 16)" + : : "r" (val), "r" (reg)); +} + #define mfdcr(rn) \ ({unsigned int rval; \ - if (__builtin_constant_p(rn)) \ + if (__builtin_constant_p(rn) && rn < 1024) \ asm volatile("mfdcr %0," __stringify(rn) \ : "=r" (rval)); \ + else if (likely(cpu_has_feature(CPU_FTR_INDEXED_DCR))) \ + rval = mfdcrx(rn); \ else \ rval = __mfdcr(rn); \ rval;}) #define mtdcr(rn, v) \ do { \ - if (__builtin_constant_p(rn)) \ + if (__builtin_constant_p(rn) && rn < 1024) \ asm volatile("mtdcr " __stringify(rn) ",%0" \ : : "r" (v)); \ + else if (likely(cpu_has_feature(CPU_FTR_INDEXED_DCR))) \ + mtdcrx(rn, v); \ else \ __mtdcr(rn, v); \ } while (0) @@ -69,8 +92,13 @@ static inline unsigned __mfdcri(int base_addr, int base_data, int reg) unsigned int val; spin_lock_irqsave(&dcr_ind_lock, flags); - __mtdcr(base_addr, reg); - val = __mfdcr(base_data); + if (cpu_has_feature(CPU_FTR_INDEXED_DCR)) { + mtdcrx(base_addr, reg); + val = mfdcrx(base_data); + } else { + __mtdcr(base_addr, reg); + val = __mfdcr(base_data); + } spin_unlock_irqrestore(&dcr_ind_lock, flags); return val; } @@ -81,8 +109,13 @@ static inline void __mtdcri(int base_addr, int base_data, int reg, unsigned long flags; spin_lock_irqsave(&dcr_ind_lock, flags); - __mtdcr(base_addr, reg); - __mtdcr(base_data, val); + if (cpu_has_feature(CPU_FTR_INDEXED_DCR)) { + mtdcrx(base_addr, reg); + mtdcrx(base_data, val); + } else { + __mtdcr(base_addr, reg); + __mtdcr(base_data, val); + } spin_unlock_irqrestore(&dcr_ind_lock, flags); } @@ -93,9 +126,15 @@ static inline void __dcri_clrset(int base_addr, int base_data, int reg, unsigned int val; spin_lock_irqsave(&dcr_ind_lock, flags); - __mtdcr(base_addr, reg); - val = (__mfdcr(base_data) & ~clr) | set; - __mtdcr(base_data, val); + if (cpu_has_feature(CPU_FTR_INDEXED_DCR)) { + mtdcrx(base_addr, reg); + val = (mfdcrx(base_data) & ~clr) | set; + mtdcrx(base_data, val); + } else { + __mtdcr(base_addr, reg); + val = 
(__mfdcr(base_data) & ~clr) | set; + __mtdcr(base_data, val); + } spin_unlock_irqrestore(&dcr_ind_lock, flags); } diff --git a/arch/powerpc/include/asm/dcr.h b/arch/powerpc/include/asm/dcr.h index d13fb68bb5c0..9d6851cfb841 100644 --- a/arch/powerpc/include/asm/dcr.h +++ b/arch/powerpc/include/asm/dcr.h @@ -68,9 +68,9 @@ typedef dcr_host_mmio_t dcr_host_t; * additional helpers to read the DCR * base from the device-tree */ struct device_node; -extern unsigned int dcr_resource_start(struct device_node *np, +extern unsigned int dcr_resource_start(const struct device_node *np, unsigned int index); -extern unsigned int dcr_resource_len(struct device_node *np, +extern unsigned int dcr_resource_len(const struct device_node *np, unsigned int index); #endif /* CONFIG_PPC_DCR */ #endif /* __ASSEMBLY__ */ diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h index dfd504caccc1..7d2277cef09a 100644 --- a/arch/powerpc/include/asm/device.h +++ b/arch/powerpc/include/asm/device.h @@ -18,4 +18,16 @@ struct dev_archdata { void *dma_data; }; +static inline void dev_archdata_set_node(struct dev_archdata *ad, + struct device_node *np) +{ + ad->of_node = np; +} + +static inline struct device_node * +dev_archdata_get_node(const struct dev_archdata *ad) +{ + return ad->of_node; +} + #endif /* _ASM_POWERPC_DEVICE_H */ diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h index fddb229bd74f..86cef7ddc8d5 100644 --- a/arch/powerpc/include/asm/dma-mapping.h +++ b/arch/powerpc/include/asm/dma-mapping.h @@ -60,12 +60,6 @@ struct dma_mapping_ops { dma_addr_t *dma_handle, gfp_t flag); void (*free_coherent)(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle); - dma_addr_t (*map_single)(struct device *dev, void *ptr, - size_t size, enum dma_data_direction direction, - struct dma_attrs *attrs); - void (*unmap_single)(struct device *dev, dma_addr_t dma_addr, - size_t size, enum dma_data_direction direction, - struct dma_attrs *attrs); int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction direction, struct dma_attrs *attrs); @@ -82,6 +76,22 @@ struct dma_mapping_ops { dma_addr_t dma_address, size_t size, enum dma_data_direction direction, struct dma_attrs *attrs); +#ifdef CONFIG_PPC_NEED_DMA_SYNC_OPS + void (*sync_single_range_for_cpu)(struct device *hwdev, + dma_addr_t dma_handle, unsigned long offset, + size_t size, + enum dma_data_direction direction); + void (*sync_single_range_for_device)(struct device *hwdev, + dma_addr_t dma_handle, unsigned long offset, + size_t size, + enum dma_data_direction direction); + void (*sync_sg_for_cpu)(struct device *hwdev, + struct scatterlist *sg, int nelems, + enum dma_data_direction direction); + void (*sync_sg_for_device)(struct device *hwdev, + struct scatterlist *sg, int nelems, + enum dma_data_direction direction); +#endif }; /* @@ -149,10 +159,9 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask) } /* - * TODO: map_/unmap_single will ideally go away, to be completely - * replaced by map/unmap_page. Until then, we allow dma_ops to have - * one or the other, or both by checking to see if the specific - * function requested exists; and if not, falling back on the other set. + * map_/unmap_single actually call through to map/unmap_page now that all the + * dma_mapping_ops have been converted over. 
We just have to get the page and + * offset to pass through to map_page */ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *cpu_addr, @@ -164,10 +173,6 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, BUG_ON(!dma_ops); - if (dma_ops->map_single) - return dma_ops->map_single(dev, cpu_addr, size, direction, - attrs); - return dma_ops->map_page(dev, virt_to_page(cpu_addr), (unsigned long)cpu_addr % PAGE_SIZE, size, direction, attrs); @@ -183,11 +188,6 @@ static inline void dma_unmap_single_attrs(struct device *dev, BUG_ON(!dma_ops); - if (dma_ops->unmap_single) { - dma_ops->unmap_single(dev, dma_addr, size, direction, attrs); - return; - } - dma_ops->unmap_page(dev, dma_addr, size, direction, attrs); } @@ -201,12 +201,7 @@ static inline dma_addr_t dma_map_page_attrs(struct device *dev, BUG_ON(!dma_ops); - if (dma_ops->map_page) - return dma_ops->map_page(dev, page, offset, size, direction, - attrs); - - return dma_ops->map_single(dev, page_address(page) + offset, size, - direction, attrs); + return dma_ops->map_page(dev, page, offset, size, direction, attrs); } static inline void dma_unmap_page_attrs(struct device *dev, @@ -219,12 +214,7 @@ static inline void dma_unmap_page_attrs(struct device *dev, BUG_ON(!dma_ops); - if (dma_ops->unmap_page) { - dma_ops->unmap_page(dev, dma_address, size, direction, attrs); - return; - } - - dma_ops->unmap_single(dev, dma_address, size, direction, attrs); + dma_ops->unmap_page(dev, dma_address, size, direction, attrs); } static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, @@ -308,47 +298,107 @@ static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg, dma_unmap_sg_attrs(dev, sg, nhwentries, direction, NULL); } +#ifdef CONFIG_PPC_NEED_DMA_SYNC_OPS static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction) { - BUG_ON(direction == DMA_NONE); - __dma_sync(bus_to_virt(dma_handle), size, direction); + struct dma_mapping_ops *dma_ops = get_dma_ops(dev); + + BUG_ON(!dma_ops); + dma_ops->sync_single_range_for_cpu(dev, dma_handle, 0, + size, direction); } static inline void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction) { - BUG_ON(direction == DMA_NONE); - __dma_sync(bus_to_virt(dma_handle), size, direction); + struct dma_mapping_ops *dma_ops = get_dma_ops(dev); + + BUG_ON(!dma_ops); + dma_ops->sync_single_range_for_device(dev, dma_handle, + 0, size, direction); } static inline void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl, int nents, enum dma_data_direction direction) { - struct scatterlist *sg; - int i; + struct dma_mapping_ops *dma_ops = get_dma_ops(dev); - BUG_ON(direction == DMA_NONE); + BUG_ON(!dma_ops); + dma_ops->sync_sg_for_cpu(dev, sgl, nents, direction); +} + +static inline void dma_sync_sg_for_device(struct device *dev, + struct scatterlist *sgl, int nents, + enum dma_data_direction direction) +{ + struct dma_mapping_ops *dma_ops = get_dma_ops(dev); + + BUG_ON(!dma_ops); + dma_ops->sync_sg_for_device(dev, sgl, nents, direction); +} + +static inline void dma_sync_single_range_for_cpu(struct device *dev, + dma_addr_t dma_handle, unsigned long offset, size_t size, + enum dma_data_direction direction) +{ + struct dma_mapping_ops *dma_ops = get_dma_ops(dev); - for_each_sg(sgl, sg, nents, i) - __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction); + BUG_ON(!dma_ops); + 
dma_ops->sync_single_range_for_cpu(dev, dma_handle, + offset, size, direction); +} + +static inline void dma_sync_single_range_for_device(struct device *dev, + dma_addr_t dma_handle, unsigned long offset, size_t size, + enum dma_data_direction direction) +{ + struct dma_mapping_ops *dma_ops = get_dma_ops(dev); + + BUG_ON(!dma_ops); + dma_ops->sync_single_range_for_device(dev, dma_handle, offset, + size, direction); +} +#else /* CONFIG_PPC_NEED_DMA_SYNC_OPS */ +static inline void dma_sync_single_for_cpu(struct device *dev, + dma_addr_t dma_handle, size_t size, + enum dma_data_direction direction) +{ +} + +static inline void dma_sync_single_for_device(struct device *dev, + dma_addr_t dma_handle, size_t size, + enum dma_data_direction direction) +{ +} + +static inline void dma_sync_sg_for_cpu(struct device *dev, + struct scatterlist *sgl, int nents, + enum dma_data_direction direction) +{ } static inline void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl, int nents, enum dma_data_direction direction) { - struct scatterlist *sg; - int i; +} - BUG_ON(direction == DMA_NONE); +static inline void dma_sync_single_range_for_cpu(struct device *dev, + dma_addr_t dma_handle, unsigned long offset, size_t size, + enum dma_data_direction direction) +{ +} - for_each_sg(sgl, sg, nents, i) - __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction); +static inline void dma_sync_single_range_for_device(struct device *dev, + dma_addr_t dma_handle, unsigned long offset, size_t size, + enum dma_data_direction direction) +{ } +#endif static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { @@ -382,22 +432,6 @@ static inline int dma_get_cache_alignment(void) #endif } -static inline void dma_sync_single_range_for_cpu(struct device *dev, - dma_addr_t dma_handle, unsigned long offset, size_t size, - enum dma_data_direction direction) -{ - /* just sync everything for now */ - dma_sync_single_for_cpu(dev, dma_handle, offset + size, direction); -} - -static inline void dma_sync_single_range_for_device(struct device *dev, - dma_addr_t dma_handle, unsigned long offset, size_t size, - enum dma_data_direction direction) -{ - /* just sync everything for now */ - dma_sync_single_for_device(dev, dma_handle, offset + size, direction); -} - static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, enum dma_data_direction direction) { diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h index b886bec67016..66ea9b8b95c5 100644 --- a/arch/powerpc/include/asm/eeh.h +++ b/arch/powerpc/include/asm/eeh.h @@ -17,8 +17,8 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -#ifndef _PPC64_EEH_H -#define _PPC64_EEH_H +#ifndef _POWERPC_EEH_H +#define _POWERPC_EEH_H #ifdef __KERNEL__ #include <linux/init.h> @@ -110,6 +110,7 @@ static inline void eeh_remove_bus_device(struct pci_dev *dev) { } #define EEH_IO_ERROR_VALUE(size) (-1UL) #endif /* CONFIG_EEH */ +#ifdef CONFIG_PPC64 /* * MMIO read/write operations with EEH support. 
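As a rough illustration of the pattern these now PPC64-only accessors follow (a sketch of the idea, not the exact code in the patch): a load that comes back as all-ones is treated as a possible EEH isolation event and handed to eeh_check_failure(), which decides what value to return.

static inline u32 example_eeh_readl(const volatile void __iomem *addr)
{
	u32 val = in_le32(addr);	/* ordinary MMIO load */

	/* All-ones may mean the slot was isolated by EEH; let the
	 * EEH core check the device and pick the value to hand back. */
	if (EEH_POSSIBLE_ERROR(val, u32))
		return eeh_check_failure(addr, val);
	return val;
}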
*/ @@ -207,5 +208,6 @@ static inline void eeh_readsl(const volatile void __iomem *addr, void * buf, eeh_check_failure(addr, *(u32*)buf); } +#endif /* CONFIG_PPC64 */ #endif /* __KERNEL__ */ -#endif /* _PPC64_EEH_H */ +#endif /* _POWERPC_EEH_H */ diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h index d812929390e4..cd46f023ec6d 100644 --- a/arch/powerpc/include/asm/elf.h +++ b/arch/powerpc/include/asm/elf.h @@ -267,7 +267,7 @@ extern int ucache_bsize; #define ARCH_HAS_SETUP_ADDITIONAL_PAGES struct linux_binprm; extern int arch_setup_additional_pages(struct linux_binprm *bprm, - int executable_stack); + int uses_interp); #define VDSO_AUX_ENT(a,b) NEW_AUX_ENT(a,b); #endif /* __KERNEL__ */ diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h index a1029967620b..e4094a5cb05b 100644 --- a/arch/powerpc/include/asm/feature-fixups.h +++ b/arch/powerpc/include/asm/feature-fixups.h @@ -81,6 +81,36 @@ label##5: \ #define ALT_FTR_SECTION_END_IFCLR(msk) \ ALT_FTR_SECTION_END_NESTED_IFCLR(msk, 97) +/* MMU feature dependent sections */ +#define BEGIN_MMU_FTR_SECTION_NESTED(label) START_FTR_SECTION(label) +#define BEGIN_MMU_FTR_SECTION START_FTR_SECTION(97) + +#define END_MMU_FTR_SECTION_NESTED(msk, val, label) \ + FTR_SECTION_ELSE_NESTED(label) \ + MAKE_FTR_SECTION_ENTRY(msk, val, label, __mmu_ftr_fixup) + +#define END_MMU_FTR_SECTION(msk, val) \ + END_MMU_FTR_SECTION_NESTED(msk, val, 97) + +#define END_MMU_FTR_SECTION_IFSET(msk) END_MMU_FTR_SECTION((msk), (msk)) +#define END_MMU_FTR_SECTION_IFCLR(msk) END_MMU_FTR_SECTION((msk), 0) + +/* MMU feature sections with alternatives, use BEGIN_FTR_SECTION to start */ +#define MMU_FTR_SECTION_ELSE_NESTED(label) FTR_SECTION_ELSE_NESTED(label) +#define MMU_FTR_SECTION_ELSE MMU_FTR_SECTION_ELSE_NESTED(97) +#define ALT_MMU_FTR_SECTION_END_NESTED(msk, val, label) \ + MAKE_FTR_SECTION_ENTRY(msk, val, label, __mmu_ftr_fixup) +#define ALT_MMU_FTR_SECTION_END_NESTED_IFSET(msk, label) \ + ALT_MMU_FTR_SECTION_END_NESTED(msk, msk, label) +#define ALT_MMU_FTR_SECTION_END_NESTED_IFCLR(msk, label) \ + ALT_MMU_FTR_SECTION_END_NESTED(msk, 0, label) +#define ALT_MMU_FTR_SECTION_END(msk, val) \ + ALT_MMU_FTR_SECTION_END_NESTED(msk, val, 97) +#define ALT_MMU_FTR_SECTION_END_IFSET(msk) \ + ALT_MMU_FTR_SECTION_END_NESTED_IFSET(msk, 97) +#define ALT_MMU_FTR_SECTION_END_IFCLR(msk) \ + ALT_MMU_FTR_SECTION_END_NESTED_IFCLR(msk, 97) + /* Firmware feature dependent sections */ #define BEGIN_FW_FTR_SECTION_NESTED(label) START_FTR_SECTION(label) #define BEGIN_FW_FTR_SECTION START_FTR_SECTION(97) diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h index b298f7a631e6..e5f2ae8362f7 100644 --- a/arch/powerpc/include/asm/ftrace.h +++ b/arch/powerpc/include/asm/ftrace.h @@ -7,7 +7,19 @@ #ifndef __ASSEMBLY__ extern void _mcount(void); -#endif + +#ifdef CONFIG_DYNAMIC_FTRACE +static inline unsigned long ftrace_call_adjust(unsigned long addr) +{ + /* reloction of mcount call site is the same as the address */ + return addr; +} + +struct dyn_arch_ftrace { + struct module *mod; +}; +#endif /* CONFIG_DYNAMIC_FTRACE */ +#endif /* __ASSEMBLY__ */ #endif diff --git a/arch/powerpc/include/asm/highmem.h b/arch/powerpc/include/asm/highmem.h index 91c589520c0a..04e4a620952e 100644 --- a/arch/powerpc/include/asm/highmem.h +++ b/arch/powerpc/include/asm/highmem.h @@ -38,9 +38,24 @@ extern pte_t *pkmap_page_table; * easily, subsequent pte tables have to be allocated in one physical * chunk of RAM. 
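The 32 MB / 512 MB figures quoted in the comment above correspond to 8-byte (64-bit) PTEs for the large-page configurations; with 4-byte PTEs the spans double. An illustrative recap of the arithmetic, not code from the patch:

/*
 * Page size   PTE size   PTEs per page   Span of one pte table
 * 4K          4 bytes    1024            1024 * 4K   =   4 MB
 * 16K         8 bytes    2048            2048 * 16K  =  32 MB
 * 64K         8 bytes    8192            8192 * 64K  = 512 MB
 *
 * With 16K/64K pages a single pte table is therefore large enough to
 * carry both FIXMAP and PKMAP, which is why PKMAP_ORDER is pinned to
 * 10 (1024 pkmap slots) instead of being derived from PTE_SHIFT.
 */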
*/ -#define LAST_PKMAP (1 << PTE_SHIFT) -#define LAST_PKMAP_MASK (LAST_PKMAP-1) +/* + * We use one full pte table with 4K pages. And with 16K/64K pages pte + * table covers enough memory (32MB and 512MB resp.) that both FIXMAP + * and PKMAP can be placed in single pte table. We use 1024 pages for + * PKMAP in case of 16K/64K pages. + */ +#ifdef CONFIG_PPC_4K_PAGES +#define PKMAP_ORDER PTE_SHIFT +#else +#define PKMAP_ORDER 10 +#endif +#define LAST_PKMAP (1 << PKMAP_ORDER) +#ifndef CONFIG_PPC_4K_PAGES +#define PKMAP_BASE (FIXADDR_START - PAGE_SIZE*(LAST_PKMAP + 1)) +#else #define PKMAP_BASE ((FIXADDR_START - PAGE_SIZE*(LAST_PKMAP + 1)) & PMD_MASK) +#endif +#define LAST_PKMAP_MASK (LAST_PKMAP-1) #define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT) #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT)) @@ -85,7 +100,7 @@ static inline void *kmap_atomic_prot(struct page *page, enum km_type type, pgpro BUG_ON(!pte_none(*(kmap_pte-idx))); #endif __set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot)); - flush_tlb_page(NULL, vaddr); + local_flush_tlb_page(NULL, vaddr); return (void*) vaddr; } @@ -113,7 +128,7 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type) * this pte without first remap it */ pte_clear(&init_mm, vaddr, kmap_pte-idx); - flush_tlb_page(NULL, vaddr); + local_flush_tlb_page(NULL, vaddr); #endif pagefault_enable(); } diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h index 08266d2728b3..494cd8b0a278 100644 --- a/arch/powerpc/include/asm/io.h +++ b/arch/powerpc/include/asm/io.h @@ -713,13 +713,6 @@ static inline void * phys_to_virt(unsigned long address) */ #define page_to_phys(page) ((phys_addr_t)page_to_pfn(page) << PAGE_SHIFT) -/* We do NOT want virtual merging, it would put too much pressure on - * our iommu allocator. Instead, we want drivers to be smart enough - * to coalesce sglists that happen to have been mapped in a contiguous - * way by the iommu - */ -#define BIO_VMERGE_BOUNDARY 0 - /* * 32 bits still uses virt_to_bus() for it's implementation of DMA * mappings se we have to keep it defined here. We also have some old diff --git a/arch/powerpc/include/asm/kdump.h b/arch/powerpc/include/asm/kdump.h index b07ebb9784d3..5ebfe5d3c61f 100644 --- a/arch/powerpc/include/asm/kdump.h +++ b/arch/powerpc/include/asm/kdump.h @@ -1,6 +1,8 @@ #ifndef _PPC64_KDUMP_H #define _PPC64_KDUMP_H +#include <asm/page.h> + /* Kdump kernel runs at 32 MB, change at your peril. */ #define KDUMP_KERNELBASE 0x2000000 @@ -11,8 +13,19 @@ #ifdef CONFIG_CRASH_DUMP +/* + * On PPC64 translation is disabled during trampoline setup, so we use + * physical addresses. Though on PPC32 translation is already enabled, + * so we can't do the same. Luckily create_trampoline() creates relative + * branches, so we can just add the PAGE_OFFSET and don't worry about it. 
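Assuming the common 32-bit PAGE_OFFSET of 0xc0000000 (boards can configure a different kernel base), the new kdump conditionals resolve roughly as follows; this is an illustrative expansion only:

/*
 * ppc64:  KDUMP_TRAMPOLINE_START = 0x0100        (physical, MMU off)
 *         KDUMP_TRAMPOLINE_END   = 0x3000
 * ppc32:  KDUMP_TRAMPOLINE_START = 0xc0000100    (0x0100 + PAGE_OFFSET)
 *         KDUMP_TRAMPOLINE_END   = 0xc0003000
 */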
+ */ +#ifdef __powerpc64__ #define KDUMP_TRAMPOLINE_START 0x0100 #define KDUMP_TRAMPOLINE_END 0x3000 +#else +#define KDUMP_TRAMPOLINE_START (0x0100 + PAGE_OFFSET) +#define KDUMP_TRAMPOLINE_END (0x3000 + PAGE_OFFSET) +#endif /* __powerpc64__ */ #define KDUMP_MIN_TCE_ENTRIES 2048 diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h index 3736d9b33289..6dbffc981702 100644 --- a/arch/powerpc/include/asm/kexec.h +++ b/arch/powerpc/include/asm/kexec.h @@ -33,12 +33,12 @@ #ifndef __ASSEMBLY__ #include <linux/cpumask.h> +#include <asm/reg.h> typedef void (*crash_shutdown_t)(void); #ifdef CONFIG_KEXEC -#ifdef __powerpc64__ /* * This function is responsible for capturing register states if coming * via panic or invoking dump using sysrq-trigger. @@ -48,6 +48,7 @@ static inline void crash_setup_regs(struct pt_regs *newregs, { if (oldregs) memcpy(newregs, oldregs, sizeof(*newregs)); +#ifdef __powerpc64__ else { /* FIXME Merge this with xmon_save_regs ?? */ unsigned long tmp1, tmp2; @@ -100,15 +101,11 @@ static inline void crash_setup_regs(struct pt_regs *newregs, : "b" (newregs) : "memory"); } -} #else -/* - * Provide a dummy definition to avoid build failures. Will remain - * empty till crash dump support is enabled. - */ -static inline void crash_setup_regs(struct pt_regs *newregs, - struct pt_regs *oldregs) { } -#endif /* !__powerpc64 __ */ + else + ppc_save_regs(newregs); +#endif /* __powerpc64__ */ +} extern void kexec_smp_wait(void); /* get and clear naca physid, wait for master to copy new code to 0 */ diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h index 612d83276653..84b457a3c1bc 100644 --- a/arch/powerpc/include/asm/local.h +++ b/arch/powerpc/include/asm/local.h @@ -67,7 +67,7 @@ static __inline__ long local_inc_return(local_t *l) bne- 1b" : "=&r" (t) : "r" (&(l->a.counter)) - : "cc", "memory"); + : "cc", "xer", "memory"); return t; } @@ -94,7 +94,7 @@ static __inline__ long local_dec_return(local_t *l) bne- 1b" : "=&r" (t) : "r" (&(l->a.counter)) - : "cc", "memory"); + : "cc", "xer", "memory"); return t; } diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h index 2fe268b10333..25aaa97facd8 100644 --- a/arch/powerpc/include/asm/lppaca.h +++ b/arch/powerpc/include/asm/lppaca.h @@ -133,7 +133,8 @@ struct lppaca { //============================================================================= // CACHE_LINE_4-5 0x0180 - 0x027F Contains PMC interrupt data //============================================================================= - u8 pmc_save_area[256]; // PMC interrupt Area x00-xFF + u32 page_ins; // CMO Hint - # page ins by OS x00-x04 + u8 pmc_save_area[252]; // PMC interrupt Area x04-xFF } __attribute__((__aligned__(0x400))); extern struct lppaca lppaca[]; diff --git a/arch/powerpc/include/asm/mmu-40x.h b/arch/powerpc/include/asm/mmu-40x.h index 3d108676584c..776f415a36aa 100644 --- a/arch/powerpc/include/asm/mmu-40x.h +++ b/arch/powerpc/include/asm/mmu-40x.h @@ -54,8 +54,9 @@ #ifndef __ASSEMBLY__ typedef struct { - unsigned long id; - unsigned long vdso_base; + unsigned int id; + unsigned int active; + unsigned long vdso_base; } mm_context_t; #endif /* !__ASSEMBLY__ */ diff --git a/arch/powerpc/include/asm/mmu-44x.h b/arch/powerpc/include/asm/mmu-44x.h index a825524c981a..8a97cfb08b7e 100644 --- a/arch/powerpc/include/asm/mmu-44x.h +++ b/arch/powerpc/include/asm/mmu-44x.h @@ -4,6 +4,8 @@ * PPC440 support */ +#include <asm/page.h> + #define PPC44x_MMUCR_TID 0x000000ff #define PPC44x_MMUCR_STS 
0x00010000 @@ -56,8 +58,9 @@ extern unsigned int tlb_44x_hwater; typedef struct { - unsigned long id; - unsigned long vdso_base; + unsigned int id; + unsigned int active; + unsigned long vdso_base; } mm_context_t; #endif /* !__ASSEMBLY__ */ @@ -73,4 +76,19 @@ typedef struct { /* Size of the TLBs used for pinning in lowmem */ #define PPC_PIN_SIZE (1 << 28) /* 256M */ +#if (PAGE_SHIFT == 12) +#define PPC44x_TLBE_SIZE PPC44x_TLB_4K +#elif (PAGE_SHIFT == 14) +#define PPC44x_TLBE_SIZE PPC44x_TLB_16K +#elif (PAGE_SHIFT == 16) +#define PPC44x_TLBE_SIZE PPC44x_TLB_64K +#else +#error "Unsupported PAGE_SIZE" +#endif + +#define PPC44x_PGD_OFF_SHIFT (32 - PGDIR_SHIFT + PGD_T_LOG2) +#define PPC44x_PGD_OFF_MASK_BIT (PGDIR_SHIFT - PGD_T_LOG2) +#define PPC44x_PTE_ADD_SHIFT (32 - PGDIR_SHIFT + PTE_SHIFT + PTE_T_LOG2) +#define PPC44x_PTE_ADD_MASK_BIT (32 - PTE_T_LOG2 - PTE_SHIFT) + #endif /* _ASM_POWERPC_MMU_44X_H_ */ diff --git a/arch/powerpc/include/asm/mmu-8xx.h b/arch/powerpc/include/asm/mmu-8xx.h index 9db877eb88db..07865a357848 100644 --- a/arch/powerpc/include/asm/mmu-8xx.h +++ b/arch/powerpc/include/asm/mmu-8xx.h @@ -137,7 +137,8 @@ #ifndef __ASSEMBLY__ typedef struct { - unsigned long id; + unsigned int id; + unsigned int active; unsigned long vdso_base; } mm_context_t; #endif /* !__ASSEMBLY__ */ diff --git a/arch/powerpc/include/asm/mmu-fsl-booke.h b/arch/powerpc/include/asm/mmu-fsl-booke.h index 925d93cf64d8..3f941c0f7e8e 100644 --- a/arch/powerpc/include/asm/mmu-fsl-booke.h +++ b/arch/powerpc/include/asm/mmu-fsl-booke.h @@ -40,6 +40,8 @@ #define MAS2_M 0x00000004 #define MAS2_G 0x00000002 #define MAS2_E 0x00000001 +#define MAS2_EPN_MASK(size) (~0 << (2*(size) + 10)) +#define MAS2_VAL(addr, size, flags) ((addr) & MAS2_EPN_MASK(size) | (flags)) #define MAS3_RPN 0xFFFFF000 #define MAS3_U0 0x00000200 @@ -74,8 +76,9 @@ #ifndef __ASSEMBLY__ typedef struct { - unsigned long id; - unsigned long vdso_base; + unsigned int id; + unsigned int active; + unsigned long vdso_base; } mm_context_t; #endif /* !__ASSEMBLY__ */ diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h index 4c0e1b4f975c..6e7639911318 100644 --- a/arch/powerpc/include/asm/mmu.h +++ b/arch/powerpc/include/asm/mmu.h @@ -2,6 +2,63 @@ #define _ASM_POWERPC_MMU_H_ #ifdef __KERNEL__ +#include <asm/asm-compat.h> +#include <asm/feature-fixups.h> + +/* + * MMU features bit definitions + */ + +/* + * First half is MMU families + */ +#define MMU_FTR_HPTE_TABLE ASM_CONST(0x00000001) +#define MMU_FTR_TYPE_8xx ASM_CONST(0x00000002) +#define MMU_FTR_TYPE_40x ASM_CONST(0x00000004) +#define MMU_FTR_TYPE_44x ASM_CONST(0x00000008) +#define MMU_FTR_TYPE_FSL_E ASM_CONST(0x00000010) + +/* + * This is individual features + */ + +/* Enable use of high BAT registers */ +#define MMU_FTR_USE_HIGH_BATS ASM_CONST(0x00010000) + +/* Enable >32-bit physical addresses on 32-bit processor, only used + * by CONFIG_6xx currently as BookE supports that from day 1 + */ +#define MMU_FTR_BIG_PHYS ASM_CONST(0x00020000) + +/* Enable use of broadcast TLB invalidations. We don't always set it + * on processors that support it due to other constraints with the + * use of such invalidations + */ +#define MMU_FTR_USE_TLBIVAX_BCAST ASM_CONST(0x00040000) + +/* Enable use of tlbilx invalidate-by-PID variant. + */ +#define MMU_FTR_USE_TLBILX_PID ASM_CONST(0x00080000) + +/* This indicates that the processor cannot handle multiple outstanding + * broadcast tlbivax or tlbsync. This makes the code use a spinlock + * around such invalidate forms. 
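A usage sketch of the MMU feature test introduced just below; mmu_has_feature() and MMU_FTR_USE_HIGH_BATS come from this hunk, while the calling function and setup_high_bats() are hypothetical helpers used only for illustration.

extern void setup_high_bats(void);	/* hypothetical helper */

static void example_setup_bats(void)
{
	/* The MMU feature bit takes over from the old CPU_FTR_HAS_HIGH_BATS
	 * cputable test removed earlier in this series. */
	if (mmu_has_feature(MMU_FTR_USE_HIGH_BATS))
		setup_high_bats();
}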
+ */ +#define MMU_FTR_LOCK_BCAST_INVAL ASM_CONST(0x00100000) + +#ifndef __ASSEMBLY__ +#include <asm/cputable.h> + +static inline int mmu_has_feature(unsigned long feature) +{ + return (cur_cpu_spec->mmu_features & feature); +} + +extern unsigned int __start___mmu_ftr_fixup, __stop___mmu_ftr_fixup; + +#endif /* !__ASSEMBLY__ */ + + #ifdef CONFIG_PPC64 /* 64-bit classic hash table MMU */ # include <asm/mmu-hash64.h> diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h index 6b993ef452ff..ab4f19263c42 100644 --- a/arch/powerpc/include/asm/mmu_context.h +++ b/arch/powerpc/include/asm/mmu_context.h @@ -2,237 +2,26 @@ #define __ASM_POWERPC_MMU_CONTEXT_H #ifdef __KERNEL__ +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/sched.h> +#include <linux/spinlock.h> #include <asm/mmu.h> #include <asm/cputable.h> #include <asm-generic/mm_hooks.h> - -#ifndef CONFIG_PPC64 -#include <asm/atomic.h> -#include <linux/bitops.h> - -/* - * On 32-bit PowerPC 6xx/7xx/7xxx CPUs, we use a set of 16 VSIDs - * (virtual segment identifiers) for each context. Although the - * hardware supports 24-bit VSIDs, and thus >1 million contexts, - * we only use 32,768 of them. That is ample, since there can be - * at most around 30,000 tasks in the system anyway, and it means - * that we can use a bitmap to indicate which contexts are in use. - * Using a bitmap means that we entirely avoid all of the problems - * that we used to have when the context number overflowed, - * particularly on SMP systems. - * -- paulus. - */ - -/* - * This function defines the mapping from contexts to VSIDs (virtual - * segment IDs). We use a skew on both the context and the high 4 bits - * of the 32-bit virtual address (the "effective segment ID") in order - * to spread out the entries in the MMU hash table. Note, if this - * function is changed then arch/ppc/mm/hashtable.S will have to be - * changed to correspond. - */ -#define CTX_TO_VSID(ctx, va) (((ctx) * (897 * 16) + ((va) >> 28) * 0x111) \ - & 0xffffff) - -/* - The MPC8xx has only 16 contexts. We rotate through them on each - task switch. A better way would be to keep track of tasks that - own contexts, and implement an LRU usage. That way very active - tasks don't always have to pay the TLB reload overhead. The - kernel pages are mapped shared, so the kernel can run on behalf - of any task that makes a kernel entry. Shared does not mean they - are not protected, just that the ASID comparison is not performed. - -- Dan - - The IBM4xx has 256 contexts, so we can just rotate through these - as a way of "switching" contexts. If the TID of the TLB is zero, - the PID/TID comparison is disabled, so we can use a TID of zero - to represent all kernel pages as shared among all contexts. - -- Dan - */ - -static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) -{ -} - -#ifdef CONFIG_8xx -#define NO_CONTEXT 16 -#define LAST_CONTEXT 15 -#define FIRST_CONTEXT 0 - -#elif defined(CONFIG_4xx) -#define NO_CONTEXT 256 -#define LAST_CONTEXT 255 -#define FIRST_CONTEXT 1 - -#elif defined(CONFIG_E200) || defined(CONFIG_E500) -#define NO_CONTEXT 256 -#define LAST_CONTEXT 255 -#define FIRST_CONTEXT 1 - -#else - -/* PPC 6xx, 7xx CPUs */ -#define NO_CONTEXT ((unsigned long) -1) -#define LAST_CONTEXT 32767 -#define FIRST_CONTEXT 1 -#endif - -/* - * Set the current MMU context. - * On 32-bit PowerPCs (other than the 8xx embedded chips), this is done by - * loading up the segment registers for the user part of the address space. 
- * - * Since the PGD is immediately available, it is much faster to simply - * pass this along as a second parameter, which is required for 8xx and - * can be used for debugging on all processors (if you happen to have - * an Abatron). - */ -extern void set_context(unsigned long contextid, pgd_t *pgd); - -/* - * Bitmap of contexts in use. - * The size of this bitmap is LAST_CONTEXT + 1 bits. - */ -extern unsigned long context_map[]; - -/* - * This caches the next context number that we expect to be free. - * Its use is an optimization only, we can't rely on this context - * number to be free, but it usually will be. - */ -extern unsigned long next_mmu_context; - -/* - * If we don't have sufficient contexts to give one to every task - * that could be in the system, we need to be able to steal contexts. - * These variables support that. - */ -#if LAST_CONTEXT < 30000 -#define FEW_CONTEXTS 1 -extern atomic_t nr_free_contexts; -extern struct mm_struct *context_mm[LAST_CONTEXT+1]; -extern void steal_context(void); -#endif - -/* - * Get a new mmu context for the address space described by `mm'. - */ -static inline void get_mmu_context(struct mm_struct *mm) -{ - unsigned long ctx; - - if (mm->context.id != NO_CONTEXT) - return; -#ifdef FEW_CONTEXTS - while (atomic_dec_if_positive(&nr_free_contexts) < 0) - steal_context(); -#endif - ctx = next_mmu_context; - while (test_and_set_bit(ctx, context_map)) { - ctx = find_next_zero_bit(context_map, LAST_CONTEXT+1, ctx); - if (ctx > LAST_CONTEXT) - ctx = 0; - } - next_mmu_context = (ctx + 1) & LAST_CONTEXT; - mm->context.id = ctx; -#ifdef FEW_CONTEXTS - context_mm[ctx] = mm; -#endif -} - -/* - * Set up the context for a new address space. - */ -static inline int init_new_context(struct task_struct *t, struct mm_struct *mm) -{ - mm->context.id = NO_CONTEXT; - return 0; -} - -/* - * We're finished using the context for an address space. - */ -static inline void destroy_context(struct mm_struct *mm) -{ - preempt_disable(); - if (mm->context.id != NO_CONTEXT) { - clear_bit(mm->context.id, context_map); - mm->context.id = NO_CONTEXT; -#ifdef FEW_CONTEXTS - atomic_inc(&nr_free_contexts); -#endif - } - preempt_enable(); -} - -static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, - struct task_struct *tsk) -{ -#ifdef CONFIG_ALTIVEC - if (cpu_has_feature(CPU_FTR_ALTIVEC)) - asm volatile ("dssall;\n" -#ifndef CONFIG_POWER4 - "sync;\n" /* G4 needs a sync here, G5 apparently not */ -#endif - : : ); -#endif /* CONFIG_ALTIVEC */ - - tsk->thread.pgdir = next->pgd; - - /* No need to flush userspace segments if the mm doesnt change */ - if (prev == next) - return; - - /* Setup new userspace context */ - get_mmu_context(next); - set_context(next->context.id, next->pgd); -} - -#define deactivate_mm(tsk,mm) do { } while (0) +#include <asm/cputhreads.h> /* - * After we have set current->mm to a new value, this activates - * the context for the new mm so we see the new mappings. + * Most if the context management is out of line */ -#define activate_mm(active_mm, mm) switch_mm(active_mm, mm, current) - extern void mmu_context_init(void); - - -#else - -#include <linux/kernel.h> -#include <linux/mm.h> -#include <linux/sched.h> - -/* - * Copyright (C) 2001 PPC 64 Team, IBM Corp - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
- */ - -static inline void enter_lazy_tlb(struct mm_struct *mm, - struct task_struct *tsk) -{ -} - -/* - * The proto-VSID space has 2^35 - 1 segments available for user mappings. - * Each segment contains 2^28 bytes. Each context maps 2^44 bytes, - * so we can support 2^19-1 contexts (19 == 35 + 28 - 44). - */ -#define NO_CONTEXT 0 -#define MAX_CONTEXT ((1UL << 19) - 1) - extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm); extern void destroy_context(struct mm_struct *mm); +extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next); extern void switch_stab(struct task_struct *tsk, struct mm_struct *mm); extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm); +extern void set_context(unsigned long id, pgd_t *pgd); /* * switch_mm is the entry point called from the architecture independent @@ -241,22 +30,39 @@ extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm); static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) { - if (!cpu_isset(smp_processor_id(), next->cpu_vm_mask)) - cpu_set(smp_processor_id(), next->cpu_vm_mask); + /* Mark this context has been used on the new CPU */ + cpu_set(smp_processor_id(), next->cpu_vm_mask); + + /* 32-bit keeps track of the current PGDIR in the thread struct */ +#ifdef CONFIG_PPC32 + tsk->thread.pgdir = next->pgd; +#endif /* CONFIG_PPC32 */ - /* No need to flush userspace segments if the mm doesnt change */ + /* Nothing else to do if we aren't actually switching */ if (prev == next) return; + /* We must stop all altivec streams before changing the HW + * context + */ #ifdef CONFIG_ALTIVEC if (cpu_has_feature(CPU_FTR_ALTIVEC)) asm volatile ("dssall"); #endif /* CONFIG_ALTIVEC */ + /* The actual HW switching method differs between the various + * sub architectures. + */ +#ifdef CONFIG_PPC_STD_MMU_64 if (cpu_has_feature(CPU_FTR_SLB)) switch_slb(tsk, next); else switch_stab(tsk, next); +#else + /* Out of line for now */ + switch_mmu_context(prev, next); +#endif + } #define deactivate_mm(tsk,mm) do { } while (0) @@ -274,6 +80,11 @@ static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next) local_irq_restore(flags); } -#endif /* CONFIG_PPC64 */ +/* We don't currently use enter_lazy_tlb() for anything */ +static inline void enter_lazy_tlb(struct mm_struct *mm, + struct task_struct *tsk) +{ +} + #endif /* __KERNEL__ */ #endif /* __ASM_POWERPC_MMU_CONTEXT_H */ diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h index e5f14b13ccf0..08454880a2c0 100644 --- a/arch/powerpc/include/asm/module.h +++ b/arch/powerpc/include/asm/module.h @@ -34,11 +34,19 @@ struct mod_arch_specific { #ifdef __powerpc64__ unsigned int stubs_section; /* Index of stubs section in module */ unsigned int toc_section; /* What section is the TOC? */ -#else +#ifdef CONFIG_DYNAMIC_FTRACE + unsigned long toc; + unsigned long tramp; +#endif + +#else /* powerpc64 */ /* Indices of PLT sections within module. 
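The new tramp/toc fields exist because a relative call on powerpc only reaches +/- 32 MB, so a module's mcount call sites may have to bounce through a per-module trampoline. A rough range check of the kind the dynamic ftrace code has to make (a sketch, not the kernel's actual helper):

/* Sketch: can a "bl" at ip reach target directly?  The LI field is a
 * 24-bit signed word offset, i.e. +/- 32 MB from the call site. */
static int example_within_bl_range(unsigned long ip, unsigned long target)
{
	long offset = (long)target - (long)ip;

	return offset >= -0x02000000L && offset < 0x02000000L;
}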
*/ unsigned int core_plt_section; unsigned int init_plt_section; +#ifdef CONFIG_DYNAMIC_FTRACE + unsigned long tramp; #endif +#endif /* powerpc64 */ /* List of BUG addresses, source line numbers and filenames */ struct list_head bug_list; @@ -68,6 +76,12 @@ struct mod_arch_specific { # endif /* MODULE */ #endif +#ifdef CONFIG_DYNAMIC_FTRACE +# ifdef MODULE + asm(".section .ftrace.tramp,\"ax\",@nobits; .align 3; .previous"); +# endif /* MODULE */ +#endif + struct exception_table_entry; void sort_ex_table(struct exception_table_entry *start, diff --git a/arch/powerpc/include/asm/mpc52xx.h b/arch/powerpc/include/asm/mpc52xx.h index 81ef10b6b672..81a23932a160 100644 --- a/arch/powerpc/include/asm/mpc52xx.h +++ b/arch/powerpc/include/asm/mpc52xx.h @@ -239,6 +239,25 @@ struct mpc52xx_cdm { u16 mclken_div_psc6; /* CDM + 0x36 reg13 byte2,3 */ }; +/* Interrupt controller Register set */ +struct mpc52xx_intr { + u32 per_mask; /* INTR + 0x00 */ + u32 per_pri1; /* INTR + 0x04 */ + u32 per_pri2; /* INTR + 0x08 */ + u32 per_pri3; /* INTR + 0x0c */ + u32 ctrl; /* INTR + 0x10 */ + u32 main_mask; /* INTR + 0x14 */ + u32 main_pri1; /* INTR + 0x18 */ + u32 main_pri2; /* INTR + 0x1c */ + u32 reserved1; /* INTR + 0x20 */ + u32 enc_status; /* INTR + 0x24 */ + u32 crit_status; /* INTR + 0x28 */ + u32 main_status; /* INTR + 0x2c */ + u32 per_status; /* INTR + 0x30 */ + u32 reserved2; /* INTR + 0x34 */ + u32 per_error; /* INTR + 0x38 */ +}; + #endif /* __ASSEMBLY__ */ diff --git a/arch/powerpc/include/asm/mpc52xx_psc.h b/arch/powerpc/include/asm/mpc52xx_psc.h index 8917ed630565..a218da6bec7c 100644 --- a/arch/powerpc/include/asm/mpc52xx_psc.h +++ b/arch/powerpc/include/asm/mpc52xx_psc.h @@ -68,12 +68,20 @@ #define MPC52xx_PSC_IMR_ORERR 0x1000 #define MPC52xx_PSC_IMR_IPC 0x8000 -/* PSC input port change bit */ +/* PSC input port change bits */ #define MPC52xx_PSC_CTS 0x01 #define MPC52xx_PSC_DCD 0x02 #define MPC52xx_PSC_D_CTS 0x10 #define MPC52xx_PSC_D_DCD 0x20 +/* PSC acr bits */ +#define MPC52xx_PSC_IEC_CTS 0x01 +#define MPC52xx_PSC_IEC_DCD 0x02 + +/* PSC output port bits */ +#define MPC52xx_PSC_OP_RTS 0x01 +#define MPC52xx_PSC_OP_RES 0x02 + /* PSC mode fields */ #define MPC52xx_PSC_MODE_5_BITS 0x00 #define MPC52xx_PSC_MODE_6_BITS 0x01 @@ -91,6 +99,7 @@ #define MPC52xx_PSC_MODE_ONE_STOP_5_BITS 0x00 #define MPC52xx_PSC_MODE_ONE_STOP 0x07 #define MPC52xx_PSC_MODE_TWO_STOP 0x0f +#define MPC52xx_PSC_MODE_TXCTS 0x10 #define MPC52xx_PSC_RFNUM_MASK 0x01ff diff --git a/arch/powerpc/include/asm/mutex.h b/arch/powerpc/include/asm/mutex.h index 458c1f7fbc18..dabc01c727b8 100644 --- a/arch/powerpc/include/asm/mutex.h +++ b/arch/powerpc/include/asm/mutex.h @@ -1,9 +1,134 @@ /* - * Pull in the generic implementation for the mutex fastpath. + * Optimised mutex implementation of include/asm-generic/mutex-dec.h algorithm + */ +#ifndef _ASM_POWERPC_MUTEX_H +#define _ASM_POWERPC_MUTEX_H + +static inline int __mutex_cmpxchg_lock(atomic_t *v, int old, int new) +{ + int t; + + __asm__ __volatile__ ( +"1: lwarx %0,0,%1 # mutex trylock\n\ + cmpw 0,%0,%2\n\ + bne- 2f\n" + PPC405_ERR77(0,%1) +" stwcx. %3,0,%1\n\ + bne- 1b" + ISYNC_ON_SMP + "\n\ +2:" + : "=&r" (t) + : "r" (&v->counter), "r" (old), "r" (new) + : "cc", "memory"); + + return t; +} + +static inline int __mutex_dec_return_lock(atomic_t *v) +{ + int t; + + __asm__ __volatile__( +"1: lwarx %0,0,%1 # mutex lock\n\ + addic %0,%0,-1\n" + PPC405_ERR77(0,%1) +" stwcx. 
%0,0,%1\n\ + bne- 1b" + ISYNC_ON_SMP + : "=&r" (t) + : "r" (&v->counter) + : "cc", "memory"); + + return t; +} + +static inline int __mutex_inc_return_unlock(atomic_t *v) +{ + int t; + + __asm__ __volatile__( + LWSYNC_ON_SMP +"1: lwarx %0,0,%1 # mutex unlock\n\ + addic %0,%0,1\n" + PPC405_ERR77(0,%1) +" stwcx. %0,0,%1 \n\ + bne- 1b" + : "=&r" (t) + : "r" (&v->counter) + : "cc", "memory"); + + return t; +} + +/** + * __mutex_fastpath_lock - try to take the lock by moving the count + * from 1 to a 0 value + * @count: pointer of type atomic_t + * @fail_fn: function to call if the original value was not 1 + * + * Change the count from 1 to a value lower than 1, and call <fail_fn> if + * it wasn't 1 originally. This function MUST leave the value lower than + * 1 even when the "1" assertion wasn't true. + */ +static inline void +__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *)) +{ + if (unlikely(__mutex_dec_return_lock(count) < 0)) + fail_fn(count); +} + +/** + * __mutex_fastpath_lock_retval - try to take the lock by moving the count + * from 1 to a 0 value + * @count: pointer of type atomic_t + * @fail_fn: function to call if the original value was not 1 + * + * Change the count from 1 to a value lower than 1, and call <fail_fn> if + * it wasn't 1 originally. This function returns 0 if the fastpath succeeds, + * or anything the slow path function returns. + */ +static inline int +__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *)) +{ + if (unlikely(__mutex_dec_return_lock(count) < 0)) + return fail_fn(count); + return 0; +} + +/** + * __mutex_fastpath_unlock - try to promote the count from 0 to 1 + * @count: pointer of type atomic_t + * @fail_fn: function to call if the original value was not 0 + * + * Try to promote the count from 0 to 1. If it wasn't 0, call <fail_fn>. + * In the failure case, this function is allowed to either set the value to + * 1, or to set it to a value lower than 1. + */ +static inline void +__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *)) +{ + if (unlikely(__mutex_inc_return_unlock(count) <= 0)) + fail_fn(count); +} + +#define __mutex_slowpath_needs_to_unlock() 1 + +/** + * __mutex_fastpath_trylock - try to acquire the mutex, without waiting + * + * @count: pointer of type atomic_t + * @fail_fn: fallback function * - * TODO: implement optimized primitives instead, or leave the generic - * implementation in place, or pick the atomic_xchg() based generic - * implementation. (see asm-generic/mutex-xchg.h for details) + * Change the count from 1 to 0, and return 1 (success), or if the count + * was not 1, then return 0 (failure). */ +static inline int +__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *)) +{ + if (likely(__mutex_cmpxchg_lock(count, 1, 0) == 1)) + return 1; + return 0; +} -#include <asm-generic/mutex-dec.h> +#endif diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h index c0b8d4a29a91..197d569f5bd3 100644 --- a/arch/powerpc/include/asm/page.h +++ b/arch/powerpc/include/asm/page.h @@ -19,12 +19,15 @@ #include <asm/kdump.h> /* - * On PPC32 page size is 4K. For PPC64 we support either 4K or 64K software + * On regular PPC32 page size is 4K (but we support 4K/16K/64K pages + * on PPC44x). For PPC64 we support either 4K or 64K software * page size. When using 64K pages however, whether we are really supporting * 64K pages in HW or not is irrelevant to those definitions. 
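For reference, the lwarx/stwcx. fastpath added in the mutex.h hunk above follows the same protocol as the generic mutex-dec algorithm; in portable C it is roughly the sketch below. This is an illustrative equivalent, not the patch's code -- the point of the hand-written version is that the atomic op and the acquire/release barriers are fused into a single ll/sc loop.

/* Portable-C equivalent of the dec-based fastpath, for illustration. */
static inline void example_mutex_lock(atomic_t *count,
				      void (*slowpath)(atomic_t *))
{
	/* 1 = unlocked, 0 = locked, < 0 = locked with waiters. */
	if (unlikely(atomic_dec_return(count) < 0))
		slowpath(count);	/* contended: go to sleep */
}

static inline void example_mutex_unlock(atomic_t *count,
					void (*slowpath)(atomic_t *))
{
	if (unlikely(atomic_inc_return(count) <= 0))
		slowpath(count);	/* waiters present: wake one up */
}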
*/ -#ifdef CONFIG_PPC_64K_PAGES +#if defined(CONFIG_PPC_64K_PAGES) #define PAGE_SHIFT 16 +#elif defined(CONFIG_PPC_16K_PAGES) +#define PAGE_SHIFT 14 #else #define PAGE_SHIFT 12 #endif @@ -151,7 +154,7 @@ typedef struct { pte_basic_t pte; } pte_t; /* 64k pages additionally define a bigger "real PTE" type that gathers * the "second half" part of the PTE for pseudo 64k pages */ -#ifdef CONFIG_PPC_64K_PAGES +#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC_STD_MMU_64) typedef struct { pte_t pte; unsigned long hidx; } real_pte_t; #else typedef struct { pte_t pte; } real_pte_t; @@ -191,10 +194,10 @@ typedef pte_basic_t pte_t; #define pte_val(x) (x) #define __pte(x) (x) -#ifdef CONFIG_PPC_64K_PAGES +#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC_STD_MMU_64) typedef struct { pte_t pte; unsigned long hidx; } real_pte_t; #else -typedef unsigned long real_pte_t; +typedef pte_t real_pte_t; #endif diff --git a/arch/powerpc/include/asm/page_32.h b/arch/powerpc/include/asm/page_32.h index d77072a32cc6..1458d9500381 100644 --- a/arch/powerpc/include/asm/page_32.h +++ b/arch/powerpc/include/asm/page_32.h @@ -19,6 +19,8 @@ #define PTE_FLAGS_OFFSET 0 #endif +#define PTE_SHIFT (PAGE_SHIFT - PTE_T_LOG2) /* full page */ + #ifndef __ASSEMBLY__ /* * The basic type of a PTE - 64 bits for those CPUs with > 32 bit @@ -26,10 +28,8 @@ */ #ifdef CONFIG_PTE_64BIT typedef unsigned long long pte_basic_t; -#define PTE_SHIFT (PAGE_SHIFT - 3) /* 512 ptes per page */ #else typedef unsigned long pte_basic_t; -#define PTE_SHIFT (PAGE_SHIFT - 2) /* 1024 ptes per page */ #endif struct page; @@ -39,6 +39,9 @@ extern void copy_page(void *to, void *from); #include <asm-generic/page.h> +#define PGD_T_LOG2 (__builtin_ffs(sizeof(pgd_t)) - 1) +#define PTE_T_LOG2 (__builtin_ffs(sizeof(pte_t)) - 1) + #endif /* __ASSEMBLY__ */ #endif /* _ASM_POWERPC_PAGE_32_H */ diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h index 9047af7baa69..84007afabdb5 100644 --- a/arch/powerpc/include/asm/pci-bridge.h +++ b/arch/powerpc/include/asm/pci-bridge.h @@ -13,7 +13,6 @@ struct device_node; -extern unsigned int ppc_pci_flags; enum { /* Force re-assigning all resources (ignore firmware * setup completely) @@ -36,6 +35,31 @@ enum { /* ... 
except for domain 0 */ PPC_PCI_COMPAT_DOMAIN_0 = 0x00000020, }; +#ifdef CONFIG_PCI +extern unsigned int ppc_pci_flags; + +static inline void ppc_pci_set_flags(int flags) +{ + ppc_pci_flags = flags; +} + +static inline void ppc_pci_add_flags(int flags) +{ + ppc_pci_flags |= flags; +} + +static inline int ppc_pci_has_flag(int flag) +{ + return (ppc_pci_flags & flag); +} +#else +static inline void ppc_pci_set_flags(int flags) { } +static inline void ppc_pci_add_flags(int flags) { } +static inline int ppc_pci_has_flag(int flag) +{ + return 0; +} +#endif /* @@ -241,9 +265,6 @@ extern void pcibios_remove_pci_devices(struct pci_bus *bus); /** Discover new pci devices under this bus, and add them */ extern void pcibios_add_pci_devices(struct pci_bus *bus); -extern void pcibios_fixup_new_pci_devices(struct pci_bus *bus); - -extern int pcibios_remove_root_bus(struct pci_controller *phb); static inline struct pci_controller *pci_bus_to_host(const struct pci_bus *bus) { @@ -290,6 +311,7 @@ extern void pci_process_bridge_OF_ranges(struct pci_controller *hose, /* Allocate & free a PCI host bridge structure */ extern struct pci_controller *pcibios_alloc_controller(struct device_node *dev); extern void pcibios_free_controller(struct pci_controller *phb); +extern void pcibios_setup_phb_resources(struct pci_controller *hose); #ifdef CONFIG_PCI extern unsigned long pci_address_to_pio(phys_addr_t address); diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h index 57a2a494886b..3548159a1beb 100644 --- a/arch/powerpc/include/asm/pci.h +++ b/arch/powerpc/include/asm/pci.h @@ -38,8 +38,8 @@ struct pci_dev; * Set this to 1 if you want the kernel to re-assign all PCI * bus numbers (don't do that on ppc64 yet !) */ -#define pcibios_assign_all_busses() (ppc_pci_flags & \ - PPC_PCI_REASSIGN_ALL_BUS) +#define pcibios_assign_all_busses() \ + (ppc_pci_has_flag(PPC_PCI_REASSIGN_ALL_BUS)) #define pcibios_scan_all_fns(a, b) 0 static inline void pcibios_set_master(struct pci_dev *dev) @@ -204,15 +204,14 @@ static inline struct resource *pcibios_select_root(struct pci_dev *pdev, return root; } -extern void pcibios_setup_new_device(struct pci_dev *dev); - extern void pcibios_claim_one_bus(struct pci_bus *b); -extern void pcibios_allocate_bus_resources(struct pci_bus *bus); +extern void pcibios_finish_adding_to_bus(struct pci_bus *bus); extern void pcibios_resource_survey(void); extern struct pci_controller *init_phb_dynamic(struct device_node *dn); +extern int remove_phb_dynamic(struct pci_controller *phb); extern struct pci_dev *of_create_pci_dev(struct device_node *node, struct pci_bus *bus, int devfn); @@ -221,6 +220,7 @@ extern void of_scan_pci_bridge(struct device_node *node, struct pci_dev *dev); extern void of_scan_bus(struct device_node *node, struct pci_bus *bus); +extern void of_rescan_bus(struct device_node *node, struct pci_bus *bus); extern int pci_read_irq_line(struct pci_dev *dev); @@ -235,9 +235,8 @@ extern void pci_resource_to_user(const struct pci_dev *dev, int bar, const struct resource *rsrc, resource_size_t *start, resource_size_t *end); -extern void pcibios_do_bus_setup(struct pci_bus *bus); -extern void pcibios_fixup_of_probed_bus(struct pci_bus *bus); - +extern void pcibios_setup_bus_devices(struct pci_bus *bus); +extern void pcibios_setup_bus_self(struct pci_bus *bus); #endif /* __KERNEL__ */ #endif /* __ASM_POWERPC_PCI_H */ diff --git a/arch/powerpc/include/asm/pgalloc-32.h b/arch/powerpc/include/asm/pgalloc-32.h index 58c07147b3ea..0815eb40acae 100644 --- 
a/arch/powerpc/include/asm/pgalloc-32.h +++ b/arch/powerpc/include/asm/pgalloc-32.h @@ -3,6 +3,8 @@ #include <linux/threads.h> +#define PTE_NONCACHE_NUM 0 /* dummy for now to share code w/ppc64 */ + extern void __bad_pte(pmd_t *pmd); extern pgd_t *pgd_alloc(struct mm_struct *mm); @@ -33,10 +35,13 @@ extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr); extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr); -extern void pte_free_kernel(struct mm_struct *mm, pte_t *pte); -extern void pte_free(struct mm_struct *mm, pgtable_t pte); -#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, (pte)) +static inline void pgtable_free(pgtable_free_t pgf) +{ + void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK); + + free_page((unsigned long)p); +} #define check_pgt_cache() do { } while (0) diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h index 812a1d8f35cb..afda2bdd860f 100644 --- a/arch/powerpc/include/asm/pgalloc-64.h +++ b/arch/powerpc/include/asm/pgalloc-64.h @@ -7,7 +7,6 @@ * 2 of the License, or (at your option) any later version. */ -#include <linux/mm.h> #include <linux/slab.h> #include <linux/cpumask.h> #include <linux/percpu.h> @@ -108,31 +107,6 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm, return page; } -static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) -{ - free_page((unsigned long)pte); -} - -static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage) -{ - pgtable_page_dtor(ptepage); - __free_page(ptepage); -} - -#define PGF_CACHENUM_MASK 0x7 - -typedef struct pgtable_free { - unsigned long val; -} pgtable_free_t; - -static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum, - unsigned long mask) -{ - BUG_ON(cachenum > PGF_CACHENUM_MASK); - - return (pgtable_free_t){.val = ((unsigned long) p & ~mask) | cachenum}; -} - static inline void pgtable_free(pgtable_free_t pgf) { void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK); @@ -144,14 +118,6 @@ static inline void pgtable_free(pgtable_free_t pgf) kmem_cache_free(pgtable_cache[cachenum], p); } -extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf); - -#define __pte_free_tlb(tlb,ptepage) \ -do { \ - pgtable_page_dtor(ptepage); \ - pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \ - PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1)); \ -} while (0) #define __pmd_free_tlb(tlb, pmd) \ pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \ PMD_CACHE_NUM, PMD_TABLE_SIZE-1)) diff --git a/arch/powerpc/include/asm/pgalloc.h b/arch/powerpc/include/asm/pgalloc.h index b4505ed0f0f2..5d8480265a77 100644 --- a/arch/powerpc/include/asm/pgalloc.h +++ b/arch/powerpc/include/asm/pgalloc.h @@ -2,11 +2,52 @@ #define _ASM_POWERPC_PGALLOC_H #ifdef __KERNEL__ +#include <linux/mm.h> + +static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) +{ + free_page((unsigned long)pte); +} + +static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage) +{ + pgtable_page_dtor(ptepage); + __free_page(ptepage); +} + +typedef struct pgtable_free { + unsigned long val; +} pgtable_free_t; + +#define PGF_CACHENUM_MASK 0x7 + +static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum, + unsigned long mask) +{ + BUG_ON(cachenum > PGF_CACHENUM_MASK); + + return (pgtable_free_t){.val = ((unsigned long) p & ~mask) | cachenum}; +} + #ifdef CONFIG_PPC64 #include <asm/pgalloc-64.h> #else #include <asm/pgalloc-32.h> #endif +extern void pgtable_free_tlb(struct 
mmu_gather *tlb, pgtable_free_t pgf); + +#ifdef CONFIG_SMP +#define __pte_free_tlb(tlb,ptepage) \ +do { \ + pgtable_page_dtor(ptepage); \ + pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \ + PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1)); \ +} while (0) +#else +#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, (pte)) +#endif + + #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_PGALLOC_H */ diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/pgtable-ppc32.h index 6ab7c67cb5ab..f69a4d977729 100644 --- a/arch/powerpc/include/asm/pgtable-ppc32.h +++ b/arch/powerpc/include/asm/pgtable-ppc32.h @@ -228,9 +228,10 @@ extern int icache_44x_need_flush; * - FILE *must* be in the bottom three bits because swap cache * entries use the top 29 bits for TLB2. * - * - CACHE COHERENT bit (M) has no effect on PPC440 core, because it - * doesn't support SMP. So we can use this as software bit, like - * DIRTY. + * - CACHE COHERENT bit (M) has no effect on original PPC440 cores, + * because it doesn't support SMP. However, some later 460 variants + * have -some- form of SMP support and so I keep the bit there for + * future use * * With the PPC 44x Linux implementation, the 0-11th LSBs of the PTE are used * for memory protection related functions (see PTE structure in @@ -436,20 +437,23 @@ extern int icache_44x_need_flush; _PAGE_USER | _PAGE_ACCESSED | \ _PAGE_RW | _PAGE_HWWRITE | _PAGE_DIRTY | \ _PAGE_EXEC | _PAGE_HWEXEC) + /* - * Note: the _PAGE_COHERENT bit automatically gets set in the hardware - * PTE if CONFIG_SMP is defined (hash_page does this); there is no need - * to have it in the Linux PTE, and in fact the bit could be reused for - * another purpose. -- paulus. + * We define 2 sets of base prot bits, one for basic pages (ie, + * cacheable kernel and user pages) and one for non cacheable + * pages. We always set _PAGE_COHERENT when SMP is enabled or + * the processor might need it for DMA coherency. 
*/ - -#ifdef CONFIG_44x -#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_GUARDED) +#if defined(CONFIG_SMP) || defined(CONFIG_PPC_STD_MMU) +#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT) #else #define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED) #endif +#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NO_CACHE) + #define _PAGE_WRENABLE (_PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE) #define _PAGE_KERNEL (_PAGE_BASE | _PAGE_SHARED | _PAGE_WRENABLE) +#define _PAGE_KERNEL_NC (_PAGE_BASE_NC | _PAGE_SHARED | _PAGE_WRENABLE) #ifdef CONFIG_PPC_STD_MMU /* On standard PPC MMU, no user access implies kernel read/write access, @@ -459,7 +463,7 @@ extern int icache_44x_need_flush; #define _PAGE_KERNEL_RO (_PAGE_BASE | _PAGE_SHARED) #endif -#define _PAGE_IO (_PAGE_KERNEL | _PAGE_NO_CACHE | _PAGE_GUARDED) +#define _PAGE_IO (_PAGE_KERNEL_NC | _PAGE_GUARDED) #define _PAGE_RAM (_PAGE_KERNEL | _PAGE_HWEXEC) #if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\ @@ -552,9 +556,6 @@ static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; } static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; } -static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; } -static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; } - static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; } static inline pte_t pte_mkclean(pte_t pte) { @@ -693,10 +694,11 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, #endif } + static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte) { -#if defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP) +#if defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP) && defined(CONFIG_DEBUG_VM) WARN_ON(pte_present(*ptep)); #endif __set_pte_at(mm, addr, ptep, pte); @@ -760,16 +762,6 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty) __changed; \ }) -/* - * Macro to mark a page protection value as "uncacheable". 
- */ -#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED)) - -struct file; -extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, - unsigned long size, pgprot_t vma_prot); -#define __HAVE_PHYS_MEM_ACCESS_PROT - #define __HAVE_ARCH_PTE_SAME #define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0) diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h index 4c0a8c62859d..b0f18be81d9f 100644 --- a/arch/powerpc/include/asm/pgtable-ppc64.h +++ b/arch/powerpc/include/asm/pgtable-ppc64.h @@ -100,7 +100,7 @@ #define _PAGE_WRENABLE (_PAGE_RW | _PAGE_DIRTY) -/* __pgprot defined in arch/powerpc/incliude/asm/page.h */ +/* __pgprot defined in arch/powerpc/include/asm/page.h */ #define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED) #define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER) @@ -245,9 +245,6 @@ static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED;} static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE;} static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; } -static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; } -static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; } - static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) &= ~(_PAGE_RW); return pte; } static inline pte_t pte_mkclean(pte_t pte) { @@ -405,16 +402,6 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty) __changed; \ }) -/* - * Macro to mark a page protection value as "uncacheable". - */ -#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED)) - -struct file; -extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, - unsigned long size, pgprot_t vma_prot); -#define __HAVE_PHYS_MEM_ACCESS_PROT - #define __HAVE_ARCH_PTE_SAME #define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0) diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h index dbb8ca172e44..07f55e601696 100644 --- a/arch/powerpc/include/asm/pgtable.h +++ b/arch/powerpc/include/asm/pgtable.h @@ -16,6 +16,32 @@ struct mm_struct; #endif #ifndef __ASSEMBLY__ + +/* + * Macro to mark a page protection value as "uncacheable". + */ + +#define _PAGE_CACHE_CTL (_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \ + _PAGE_WRITETHRU) + +#define pgprot_noncached(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \ + _PAGE_NO_CACHE | _PAGE_GUARDED)) + +#define pgprot_noncached_wc(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \ + _PAGE_NO_CACHE)) + +#define pgprot_cached(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \ + _PAGE_COHERENT)) + +#define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \ + _PAGE_COHERENT | _PAGE_WRITETHRU)) + + +struct file; +extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, + unsigned long size, pgprot_t vma_prot); +#define __HAVE_PHYS_MEM_ACCESS_PROT + /* * ZERO_PAGE is a global shared page that is always zero: used * for zero-mapped memory areas etc.. 
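The pgtable.h hunk above consolidates the cache-attribute helpers in one place: each pgprot_*() variant first clears every bit covered by _PAGE_CACHE_CTL and only then ORs in the combination it wants, so attributes left over in the incoming protection value cannot leak through. A minimal user-space sketch of that mask-then-set pattern, with invented X_* bit values standing in for the real _PAGE_* flags (they are not the kernel's actual bit assignments):

/*
 * Illustration only: the X_* values below are invented for this example
 * and do not correspond to the kernel's real _PAGE_* bit definitions.
 */
#include <stdio.h>

#define X_COHERENT   0x1u
#define X_GUARDED    0x2u
#define X_NO_CACHE   0x4u
#define X_WRITETHRU  0x8u
#define X_CACHE_CTL  (X_COHERENT | X_GUARDED | X_NO_CACHE | X_WRITETHRU)

/* Clear all cache-control bits first, then apply the requested set. */
static unsigned int set_cache_attrs(unsigned int prot, unsigned int attrs)
{
	return (prot & ~X_CACHE_CTL) | attrs;
}

int main(void)
{
	unsigned int base = 0x100u | X_COHERENT;	/* a cacheable mapping */

	printf("noncached:      %#x\n", set_cache_attrs(base, X_NO_CACHE | X_GUARDED));
	printf("noncached (wc): %#x\n", set_cache_attrs(base, X_NO_CACHE));
	printf("write-through:  %#x\n", set_cache_attrs(base, X_COHERENT | X_WRITETHRU));
	return 0;
}

Masking before setting is what lets helpers such as pgprot_noncached() and pgprot_cached() be applied to the same starting protection without the cacheability combinations conflicting.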
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index c4a029ccb4d3..1a0d628eb114 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -425,14 +425,14 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601) #define fromreal(rd) tovirt(rd,rd) #define tophys(rd,rs) \ -0: addis rd,rs,-KERNELBASE@h; \ +0: addis rd,rs,-PAGE_OFFSET@h; \ .section ".vtop_fixup","aw"; \ .align 1; \ .long 0b; \ .previous #define tovirt(rd,rs) \ -0: addis rd,rs,KERNELBASE@h; \ +0: addis rd,rs,PAGE_OFFSET@h; \ .section ".ptov_fixup","aw"; \ .align 1; \ .long 0b; \ diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index 101ed87f7d84..d3466490104a 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h @@ -69,8 +69,6 @@ extern int _prep_type; #ifdef __KERNEL__ -extern int have_of; - struct task_struct; void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp); void release_thread(struct task_struct *); @@ -207,6 +205,11 @@ struct thread_struct { #define INIT_SP_LIMIT \ (_ALIGN_UP(sizeof(init_thread_info), 16) + (unsigned long) &init_stack) +#ifdef CONFIG_SPE +#define SPEFSCR_INIT .spefscr = SPEFSCR_FINVE | SPEFSCR_FDBZE | SPEFSCR_FUNFE | SPEFSCR_FOVFE, +#else +#define SPEFSCR_INIT +#endif #ifdef CONFIG_PPC32 #define INIT_THREAD { \ @@ -215,6 +218,7 @@ struct thread_struct { .fs = KERNEL_DS, \ .pgdir = swapper_pg_dir, \ .fpexc_mode = MSR_FE0 | MSR_FE1, \ + SPEFSCR_INIT \ } #else #define INIT_THREAD { \ diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h index eb3bd2e1c7f6..6ff04185d2aa 100644 --- a/arch/powerpc/include/asm/prom.h +++ b/arch/powerpc/include/asm/prom.h @@ -253,6 +253,9 @@ extern void kdump_move_device_tree(void); /* CPU OF node matching */ struct device_node *of_get_cpu_node(int cpu, unsigned int *thread); +/* cache lookup */ +struct device_node *of_find_next_cache_node(struct device_node *np); + /* Get the MAC address */ extern const void *of_get_mac_address(struct device_node *np); diff --git a/arch/powerpc/include/asm/ps3.h b/arch/powerpc/include/asm/ps3.h index f9e34c493cbb..cff30c0ef1ff 100644 --- a/arch/powerpc/include/asm/ps3.h +++ b/arch/powerpc/include/asm/ps3.h @@ -305,30 +305,34 @@ static inline const char* ps3_result(int result) /* system bus routines */ enum ps3_match_id { - PS3_MATCH_ID_EHCI = 1, - PS3_MATCH_ID_OHCI = 2, - PS3_MATCH_ID_GELIC = 3, - PS3_MATCH_ID_AV_SETTINGS = 4, - PS3_MATCH_ID_SYSTEM_MANAGER = 5, - PS3_MATCH_ID_STOR_DISK = 6, - PS3_MATCH_ID_STOR_ROM = 7, - PS3_MATCH_ID_STOR_FLASH = 8, - PS3_MATCH_ID_SOUND = 9, - PS3_MATCH_ID_GRAPHICS = 10, - PS3_MATCH_ID_LPM = 11, + PS3_MATCH_ID_EHCI = 1, + PS3_MATCH_ID_OHCI = 2, + PS3_MATCH_ID_GELIC = 3, + PS3_MATCH_ID_AV_SETTINGS = 4, + PS3_MATCH_ID_SYSTEM_MANAGER = 5, + PS3_MATCH_ID_STOR_DISK = 6, + PS3_MATCH_ID_STOR_ROM = 7, + PS3_MATCH_ID_STOR_FLASH = 8, + PS3_MATCH_ID_SOUND = 9, + PS3_MATCH_ID_GPU = 10, + PS3_MATCH_ID_LPM = 11, }; -#define PS3_MODULE_ALIAS_EHCI "ps3:1" -#define PS3_MODULE_ALIAS_OHCI "ps3:2" -#define PS3_MODULE_ALIAS_GELIC "ps3:3" -#define PS3_MODULE_ALIAS_AV_SETTINGS "ps3:4" -#define PS3_MODULE_ALIAS_SYSTEM_MANAGER "ps3:5" -#define PS3_MODULE_ALIAS_STOR_DISK "ps3:6" -#define PS3_MODULE_ALIAS_STOR_ROM "ps3:7" -#define PS3_MODULE_ALIAS_STOR_FLASH "ps3:8" -#define PS3_MODULE_ALIAS_SOUND "ps3:9" -#define PS3_MODULE_ALIAS_GRAPHICS "ps3:10" -#define PS3_MODULE_ALIAS_LPM "ps3:11" +enum ps3_match_sub_id { + PS3_MATCH_SUB_ID_GPU_FB = 1, +}; + +#define 
PS3_MODULE_ALIAS_EHCI "ps3:1:0" +#define PS3_MODULE_ALIAS_OHCI "ps3:2:0" +#define PS3_MODULE_ALIAS_GELIC "ps3:3:0" +#define PS3_MODULE_ALIAS_AV_SETTINGS "ps3:4:0" +#define PS3_MODULE_ALIAS_SYSTEM_MANAGER "ps3:5:0" +#define PS3_MODULE_ALIAS_STOR_DISK "ps3:6:0" +#define PS3_MODULE_ALIAS_STOR_ROM "ps3:7:0" +#define PS3_MODULE_ALIAS_STOR_FLASH "ps3:8:0" +#define PS3_MODULE_ALIAS_SOUND "ps3:9:0" +#define PS3_MODULE_ALIAS_GPU_FB "ps3:10:1" +#define PS3_MODULE_ALIAS_LPM "ps3:11:0" enum ps3_system_bus_device_type { PS3_DEVICE_TYPE_IOC0 = 1, @@ -337,11 +341,6 @@ enum ps3_system_bus_device_type { PS3_DEVICE_TYPE_LPM, }; -enum ps3_match_sub_id { - /* for PS3_MATCH_ID_GRAPHICS */ - PS3_MATCH_SUB_ID_FB = 1, -}; - /** * struct ps3_system_bus_device - a device on the system bus */ @@ -516,4 +515,7 @@ void ps3_sync_irq(int node); u32 ps3_get_hw_thread_id(int cpu); u64 ps3_get_spe_id(void *arg); +/* mutex synchronizing GPU accesses and video mode changes */ +extern struct mutex ps3_gpu_mutex; + #endif diff --git a/arch/powerpc/include/asm/ps3av.h b/arch/powerpc/include/asm/ps3av.h index 5aa22cffdbd6..cd24ac16660a 100644 --- a/arch/powerpc/include/asm/ps3av.h +++ b/arch/powerpc/include/asm/ps3av.h @@ -740,8 +740,4 @@ extern int ps3av_audio_mute(int); extern int ps3av_audio_mute_analog(int); extern int ps3av_dev_open(void); extern int ps3av_dev_close(void); -extern void ps3av_register_flip_ctl(void (*flip_ctl)(int on, void *data), - void *flip_data); -extern void ps3av_flip_ctl(int on); - #endif /* _ASM_POWERPC_PS3AV_H_ */ diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index c6d1ab650778..f484a343efba 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -783,6 +783,10 @@ extern void scom970_write(unsigned int address, unsigned long value); #define __get_SP() ({unsigned long sp; \ asm volatile("mr %0,1": "=r" (sp)); sp;}) +struct pt_regs; + +extern void ppc_save_regs(struct pt_regs *regs); + #endif /* __ASSEMBLY__ */ #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_REG_H */ diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h index 8eaa7b28d9d0..e0175beb4462 100644 --- a/arch/powerpc/include/asm/rtas.h +++ b/arch/powerpc/include/asm/rtas.h @@ -168,6 +168,7 @@ extern void rtas_os_term(char *str); extern int rtas_get_sensor(int sensor, int index, int *state); extern int rtas_get_power_level(int powerdomain, int *level); extern int rtas_set_power_level(int powerdomain, int level, int *setlevel); +extern bool rtas_indicator_present(int token, int *maxindex); extern int rtas_set_indicator(int indicator, int index, int new_value); extern int rtas_set_indicator_fast(int indicator, int index, int new_value); extern void rtas_progress(char *s, unsigned short hex); diff --git a/arch/powerpc/include/asm/sfp-machine.h b/arch/powerpc/include/asm/sfp-machine.h index ced34f1dc8f8..3d9f831c3c55 100644 --- a/arch/powerpc/include/asm/sfp-machine.h +++ b/arch/powerpc/include/asm/sfp-machine.h @@ -82,7 +82,7 @@ #define _FP_MUL_MEAT_S(R,X,Y) _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_S,R,X,Y,umul_ppmm) #define _FP_MUL_MEAT_D(R,X,Y) _FP_MUL_MEAT_2_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm) -#define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_udiv(S,R,X,Y) +#define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_udiv_norm(S,R,X,Y) #define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_2_udiv(D,R,X,Y) /* These macros define what NaN looks like. 
They're supposed to expand to @@ -97,6 +97,20 @@ #define _FP_KEEPNANFRACP 1 +#ifdef FP_EX_BOOKE_E500_SPE +#define FP_EX_INEXACT (1 << 21) +#define FP_EX_INVALID (1 << 20) +#define FP_EX_DIVZERO (1 << 19) +#define FP_EX_UNDERFLOW (1 << 18) +#define FP_EX_OVERFLOW (1 << 17) +#define FP_INHIBIT_RESULTS 0 + +#define __FPU_FPSCR (current->thread.spefscr) +#define __FPU_ENABLED_EXC \ +({ \ + (__FPU_FPSCR >> 2) & 0x1f; \ +}) +#else /* Exception flags. We use the bit positions of the appropriate bits in the FPSCR, which also correspond to the FE_* bits. This makes everything easier ;-). */ @@ -111,22 +125,6 @@ #define FP_EX_DIVZERO (1 << (31 - 5)) #define FP_EX_INEXACT (1 << (31 - 6)) -/* This macro appears to be called when both X and Y are NaNs, and - * has to choose one and copy it to R. i386 goes for the larger of the - * two, sparc64 just picks Y. I don't understand this at all so I'll - * go with sparc64 because it's shorter :-> -- PMM - */ -#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP) \ - do { \ - R##_s = Y##_s; \ - _FP_FRAC_COPY_##wc(R,Y); \ - R##_c = FP_CLS_NAN; \ - } while (0) - - -#include <linux/kernel.h> -#include <linux/sched.h> - #define __FPU_FPSCR (current->thread.fpscr.val) /* We only actually write to the destination register @@ -137,6 +135,32 @@ (__FPU_FPSCR >> 3) & 0x1f; \ }) +#endif + +/* + * If one NaN is signaling and the other is not, + * we choose that one, otherwise we choose X. + */ +#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP) \ + do { \ + if ((_FP_FRAC_HIGH_RAW_##fs(Y) & _FP_QNANBIT_##fs) \ + && !(_FP_FRAC_HIGH_RAW_##fs(X) & _FP_QNANBIT_##fs)) \ + { \ + R##_s = X##_s; \ + _FP_FRAC_COPY_##wc(R,X); \ + } \ + else \ + { \ + R##_s = Y##_s; \ + _FP_FRAC_COPY_##wc(R,Y); \ + } \ + R##_c = FP_CLS_NAN; \ + } while (0) + + +#include <linux/kernel.h> +#include <linux/sched.h> + #define __FPU_TRAP_P(bits) \ ((__FPU_ENABLED_EXC & (bits)) != 0) diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h index 1866cec4f967..c25f73d1d842 100644 --- a/arch/powerpc/include/asm/smp.h +++ b/arch/powerpc/include/asm/smp.h @@ -81,6 +81,13 @@ extern int cpu_to_core_id(int cpu); #define PPC_MSG_CALL_FUNC_SINGLE 2 #define PPC_MSG_DEBUGGER_BREAK 3 +/* + * irq controllers that have dedicated ipis per message and don't + * need additional code in the action handler may use this + */ +extern int smp_request_message_ipi(int virq, int message); +extern const char *smp_ipi_name[]; + void smp_init_iSeries(void); void smp_init_pSeries(void); void smp_init_cell(void); diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h index f56a843f4705..36864364e601 100644 --- a/arch/powerpc/include/asm/spinlock.h +++ b/arch/powerpc/include/asm/spinlock.h @@ -277,7 +277,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw) bne- 1b" : "=&r"(tmp) : "r"(&rw->lock) - : "cr0", "memory"); + : "cr0", "xer", "memory"); } static inline void __raw_write_unlock(raw_rwlock_t *rw) diff --git a/arch/powerpc/include/asm/synch.h b/arch/powerpc/include/asm/synch.h index 45963e80f557..28f6ddbff4cf 100644 --- a/arch/powerpc/include/asm/synch.h +++ b/arch/powerpc/include/asm/synch.h @@ -5,6 +5,10 @@ #include <linux/stringify.h> #include <asm/feature-fixups.h> +#if defined(__powerpc64__) || defined(CONFIG_PPC_E500MC) +#define __SUBARCH_HAS_LWSYNC +#endif + #ifndef __ASSEMBLY__ extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup; extern void do_lwsync_fixups(unsigned long value, void *fixup_start, diff --git a/arch/powerpc/include/asm/system.h 
b/arch/powerpc/include/asm/system.h index d6648c143322..2a4be19a92c4 100644 --- a/arch/powerpc/include/asm/system.h +++ b/arch/powerpc/include/asm/system.h @@ -23,15 +23,17 @@ * read_barrier_depends() prevents data-dependent loads being reordered * across this point (nop on PPC). * - * We have to use the sync instructions for mb(), since lwsync doesn't - * order loads with respect to previous stores. Lwsync is fine for - * rmb(), though. Note that rmb() actually uses a sync on 32-bit - * architectures. + * *mb() variants without smp_ prefix must order all types of memory + * operations with one another. sync is the only instruction sufficient + * to do this. * - * For wmb(), we use sync since wmb is used in drivers to order - * stores to system memory with respect to writes to the device. - * However, smp_wmb() can be a lighter-weight lwsync or eieio barrier - * on SMP since it is only used to order updates to system memory. + * For the smp_ barriers, ordering is for cacheable memory operations + * only. We have to use the sync instruction for smp_mb(), since lwsync + * doesn't order loads with respect to previous stores. Lwsync can be + * used for smp_rmb() and smp_wmb(). + * + * However, on CPUs that don't support lwsync, lwsync actually maps to a + * heavy-weight sync, so smp_wmb() can be a lighter-weight eieio. */ #define mb() __asm__ __volatile__ ("sync" : : : "memory") #define rmb() __asm__ __volatile__ ("sync" : : : "memory") @@ -45,14 +47,14 @@ #ifdef CONFIG_SMP #ifdef __SUBARCH_HAS_LWSYNC -# define SMPWMB lwsync +# define SMPWMB LWSYNC #else # define SMPWMB eieio #endif #define smp_mb() mb() -#define smp_rmb() rmb() -#define smp_wmb() __asm__ __volatile__ (__stringify(SMPWMB) : : :"memory") +#define smp_rmb() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory") +#define smp_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory") #define smp_read_barrier_depends() read_barrier_depends() #else #define smp_mb() barrier() diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h index febd581ec9b0..27ccb764fdab 100644 --- a/arch/powerpc/include/asm/time.h +++ b/arch/powerpc/include/asm/time.h @@ -48,26 +48,6 @@ extern unsigned long ppc_proc_freq; extern unsigned long ppc_tb_freq; #define DEFAULT_TB_FREQ 125000000UL -/* - * By putting all of this stuff into a single struct we - * reduce the number of cache lines touched by do_gettimeofday. - * Both by collecting all of the data in one cache line and - * by touching only one TOC entry on ppc64. 
- */ -struct gettimeofday_vars { - u64 tb_to_xs; - u64 stamp_xsec; - u64 tb_orig_stamp; -}; - -struct gettimeofday_struct { - unsigned long tb_ticks_per_sec; - struct gettimeofday_vars vars[2]; - struct gettimeofday_vars * volatile varp; - unsigned var_idx; - unsigned tb_to_us; -}; - struct div_result { u64 result_high; u64 result_low; diff --git a/arch/powerpc/include/asm/tlbflush.h b/arch/powerpc/include/asm/tlbflush.h index a2c6bfd85fb7..abbe3419d1dd 100644 --- a/arch/powerpc/include/asm/tlbflush.h +++ b/arch/powerpc/include/asm/tlbflush.h @@ -6,6 +6,9 @@ * * - flush_tlb_mm(mm) flushes the specified mm context TLB's * - flush_tlb_page(vma, vmaddr) flushes one page + * - local_flush_tlb_mm(mm) flushes the specified mm context on + * the local processor + * - local_flush_tlb_page(vma, vmaddr) flushes one page on the local processor * - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB * - flush_tlb_range(vma, start, end) flushes a range of pages * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages @@ -17,7 +20,7 @@ */ #ifdef __KERNEL__ -#if defined(CONFIG_4xx) || defined(CONFIG_8xx) || defined(CONFIG_FSL_BOOKE) +#ifdef CONFIG_PPC_MMU_NOHASH /* * TLB flushing for software loaded TLB chips * @@ -28,63 +31,49 @@ #include <linux/mm.h> -extern void _tlbie(unsigned long address, unsigned int pid); -extern void _tlbil_all(void); -extern void _tlbil_pid(unsigned int pid); -extern void _tlbil_va(unsigned long address, unsigned int pid); +#define MMU_NO_CONTEXT ((unsigned int)-1) -#if defined(CONFIG_40x) || defined(CONFIG_8xx) -#define _tlbia() asm volatile ("tlbia; sync" : : : "memory") -#else /* CONFIG_44x || CONFIG_FSL_BOOKE */ -extern void _tlbia(void); -#endif - -static inline void flush_tlb_mm(struct mm_struct *mm) -{ - _tlbil_pid(mm->context.id); -} - -static inline void flush_tlb_page(struct vm_area_struct *vma, - unsigned long vmaddr) -{ - _tlbil_va(vmaddr, vma ? 
vma->vm_mm->context.id : 0); -} +extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, + unsigned long end); +extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); -static inline void flush_tlb_page_nohash(struct vm_area_struct *vma, - unsigned long vmaddr) -{ - flush_tlb_page(vma, vmaddr); -} +extern void local_flush_tlb_mm(struct mm_struct *mm); +extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr); -static inline void flush_tlb_range(struct vm_area_struct *vma, - unsigned long start, unsigned long end) -{ - _tlbil_pid(vma->vm_mm->context.id); -} +#ifdef CONFIG_SMP +extern void flush_tlb_mm(struct mm_struct *mm); +extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr); +#else +#define flush_tlb_mm(mm) local_flush_tlb_mm(mm) +#define flush_tlb_page(vma,addr) local_flush_tlb_page(vma,addr) +#endif +#define flush_tlb_page_nohash(vma,addr) flush_tlb_page(vma,addr) -static inline void flush_tlb_kernel_range(unsigned long start, - unsigned long end) -{ - _tlbil_pid(0); -} +#elif defined(CONFIG_PPC_STD_MMU_32) -#elif defined(CONFIG_PPC32) /* - * TLB flushing for "classic" hash-MMMU 32-bit CPUs, 6xx, 7xx, 7xxx + * TLB flushing for "classic" hash-MMU 32-bit CPUs, 6xx, 7xx, 7xxx */ -extern void _tlbie(unsigned long address); -extern void _tlbia(void); - extern void flush_tlb_mm(struct mm_struct *mm); extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr); extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr); extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); +static inline void local_flush_tlb_page(struct vm_area_struct *vma, + unsigned long vmaddr) +{ + flush_tlb_page(vma, vmaddr); +} +static inline void local_flush_tlb_mm(struct mm_struct *mm) +{ + flush_tlb_mm(mm); +} + +#elif defined(CONFIG_PPC_STD_MMU_64) -#else /* - * TLB flushing for 64-bit has-MMU CPUs + * TLB flushing for 64-bit hash-MMU CPUs */ #include <linux/percpu.h> @@ -134,10 +123,19 @@ extern void flush_hash_page(unsigned long va, real_pte_t pte, int psize, extern void flush_hash_range(unsigned long number, int local); +static inline void local_flush_tlb_mm(struct mm_struct *mm) +{ +} + static inline void flush_tlb_mm(struct mm_struct *mm) { } +static inline void local_flush_tlb_page(struct vm_area_struct *vma, + unsigned long vmaddr) +{ +} + static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr) { @@ -162,7 +160,8 @@ static inline void flush_tlb_kernel_range(unsigned long start, extern void __flush_hash_table_range(struct mm_struct *mm, unsigned long start, unsigned long end); - +#else +#error Unsupported MMU type #endif #endif /*__KERNEL__ */ diff --git a/arch/powerpc/include/asm/vdso_datapage.h b/arch/powerpc/include/asm/vdso_datapage.h index f01393224b52..13c2c283e178 100644 --- a/arch/powerpc/include/asm/vdso_datapage.h +++ b/arch/powerpc/include/asm/vdso_datapage.h @@ -39,6 +39,7 @@ #ifndef __ASSEMBLY__ #include <linux/unistd.h> +#include <linux/time.h> #define SYSCALL_MAP_SIZE ((__NR_syscalls + 31) / 32) @@ -83,6 +84,7 @@ struct vdso_data { __u32 icache_log_block_size; /* L1 i-cache log block size */ __s32 wtom_clock_sec; /* Wall to monotonic clock */ __s32 wtom_clock_nsec; + struct timespec stamp_xtime; /* xtime as at tb_orig_stamp */ __u32 syscall_map_64[SYSCALL_MAP_SIZE]; /* map of syscalls */ __u32 
syscall_map_32[SYSCALL_MAP_SIZE]; /* map of syscalls */ }; @@ -102,6 +104,7 @@ struct vdso_data { __u32 tz_dsttime; /* Type of dst correction 0x5C */ __s32 wtom_clock_sec; /* Wall to monotonic clock */ __s32 wtom_clock_nsec; + struct timespec stamp_xtime; /* xtime as at tb_orig_stamp */ __u32 syscall_map_32[SYSCALL_MAP_SIZE]; /* map of syscalls */ __u32 dcache_block_size; /* L1 d-cache block size */ __u32 icache_block_size; /* L1 i-cache block size */ diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 92673b43858d..1308a86e9070 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -17,6 +17,7 @@ ifdef CONFIG_FUNCTION_TRACER CFLAGS_REMOVE_cputable.o = -pg -mno-sched-epilog CFLAGS_REMOVE_prom_init.o = -pg -mno-sched-epilog CFLAGS_REMOVE_btext.o = -pg -mno-sched-epilog +CFLAGS_REMOVE_prom.o = -pg -mno-sched-epilog ifdef CONFIG_DYNAMIC_FTRACE # dynamic ftrace setup. @@ -102,6 +103,10 @@ endif obj-$(CONFIG_PPC64) += $(obj64-y) +ifneq ($(CONFIG_XMON)$(CONFIG_KEXEC),) +obj-y += ppc_save_regs.o +endif + extra-$(CONFIG_PPC_FPU) += fpu.o extra-$(CONFIG_PPC64) += entry_64.o diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 75c5dd0138fd..661d07d2146b 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -60,6 +60,7 @@ int main(void) { DEFINE(THREAD, offsetof(struct task_struct, thread)); DEFINE(MM, offsetof(struct task_struct, mm)); + DEFINE(MMCONTEXTID, offsetof(struct mm_struct, context.id)); #ifdef CONFIG_PPC64 DEFINE(AUDITCONTEXT, offsetof(struct task_struct, audit_context)); #else @@ -306,6 +307,7 @@ int main(void) DEFINE(CFG_SYSCALL_MAP32, offsetof(struct vdso_data, syscall_map_32)); DEFINE(WTOM_CLOCK_SEC, offsetof(struct vdso_data, wtom_clock_sec)); DEFINE(WTOM_CLOCK_NSEC, offsetof(struct vdso_data, wtom_clock_nsec)); + DEFINE(STAMP_XTIME, offsetof(struct vdso_data, stamp_xtime)); DEFINE(CFG_ICACHE_BLOCKSZ, offsetof(struct vdso_data, icache_block_size)); DEFINE(CFG_DCACHE_BLOCKSZ, offsetof(struct vdso_data, dcache_block_size)); DEFINE(CFG_ICACHE_LOGBLOCKSZ, offsetof(struct vdso_data, icache_log_block_size)); @@ -378,6 +380,10 @@ int main(void) DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear)); DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr)); #endif +#ifdef CONFIG_44x + DEFINE(PGD_T_LOG2, PGD_T_LOG2); + DEFINE(PTE_T_LOG2, PTE_T_LOG2); +#endif return 0; } diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index 7e8719504f39..923f87aff20a 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -19,6 +19,7 @@ #include <asm/oprofile_impl.h> #include <asm/cputable.h> #include <asm/prom.h> /* for PTRRELOC on ARCH=ppc */ +#include <asm/mmu.h> struct cpu_spec* cur_cpu_spec = NULL; EXPORT_SYMBOL(cur_cpu_spec); @@ -94,6 +95,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "POWER3 (630)", .cpu_features = CPU_FTRS_POWER3, .cpu_user_features = COMMON_USER_PPC64|PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 8, @@ -109,6 +111,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "POWER3 (630+)", .cpu_features = CPU_FTRS_POWER3, .cpu_user_features = COMMON_USER_PPC64|PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 8, @@ -124,6 +127,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "RS64-II (northstar)", .cpu_features = 
CPU_FTRS_RS64, .cpu_user_features = COMMON_USER_PPC64, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 8, @@ -139,6 +143,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "RS64-III (pulsar)", .cpu_features = CPU_FTRS_RS64, .cpu_user_features = COMMON_USER_PPC64, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 8, @@ -154,6 +159,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "RS64-III (icestar)", .cpu_features = CPU_FTRS_RS64, .cpu_user_features = COMMON_USER_PPC64, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 8, @@ -169,6 +175,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "RS64-IV (sstar)", .cpu_features = CPU_FTRS_RS64, .cpu_user_features = COMMON_USER_PPC64, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 8, @@ -184,6 +191,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "POWER4 (gp)", .cpu_features = CPU_FTRS_POWER4, .cpu_user_features = COMMON_USER_POWER4, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 8, @@ -199,6 +207,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "POWER4+ (gq)", .cpu_features = CPU_FTRS_POWER4, .cpu_user_features = COMMON_USER_POWER4, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 8, @@ -215,6 +224,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_PPC970, .cpu_user_features = COMMON_USER_POWER4 | PPC_FEATURE_HAS_ALTIVEC_COMP, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 8, @@ -233,6 +243,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_PPC970, .cpu_user_features = COMMON_USER_POWER4 | PPC_FEATURE_HAS_ALTIVEC_COMP, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 8, @@ -251,6 +262,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_PPC970, .cpu_user_features = COMMON_USER_POWER4 | PPC_FEATURE_HAS_ALTIVEC_COMP, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 8, @@ -269,6 +281,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_PPC970, .cpu_user_features = COMMON_USER_POWER4 | PPC_FEATURE_HAS_ALTIVEC_COMP, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 8, @@ -287,6 +300,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_PPC970, .cpu_user_features = COMMON_USER_POWER4 | PPC_FEATURE_HAS_ALTIVEC_COMP, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 8, @@ -303,6 +317,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "POWER5 (gr)", .cpu_features = CPU_FTRS_POWER5, .cpu_user_features = COMMON_USER_POWER5, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 6, @@ -323,6 +338,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "POWER5+ (gs)", .cpu_features = CPU_FTRS_POWER5, .cpu_user_features = COMMON_USER_POWER5_PLUS, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 6, @@ -339,6 +355,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "POWER5+ (gs)", .cpu_features = CPU_FTRS_POWER5, .cpu_user_features = COMMON_USER_POWER5_PLUS, 
+ .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 6, @@ -356,6 +373,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "POWER5+", .cpu_features = CPU_FTRS_POWER5, .cpu_user_features = COMMON_USER_POWER5_PLUS, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 128, .dcache_bsize = 128, .machine_check = machine_check_generic, @@ -369,6 +387,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_POWER6, .cpu_user_features = COMMON_USER_POWER6 | PPC_FEATURE_POWER6_EXT, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 6, @@ -388,6 +407,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "POWER6 (architected)", .cpu_features = CPU_FTRS_POWER6, .cpu_user_features = COMMON_USER_POWER6, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 128, .dcache_bsize = 128, .machine_check = machine_check_generic, @@ -400,6 +420,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "POWER7 (architected)", .cpu_features = CPU_FTRS_POWER7, .cpu_user_features = COMMON_USER_POWER7, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 128, .dcache_bsize = 128, .machine_check = machine_check_generic, @@ -412,6 +433,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "POWER7 (raw)", .cpu_features = CPU_FTRS_POWER7, .cpu_user_features = COMMON_USER_POWER7, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 6, @@ -434,6 +456,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_user_features = COMMON_USER_PPC64 | PPC_FEATURE_CELL | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_SMT, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 4, @@ -449,6 +472,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "PA6T", .cpu_features = CPU_FTRS_PA6T, .cpu_user_features = COMMON_USER_PA6T, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 64, .dcache_bsize = 64, .num_pmcs = 6, @@ -466,6 +490,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "POWER4 (compatible)", .cpu_features = CPU_FTRS_COMPATIBLE, .cpu_user_features = COMMON_USER_PPC64, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 6, @@ -483,6 +508,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_PPC601, .cpu_user_features = COMMON_USER | PPC_FEATURE_601_INSTR | PPC_FEATURE_UNIFIED_CACHE | PPC_FEATURE_NO_TB, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_generic, @@ -494,6 +520,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "603", .cpu_features = CPU_FTRS_603, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, + .mmu_features = 0, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_603, @@ -506,6 +533,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "603e", .cpu_features = CPU_FTRS_603, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, + .mmu_features = 0, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_603, @@ -518,6 +546,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "603ev", .cpu_features = CPU_FTRS_603, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, + .mmu_features = 0, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_603, @@ -530,6 +559,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "604", .cpu_features = 
CPU_FTRS_604, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 2, @@ -543,6 +573,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "604e", .cpu_features = CPU_FTRS_604, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, @@ -556,6 +587,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "604r", .cpu_features = CPU_FTRS_604, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, @@ -569,6 +601,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "604ev", .cpu_features = CPU_FTRS_604, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, @@ -582,6 +615,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "740/750", .cpu_features = CPU_FTRS_740_NOTAU, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, @@ -595,6 +629,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "750CX", .cpu_features = CPU_FTRS_750, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, @@ -608,6 +643,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "750CX", .cpu_features = CPU_FTRS_750, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, @@ -622,6 +658,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "750CXe", .cpu_features = CPU_FTRS_750, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, @@ -636,6 +673,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "750CXe", .cpu_features = CPU_FTRS_750, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, @@ -650,6 +688,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "750CL", .cpu_features = CPU_FTRS_750CL, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, @@ -664,6 +703,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "745/755", .cpu_features = CPU_FTRS_750, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, @@ -678,6 +718,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "750FX", .cpu_features = CPU_FTRS_750FX1, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, @@ -692,6 +733,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "750FX", .cpu_features = CPU_FTRS_750FX2, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, @@ -706,6 +748,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "750FX", .cpu_features = CPU_FTRS_750FX, 
.cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, @@ -720,6 +763,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "750GX", .cpu_features = CPU_FTRS_750GX, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, @@ -734,6 +778,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "740/750", .cpu_features = CPU_FTRS_740, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, @@ -749,6 +794,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_7400_NOTAU, .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, @@ -764,6 +810,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_7400, .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, @@ -779,6 +826,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_7400, .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, @@ -794,6 +842,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_7450_20, .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 6, @@ -811,6 +860,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_7450_21, .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 6, @@ -828,6 +878,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_7450_23, .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 6, @@ -845,6 +896,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_7455_1, .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 6, @@ -862,6 +914,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_7455_20, .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 6, @@ -879,6 +932,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_7455, .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 6, @@ -896,6 +950,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_7447_10, .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, + 
.mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 6, @@ -913,6 +968,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_7447_10, .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 6, @@ -929,6 +985,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "7447/7457", .cpu_features = CPU_FTRS_7447, .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 6, @@ -946,6 +1003,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_7447A, .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 6, @@ -963,6 +1021,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_7448, .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 6, @@ -979,6 +1038,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "82xx", .cpu_features = CPU_FTRS_82XX, .cpu_user_features = COMMON_USER, + .mmu_features = 0, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_603, @@ -991,6 +1051,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "G2_LE", .cpu_features = CPU_FTRS_G2_LE, .cpu_user_features = COMMON_USER, + .mmu_features = MMU_FTR_USE_HIGH_BATS, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_603, @@ -1003,6 +1064,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "e300c1", .cpu_features = CPU_FTRS_E300, .cpu_user_features = COMMON_USER, + .mmu_features = MMU_FTR_USE_HIGH_BATS, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_603, @@ -1015,6 +1077,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "e300c2", .cpu_features = CPU_FTRS_E300C2, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, + .mmu_features = MMU_FTR_USE_HIGH_BATS, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_603, @@ -1027,6 +1090,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "e300c3", .cpu_features = CPU_FTRS_E300, .cpu_user_features = COMMON_USER, + .mmu_features = MMU_FTR_USE_HIGH_BATS, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_603, @@ -1041,6 +1105,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "e300c4", .cpu_features = CPU_FTRS_E300, .cpu_user_features = COMMON_USER, + .mmu_features = MMU_FTR_USE_HIGH_BATS, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_603, @@ -1056,6 +1121,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "(generic PPC)", .cpu_features = CPU_FTRS_CLASSIC32, .cpu_user_features = COMMON_USER, + .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_generic, @@ -1071,6 +1137,7 @@ static struct cpu_spec __initdata cpu_specs[] = { * if the 8xx code is there.... 
*/ .cpu_features = CPU_FTRS_8XX, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, + .mmu_features = MMU_FTR_TYPE_8xx, .icache_bsize = 16, .dcache_bsize = 16, .platform = "ppc823", @@ -1083,6 +1150,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "403GC", .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, + .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 16, .dcache_bsize = 16, .machine_check = machine_check_4xx, @@ -1095,6 +1163,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_NO_TB, + .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 16, .dcache_bsize = 16, .machine_check = machine_check_4xx, @@ -1106,6 +1175,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "403G ??", .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, + .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 16, .dcache_bsize = 16, .machine_check = machine_check_4xx, @@ -1118,6 +1188,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, + .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, @@ -1130,6 +1201,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, + .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, @@ -1142,6 +1214,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, + .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, @@ -1154,6 +1227,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, + .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, @@ -1166,6 +1240,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, + .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, @@ -1178,6 +1253,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, + .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, @@ -1190,6 +1266,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, + .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, @@ -1202,6 +1279,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, + .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, @@ -1213,6 +1291,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "405LP", 
.cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, + .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, @@ -1225,6 +1304,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, + .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, @@ -1237,6 +1317,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, + .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, @@ -1249,6 +1330,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, + .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, @@ -1261,6 +1343,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, + .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, @@ -1273,6 +1356,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, + .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, @@ -1286,6 +1370,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, + .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, @@ -1298,6 +1383,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, + .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, @@ -1312,6 +1398,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "440GR Rev. A", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE, + .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, @@ -1323,6 +1410,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "440EP Rev. A", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, + .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_440ep, @@ -1335,6 +1423,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "440GR Rev. B", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, + .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, @@ -1346,6 +1435,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "440EP Rev. 
C", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, + .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_440ep, @@ -1358,6 +1448,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "440EP Rev. B", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, + .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_440ep, @@ -1370,6 +1461,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "440GRX", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE, + .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_440grx, @@ -1382,6 +1474,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "440EPX", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, + .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_440epx, @@ -1394,6 +1487,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "440GP Rev. B", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE, + .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, @@ -1405,6 +1499,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "440GP Rev. C", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE, + .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, @@ -1416,6 +1511,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "440GX Rev. A", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE, + .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_440gx, @@ -1428,6 +1524,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "440GX Rev. B", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE, + .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_440gx, @@ -1440,6 +1537,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "440GX Rev. C", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE, + .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_440gx, @@ -1452,6 +1550,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "440GX Rev. F", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE, + .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_440gx, @@ -1464,6 +1563,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "440SP Rev. A", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE, + .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, @@ -1475,6 +1575,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "440SPe Rev. A", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE, + .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_440spe, @@ -1487,6 +1588,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "440SPe Rev. 
B", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE, + .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_440spe, @@ -1499,6 +1601,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "440 in Virtex-5 FXT", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE, + .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_440x5, @@ -1509,8 +1612,9 @@ static struct cpu_spec __initdata cpu_specs[] = { .pvr_mask = 0xffff0002, .pvr_value = 0x13020002, .cpu_name = "460EX", - .cpu_features = CPU_FTRS_44X, + .cpu_features = CPU_FTRS_440x6, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, + .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_460ex, @@ -1521,8 +1625,9 @@ static struct cpu_spec __initdata cpu_specs[] = { .pvr_mask = 0xffff0002, .pvr_value = 0x13020000, .cpu_name = "460GT", - .cpu_features = CPU_FTRS_44X, + .cpu_features = CPU_FTRS_440x6, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, + .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_460gt, @@ -1535,6 +1640,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "(generic 44x PPC)", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE, + .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, @@ -1551,6 +1657,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_EFP_SINGLE | PPC_FEATURE_UNIFIED_CACHE, + .mmu_features = MMU_FTR_TYPE_FSL_E, .dcache_bsize = 32, .machine_check = machine_check_e200, .platform = "ppc5554", @@ -1565,6 +1672,7 @@ static struct cpu_spec __initdata cpu_specs[] = { PPC_FEATURE_HAS_SPE_COMP | PPC_FEATURE_HAS_EFP_SINGLE_COMP | PPC_FEATURE_UNIFIED_CACHE, + .mmu_features = MMU_FTR_TYPE_FSL_E, .dcache_bsize = 32, .machine_check = machine_check_e200, .platform = "ppc5554", @@ -1577,6 +1685,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_EFP_SINGLE | PPC_FEATURE_UNIFIED_CACHE, + .mmu_features = MMU_FTR_TYPE_FSL_E, .dcache_bsize = 32, .machine_check = machine_check_e200, .platform = "ppc5554", @@ -1591,6 +1700,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_SPE_COMP | PPC_FEATURE_HAS_EFP_SINGLE_COMP, + .mmu_features = MMU_FTR_TYPE_FSL_E, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, @@ -1608,6 +1718,7 @@ static struct cpu_spec __initdata cpu_specs[] = { PPC_FEATURE_HAS_SPE_COMP | PPC_FEATURE_HAS_EFP_SINGLE_COMP | PPC_FEATURE_HAS_EFP_DOUBLE_COMP, + .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, @@ -1622,6 +1733,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "e500mc", .cpu_features = CPU_FTRS_E500MC, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, + .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS, .icache_bsize = 64, .dcache_bsize = 64, .num_pmcs = 4, @@ -1638,6 +1750,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_SPE_COMP | PPC_FEATURE_HAS_EFP_SINGLE_COMP, + .mmu_features = MMU_FTR_TYPE_FSL_E, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_e500, diff --git a/arch/powerpc/kernel/dma.c 
b/arch/powerpc/kernel/dma.c index 3a6eaa876ee1..1c5c8a6fc129 100644 --- a/arch/powerpc/kernel/dma.c +++ b/arch/powerpc/kernel/dma.c @@ -120,6 +120,26 @@ static inline void dma_direct_unmap_page(struct device *dev, { } +#ifdef CONFIG_NOT_COHERENT_CACHE +static inline void dma_direct_sync_sg(struct device *dev, + struct scatterlist *sgl, int nents, + enum dma_data_direction direction) +{ + struct scatterlist *sg; + int i; + + for_each_sg(sgl, sg, nents, i) + __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction); +} + +static inline void dma_direct_sync_single_range(struct device *dev, + dma_addr_t dma_handle, unsigned long offset, size_t size, + enum dma_data_direction direction) +{ + __dma_sync(bus_to_virt(dma_handle+offset), size, direction); +} +#endif + struct dma_mapping_ops dma_direct_ops = { .alloc_coherent = dma_direct_alloc_coherent, .free_coherent = dma_direct_free_coherent, @@ -128,5 +148,11 @@ struct dma_mapping_ops dma_direct_ops = { .dma_supported = dma_direct_dma_supported, .map_page = dma_direct_map_page, .unmap_page = dma_direct_unmap_page, +#ifdef CONFIG_NOT_COHERENT_CACHE + .sync_single_range_for_cpu = dma_direct_sync_single_range, + .sync_single_range_for_device = dma_direct_sync_single_range, + .sync_sg_for_cpu = dma_direct_sync_sg, + .sync_sg_for_device = dma_direct_sync_sg, +#endif }; EXPORT_SYMBOL(dma_direct_ops); diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index 7ecc0d1855c3..6f7eb7e00c79 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -1162,39 +1162,17 @@ machine_check_in_rtas: #ifdef CONFIG_DYNAMIC_FTRACE _GLOBAL(mcount) _GLOBAL(_mcount) - stwu r1,-48(r1) - stw r3, 12(r1) - stw r4, 16(r1) - stw r5, 20(r1) - stw r6, 24(r1) - mflr r3 - stw r7, 28(r1) - mfcr r5 - stw r8, 32(r1) - stw r9, 36(r1) - stw r10,40(r1) - stw r3, 44(r1) - stw r5, 8(r1) - subi r3, r3, MCOUNT_INSN_SIZE - .globl mcount_call -mcount_call: - bl ftrace_stub - nop - lwz r6, 8(r1) - lwz r0, 44(r1) - lwz r3, 12(r1) + /* + * It is required that _mcount on PPC32 must preserve the + * link register. But we have r0 to play with. We use r0 + * to push the return address back to the caller of mcount + * into the ctr register, restore the link register and + * then jump back using the ctr register. 
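The dma_direct_sync_* helpers added to dma.c above are only wired up when CONFIG_NOT_COHERENT_CACHE is set, and a driver never calls them directly; it reaches them through the generic streaming-DMA API. A minimal sketch of that usage follows — the device, buffer and rx_one_buffer() wrapper are hypothetical, only the dma_* calls are the standard linux/dma-mapping.h interface:

#include <linux/dma-mapping.h>
#include <linux/device.h>

/* Hypothetical receive path: map a buffer for the device, let it DMA,
 * then pass ownership back and forth with the sync calls.  On coherent
 * platforms the syncs do nothing; on non-coherent 4xx-style parts they
 * are expected to land in dma_direct_sync_single_range()/__dma_sync()
 * from the hunk above. */
static void rx_one_buffer(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

	/* ... program the device with "handle" and wait for the DMA ... */

	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
	/* the CPU may now safely read buf */

	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
	/* ... or hand the buffer back to the device for reuse ... */

	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
}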
+ */ + mflr r0 mtctr r0 - lwz r4, 16(r1) - mtcr r6 - lwz r5, 20(r1) - lwz r6, 24(r1) - lwz r0, 52(r1) - lwz r7, 28(r1) - lwz r8, 32(r1) + lwz r0, 4(r1) mtlr r0 - lwz r9, 36(r1) - lwz r10,40(r1) - addi r1, r1, 48 bctr _GLOBAL(ftrace_caller) diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index e0bcf9354286..383ed6eb0085 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -894,18 +894,6 @@ _GLOBAL(enter_prom) #ifdef CONFIG_DYNAMIC_FTRACE _GLOBAL(mcount) _GLOBAL(_mcount) - /* Taken from output of objdump from lib64/glibc */ - mflr r3 - stdu r1, -112(r1) - std r3, 128(r1) - subi r3, r3, MCOUNT_INSN_SIZE - .globl mcount_call -mcount_call: - bl ftrace_stub - nop - ld r0, 128(r1) - mtlr r0 - addi r1, r1, 112 blr _GLOBAL(ftrace_caller) diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c index f4b006ed0ab1..5355244c99ff 100644 --- a/arch/powerpc/kernel/ftrace.c +++ b/arch/powerpc/kernel/ftrace.c @@ -9,22 +9,30 @@ #include <linux/spinlock.h> #include <linux/hardirq.h> +#include <linux/uaccess.h> +#include <linux/module.h> #include <linux/ftrace.h> #include <linux/percpu.h> #include <linux/init.h> #include <linux/list.h> #include <asm/cacheflush.h> +#include <asm/code-patching.h> #include <asm/ftrace.h> +#if 0 +#define DEBUGP printk +#else +#define DEBUGP(fmt , ...) do { } while (0) +#endif -static unsigned int ftrace_nop = 0x60000000; +static unsigned int ftrace_nop = PPC_NOP_INSTR; #ifdef CONFIG_PPC32 # define GET_ADDR(addr) addr #else /* PowerPC64's functions are data that points to the functions */ -# define GET_ADDR(addr) *(unsigned long *)addr +# define GET_ADDR(addr) (*(unsigned long *)addr) #endif @@ -33,12 +41,12 @@ static unsigned int ftrace_calc_offset(long ip, long addr) return (int)(addr - ip); } -unsigned char *ftrace_nop_replace(void) +static unsigned char *ftrace_nop_replace(void) { return (char *)&ftrace_nop; } -unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr) +static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr) { static unsigned int op; @@ -68,49 +76,422 @@ unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr) # define _ASM_PTR " .long " #endif -int +static int ftrace_modify_code(unsigned long ip, unsigned char *old_code, unsigned char *new_code) { - unsigned replaced; - unsigned old = *(unsigned *)old_code; - unsigned new = *(unsigned *)new_code; - int faulted = 0; + unsigned char replaced[MCOUNT_INSN_SIZE]; /* * Note: Due to modules and __init, code can * disappear and change, we need to protect against faulting - * as well as code changing. + * as well as code changing. We do this by using the + * probe_kernel_* functions. * * No real locking needed, this code is run through - * kstop_machine. + * kstop_machine, or before SMP starts. 
 	 */
-	asm volatile (
-	"1: lwz %1, 0(%2)\n"
-	" cmpw %1, %5\n"
-	" bne 2f\n"
-	" stwu %3, 0(%2)\n"
-	"2:\n"
-	".section .fixup, \"ax\"\n"
-	"3: li %0, 1\n"
-	" b 2b\n"
-	".previous\n"
-	".section __ex_table,\"a\"\n"
-	_ASM_ALIGN "\n"
-	_ASM_PTR "1b, 3b\n"
-	".previous"
-	: "=r"(faulted), "=r"(replaced)
-	: "r"(ip), "r"(new),
-	"0"(faulted), "r"(old)
-	: "memory");
-
-	if (replaced != old && replaced != new)
-		faulted = 2;
-
-	if (!faulted)
-		flush_icache_range(ip, ip + 8);
-
-	return faulted;
+
+	/* read the text we want to modify */
+	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
+		return -EFAULT;
+
+	/* Make sure it is what we expect it to be */
+	if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
+		return -EINVAL;
+
+	/* replace the text with the new text */
+	if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
+		return -EPERM;
+
+	flush_icache_range(ip, ip + 8);
+
+	return 0;
+}
+
+/*
+ * Helper functions that are the same for both PPC64 and PPC32.
+ */
+static int test_24bit_addr(unsigned long ip, unsigned long addr)
+{
+
+	/* use create_branch() to verify that this offset can be branched */
+	return create_branch((unsigned int *)ip, addr, 0);
+}
+
+static int is_bl_op(unsigned int op)
+{
+	return (op & 0xfc000003) == 0x48000001;
+}
+
+static unsigned long find_bl_target(unsigned long ip, unsigned int op)
+{
+	static int offset;
+
+	offset = (op & 0x03fffffc);
+	/* make it signed */
+	if (offset & 0x02000000)
+		offset |= 0xfe000000;
+
+	return ip + (long)offset;
+}
+
+#ifdef CONFIG_PPC64
+static int
+__ftrace_make_nop(struct module *mod,
+		  struct dyn_ftrace *rec, unsigned long addr)
+{
+	unsigned int op;
+	unsigned int jmp[5];
+	unsigned long ptr;
+	unsigned long ip = rec->ip;
+	unsigned long tramp;
+	int offset;
+
+	/* read where this goes */
+	if (probe_kernel_read(&op, (void *)ip, sizeof(int)))
+		return -EFAULT;
+
+	/* Make sure that this is still a 24-bit jump */
+	if (!is_bl_op(op)) {
+		printk(KERN_ERR "Not expected bl: opcode is %x\n", op);
+		return -EINVAL;
+	}
+
+	/* let's find where the pointer goes */
+	tramp = find_bl_target(ip, op);
+
+	/*
+	 * On PPC64 the trampoline looks like:
+	 * 0x3d, 0x82, 0x00, 0x00,	addis	r12,r2, <high>
+	 * 0x39, 0x8c, 0x00, 0x00,	addi	r12,r12, <low>
+	 * Where the bytes 2,3,6 and 7 make up the 32-bit offset
+	 * to the TOC that holds the pointer to jump to.
+	 * 0xf8, 0x41, 0x00, 0x28,	std	r2,40(r1)
+	 * 0xe9, 0x6c, 0x00, 0x20,	ld	r11,32(r12)
+	 * The actual address is 32 bytes from the offset
+	 * into the TOC.
+ * 0xe8, 0x4c, 0x00, 0x28, ld r2,40(r12) + */ + + DEBUGP("ip:%lx jumps to %lx r2: %lx", ip, tramp, mod->arch.toc); + + /* Find where the trampoline jumps to */ + if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) { + printk(KERN_ERR "Failed to read %lx\n", tramp); + return -EFAULT; + } + + DEBUGP(" %08x %08x", jmp[0], jmp[1]); + + /* verify that this is what we expect it to be */ + if (((jmp[0] & 0xffff0000) != 0x3d820000) || + ((jmp[1] & 0xffff0000) != 0x398c0000) || + (jmp[2] != 0xf8410028) || + (jmp[3] != 0xe96c0020) || + (jmp[4] != 0xe84c0028)) { + printk(KERN_ERR "Not a trampoline\n"); + return -EINVAL; + } + + offset = (unsigned)((unsigned short)jmp[0]) << 16 | + (unsigned)((unsigned short)jmp[1]); + + DEBUGP(" %x ", offset); + + /* get the address this jumps too */ + tramp = mod->arch.toc + offset + 32; + DEBUGP("toc: %lx", tramp); + + if (probe_kernel_read(jmp, (void *)tramp, 8)) { + printk(KERN_ERR "Failed to read %lx\n", tramp); + return -EFAULT; + } + + DEBUGP(" %08x %08x\n", jmp[0], jmp[1]); + + ptr = ((unsigned long)jmp[0] << 32) + jmp[1]; + + /* This should match what was called */ + if (ptr != GET_ADDR(addr)) { + printk(KERN_ERR "addr does not match %lx\n", ptr); + return -EINVAL; + } + + /* + * We want to nop the line, but the next line is + * 0xe8, 0x41, 0x00, 0x28 ld r2,40(r1) + * This needs to be turned to a nop too. + */ + if (probe_kernel_read(&op, (void *)(ip+4), MCOUNT_INSN_SIZE)) + return -EFAULT; + + if (op != 0xe8410028) { + printk(KERN_ERR "Next line is not ld! (%08x)\n", op); + return -EINVAL; + } + + /* + * Milton Miller pointed out that we can not blindly do nops. + * If a task was preempted when calling a trace function, + * the nops will remove the way to restore the TOC in r2 + * and the r2 TOC will get corrupted. 
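is_bl_op() and find_bl_target() above decode a PowerPC I-form branch-and-link: primary opcode 18 with AA=0, LK=1, carrying a signed 24-bit word displacement (about ±32 MB of byte reach). A small, self-contained illustration of the same decoding — userspace C, with the opcode and address chosen purely for the example:

#include <stdio.h>

static int is_bl(unsigned int op)
{
	/* keep opcode, AA and LK; a "bl" is 0x48000001 under this mask */
	return (op & 0xfc000003) == 0x48000001;
}

static unsigned long bl_target(unsigned long ip, unsigned int op)
{
	long offset = op & 0x03fffffc;	/* LI field, already a byte offset */

	if (offset & 0x02000000)	/* sign-extend the displacement */
		offset -= 0x04000000;
	return ip + offset;
}

int main(void)
{
	/* a "bl" at 0xc0000000 with displacement -8 encodes as 0x4bfffff9 */
	unsigned int op = 0x4bfffff9;

	if (is_bl(op))
		printf("target = 0x%lx\n", bl_target(0xc0000000UL, op));
	return 0;
}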
+ */ + + /* + * Replace: + * bl <tramp> <==== will be replaced with "b 1f" + * ld r2,40(r1) + * 1: + */ + op = 0x48000008; /* b +8 */ + + if (probe_kernel_write((void *)ip, &op, MCOUNT_INSN_SIZE)) + return -EPERM; + + + flush_icache_range(ip, ip + 8); + + return 0; +} + +#else /* !PPC64 */ +static int +__ftrace_make_nop(struct module *mod, + struct dyn_ftrace *rec, unsigned long addr) +{ + unsigned int op; + unsigned int jmp[4]; + unsigned long ip = rec->ip; + unsigned long tramp; + + if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE)) + return -EFAULT; + + /* Make sure that that this is still a 24bit jump */ + if (!is_bl_op(op)) { + printk(KERN_ERR "Not expected bl: opcode is %x\n", op); + return -EINVAL; + } + + /* lets find where the pointer goes */ + tramp = find_bl_target(ip, op); + + /* + * On PPC32 the trampoline looks like: + * 0x3d, 0x60, 0x00, 0x00 lis r11,sym@ha + * 0x39, 0x6b, 0x00, 0x00 addi r11,r11,sym@l + * 0x7d, 0x69, 0x03, 0xa6 mtctr r11 + * 0x4e, 0x80, 0x04, 0x20 bctr + */ + + DEBUGP("ip:%lx jumps to %lx", ip, tramp); + + /* Find where the trampoline jumps to */ + if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) { + printk(KERN_ERR "Failed to read %lx\n", tramp); + return -EFAULT; + } + + DEBUGP(" %08x %08x ", jmp[0], jmp[1]); + + /* verify that this is what we expect it to be */ + if (((jmp[0] & 0xffff0000) != 0x3d600000) || + ((jmp[1] & 0xffff0000) != 0x396b0000) || + (jmp[2] != 0x7d6903a6) || + (jmp[3] != 0x4e800420)) { + printk(KERN_ERR "Not a trampoline\n"); + return -EINVAL; + } + + tramp = (jmp[1] & 0xffff) | + ((jmp[0] & 0xffff) << 16); + if (tramp & 0x8000) + tramp -= 0x10000; + + DEBUGP(" %x ", tramp); + + if (tramp != addr) { + printk(KERN_ERR + "Trampoline location %08lx does not match addr\n", + tramp); + return -EINVAL; + } + + op = PPC_NOP_INSTR; + + if (probe_kernel_write((void *)ip, &op, MCOUNT_INSN_SIZE)) + return -EPERM; + + flush_icache_range(ip, ip + 8); + + return 0; +} +#endif /* PPC64 */ + +int ftrace_make_nop(struct module *mod, + struct dyn_ftrace *rec, unsigned long addr) +{ + unsigned char *old, *new; + unsigned long ip = rec->ip; + + /* + * If the calling address is more that 24 bits away, + * then we had to use a trampoline to make the call. + * Otherwise just update the call site. + */ + if (test_24bit_addr(ip, addr)) { + /* within range */ + old = ftrace_call_replace(ip, addr); + new = ftrace_nop_replace(); + return ftrace_modify_code(ip, old, new); + } + + /* + * Out of range jumps are called from modules. + * We should either already have a pointer to the module + * or it has been passed in. 
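The PPC32 path above rebuilds the trampoline target from a lis/addi pair. Because addi sign-extends its 16-bit immediate, the @ha half emitted by the linker can be one higher than the true upper half of the symbol, which is what the `if (tramp & 0x8000) tramp -= 0x10000` adjustment accounts for. The same arithmetic written out as a plain C helper (illustrative only, not part of the patch):

/* Recombine the immediates of "lis rN,sym@ha; addi rN,rN,sym@l". */
static unsigned int ha_l_to_addr(unsigned int ha_imm, unsigned int l_imm)
{
	/* the low half is sign-extended by addi, so simply OR-ing the two
	 * halves together would be off by 0x10000 whenever bit 15 is set */
	return (ha_imm << 16) + (unsigned int)(int)(short)(l_imm & 0xffff);
}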
+ */ + if (!rec->arch.mod) { + if (!mod) { + printk(KERN_ERR "No module loaded addr=%lx\n", + addr); + return -EFAULT; + } + rec->arch.mod = mod; + } else if (mod) { + if (mod != rec->arch.mod) { + printk(KERN_ERR + "Record mod %p not equal to passed in mod %p\n", + rec->arch.mod, mod); + return -EINVAL; + } + /* nothing to do if mod == rec->arch.mod */ + } else + mod = rec->arch.mod; + + return __ftrace_make_nop(mod, rec, addr); + +} + +#ifdef CONFIG_PPC64 +static int +__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) +{ + unsigned int op[2]; + unsigned long ip = rec->ip; + + /* read where this goes */ + if (probe_kernel_read(op, (void *)ip, MCOUNT_INSN_SIZE * 2)) + return -EFAULT; + + /* + * It should be pointing to two nops or + * b +8; ld r2,40(r1) + */ + if (((op[0] != 0x48000008) || (op[1] != 0xe8410028)) && + ((op[0] != PPC_NOP_INSTR) || (op[1] != PPC_NOP_INSTR))) { + printk(KERN_ERR "Expected NOPs but have %x %x\n", op[0], op[1]); + return -EINVAL; + } + + /* If we never set up a trampoline to ftrace_caller, then bail */ + if (!rec->arch.mod->arch.tramp) { + printk(KERN_ERR "No ftrace trampoline\n"); + return -EINVAL; + } + + /* create the branch to the trampoline */ + op[0] = create_branch((unsigned int *)ip, + rec->arch.mod->arch.tramp, BRANCH_SET_LINK); + if (!op[0]) { + printk(KERN_ERR "REL24 out of range!\n"); + return -EINVAL; + } + + /* ld r2,40(r1) */ + op[1] = 0xe8410028; + + DEBUGP("write to %lx\n", rec->ip); + + if (probe_kernel_write((void *)ip, op, MCOUNT_INSN_SIZE * 2)) + return -EPERM; + + flush_icache_range(ip, ip + 8); + + return 0; +} +#else +static int +__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) +{ + unsigned int op; + unsigned long ip = rec->ip; + + /* read where this goes */ + if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE)) + return -EFAULT; + + /* It should be pointing to a nop */ + if (op != PPC_NOP_INSTR) { + printk(KERN_ERR "Expected NOP but have %x\n", op); + return -EINVAL; + } + + /* If we never set up a trampoline to ftrace_caller, then bail */ + if (!rec->arch.mod->arch.tramp) { + printk(KERN_ERR "No ftrace trampoline\n"); + return -EINVAL; + } + + /* create the branch to the trampoline */ + op = create_branch((unsigned int *)ip, + rec->arch.mod->arch.tramp, BRANCH_SET_LINK); + if (!op) { + printk(KERN_ERR "REL24 out of range!\n"); + return -EINVAL; + } + + DEBUGP("write to %lx\n", rec->ip); + + if (probe_kernel_write((void *)ip, &op, MCOUNT_INSN_SIZE)) + return -EPERM; + + flush_icache_range(ip, ip + 8); + + return 0; +} +#endif /* CONFIG_PPC64 */ + +int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) +{ + unsigned char *old, *new; + unsigned long ip = rec->ip; + + /* + * If the calling address is more that 24 bits away, + * then we had to use a trampoline to make the call. + * Otherwise just update the call site. + */ + if (test_24bit_addr(ip, addr)) { + /* within range */ + old = ftrace_nop_replace(); + new = ftrace_call_replace(ip, addr); + return ftrace_modify_code(ip, old, new); + } + + /* + * Out of range jumps are called from modules. + * Being that we are converting from nop, it had better + * already have a module defined. 
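Both ftrace_make_nop() and ftrace_make_call() first ask test_24bit_addr() whether the call site can reach the target with a single relative bl; only out-of-range (module) targets go through the trampoline. Roughly, that check is the displacement fitting the signed LI field and being word aligned — a sketch of the equivalent test (the kernel actually delegates to create_branch(), which also handles absolute branches):

/* Can "addr" be reached from "ip" with one relative bl?  (sketch only) */
static int within_bl_range(unsigned long ip, unsigned long addr)
{
	long delta = (long)addr - (long)ip;

	return delta >= -0x02000000L && delta <= 0x01fffffcL && !(delta & 3);
}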
+ */ + if (!rec->arch.mod) { + printk(KERN_ERR "No module loaded\n"); + return -EINVAL; + } + + return __ftrace_make_call(rec, addr); } int ftrace_update_ftrace_func(ftrace_func_t func) @@ -128,10 +509,10 @@ int ftrace_update_ftrace_func(ftrace_func_t func) int __init ftrace_dyn_arch_init(void *data) { - /* This is running in kstop_machine */ + /* caller expects data to be zero */ + unsigned long *p = data; - ftrace_mcount_set(data); + *p = 0; return 0; } - diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S index 0c326823c6d4..a1c4cfd25ded 100644 --- a/arch/powerpc/kernel/head_32.S +++ b/arch/powerpc/kernel/head_32.S @@ -31,6 +31,7 @@ #include <asm/ppc_asm.h> #include <asm/asm-offsets.h> #include <asm/ptrace.h> +#include <asm/bug.h> /* 601 only have IBAT; cr0.eq is set on 601 when using this macro */ #define LOAD_BAT(n, reg, RA, RB) \ @@ -182,7 +183,8 @@ __after_mmu_off: bl reloc_offset mr r26,r3 addis r4,r3,KERNELBASE@h /* current address of _start */ - cmpwi 0,r4,0 /* are we already running at 0? */ + lis r5,PHYSICAL_START@h + cmplw 0,r4,r5 /* already running at PHYSICAL_START? */ bne relocate_kernel /* * we now have the 1st 16M of ram mapped with the bats. @@ -810,13 +812,13 @@ giveup_altivec: /* * This code is jumped to from the startup code to copy - * the kernel image to physical address 0. + * the kernel image to physical address PHYSICAL_START. */ relocate_kernel: addis r9,r26,klimit@ha /* fetch klimit */ lwz r25,klimit@l(r9) addis r25,r25,-KERNELBASE@h - li r3,0 /* Destination base address */ + lis r3,PHYSICAL_START@h /* Destination base address */ li r6,0 /* Destination offset */ li r5,0x4000 /* # bytes of memory to copy */ bl copy_and_flush /* copy the first 0x4000 bytes */ @@ -989,12 +991,12 @@ load_up_mmu: LOAD_BAT(1,r3,r4,r5) LOAD_BAT(2,r3,r4,r5) LOAD_BAT(3,r3,r4,r5) -BEGIN_FTR_SECTION +BEGIN_MMU_FTR_SECTION LOAD_BAT(4,r3,r4,r5) LOAD_BAT(5,r3,r4,r5) LOAD_BAT(6,r3,r4,r5) LOAD_BAT(7,r3,r4,r5) -END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS) +END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS) blr /* @@ -1070,9 +1072,14 @@ start_here: RFI /* + * void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next); + * * Set up the segment registers for a new context. */ -_ENTRY(set_context) +_ENTRY(switch_mmu_context) + lwz r3,MMCONTEXTID(r4) + cmpwi cr0,r3,0 + blt- 4f mulli r3,r3,897 /* multiply context by skew factor */ rlwinm r3,r3,4,8,27 /* VSID = (context & 0xfffff) << 4 */ addis r3,r3,0x6000 /* Set Ks, Ku bits */ @@ -1083,6 +1090,7 @@ _ENTRY(set_context) /* Context switch the PTE pointer for the Abatron BDI2000. * The PGDIR is passed as second argument. */ + lwz r4,MM_PGD(r4) lis r5, KERNELBASE@h lwz r5, 0xf0(r5) stw r4, 0x4(r5) @@ -1098,6 +1106,9 @@ _ENTRY(set_context) sync isync blr +4: trap + EMIT_BUG_ENTRY 4b,__FILE__,__LINE__,0 + blr /* * An undocumented "feature" of 604e requires that the v bit @@ -1131,7 +1142,7 @@ clear_bats: mtspr SPRN_IBAT2L,r10 mtspr SPRN_IBAT3U,r10 mtspr SPRN_IBAT3L,r10 -BEGIN_FTR_SECTION +BEGIN_MMU_FTR_SECTION /* Here's a tweak: at this point, CPU setup have * not been called yet, so HIGH_BAT_EN may not be * set in HID0 for the 745x processors. However, it @@ -1154,7 +1165,7 @@ BEGIN_FTR_SECTION mtspr SPRN_IBAT6L,r10 mtspr SPRN_IBAT7U,r10 mtspr SPRN_IBAT7L,r10 -END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS) +END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS) blr flush_tlbs: @@ -1178,11 +1189,11 @@ mmu_off: /* * Use the first pair of BAT registers to map the 1st 16MB - * of RAM to KERNELBASE. 
From this point on we can't safely + * of RAM to PAGE_OFFSET. From this point on we can't safely * call OF any more. */ initial_bats: - lis r11,KERNELBASE@h + lis r11,PAGE_OFFSET@h mfspr r9,SPRN_PVR rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */ cmpwi 0,r9,1 diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S index f3a1ea9d7fe4..b56fecc93a16 100644 --- a/arch/powerpc/kernel/head_44x.S +++ b/arch/powerpc/kernel/head_44x.S @@ -69,6 +69,17 @@ _ENTRY(_start); li r24,0 /* CPU number */ /* + * In case the firmware didn't do it, we apply some workarounds + * that are good for all 440 core variants here + */ + mfspr r3,SPRN_CCR0 + rlwinm r3,r3,0,0,27 /* disable icache prefetch */ + isync + mtspr SPRN_CCR0,r3 + isync + sync + +/* * Set up the initial MMU state * * We are still executing code at the virtual address @@ -391,12 +402,14 @@ interrupt_base: rlwimi r13,r12,10,30,30 /* Load the PTE */ - rlwinm r12, r10, 13, 19, 29 /* Compute pgdir/pmd offset */ + /* Compute pgdir/pmd offset */ + rlwinm r12, r10, PPC44x_PGD_OFF_SHIFT, PPC44x_PGD_OFF_MASK_BIT, 29 lwzx r11, r12, r11 /* Get pgd/pmd entry */ rlwinm. r12, r11, 0, 0, 20 /* Extract pt base address */ beq 2f /* Bail if no table */ - rlwimi r12, r10, 23, 20, 28 /* Compute pte address */ + /* Compute pte address */ + rlwimi r12, r10, PPC44x_PTE_ADD_SHIFT, PPC44x_PTE_ADD_MASK_BIT, 28 lwz r11, 0(r12) /* Get high word of pte entry */ lwz r12, 4(r12) /* Get low word of pte entry */ @@ -485,12 +498,14 @@ tlb_44x_patch_hwater_D: /* Make up the required permissions */ li r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_HWEXEC - rlwinm r12, r10, 13, 19, 29 /* Compute pgdir/pmd offset */ + /* Compute pgdir/pmd offset */ + rlwinm r12, r10, PPC44x_PGD_OFF_SHIFT, PPC44x_PGD_OFF_MASK_BIT, 29 lwzx r11, r12, r11 /* Get pgd/pmd entry */ rlwinm. r12, r11, 0, 0, 20 /* Extract pt base address */ beq 2f /* Bail if no table */ - rlwimi r12, r10, 23, 20, 28 /* Compute pte address */ + /* Compute pte address */ + rlwimi r12, r10, PPC44x_PTE_ADD_SHIFT, PPC44x_PTE_ADD_MASK_BIT, 28 lwz r11, 0(r12) /* Get high word of pte entry */ lwz r12, 4(r12) /* Get low word of pte entry */ @@ -554,15 +569,16 @@ tlb_44x_patch_hwater_I: */ finish_tlb_load: /* Combine RPN & ERPN an write WS 0 */ - rlwimi r11,r12,0,0,19 + rlwimi r11,r12,0,0,31-PAGE_SHIFT tlbwe r11,r13,PPC44x_TLB_XLAT /* * Create WS1. This is the faulting address (EPN), * page size, and valid flag. */ - li r11,PPC44x_TLB_VALID | PPC44x_TLB_4K - rlwimi r10,r11,0,20,31 /* Insert valid and page size*/ + li r11,PPC44x_TLB_VALID | PPC44x_TLBE_SIZE + /* Insert valid and page size */ + rlwimi r10,r11,0,PPC44x_PTE_ADD_MASK_BIT,31 tlbwe r10,r13,PPC44x_TLB_PAGEID /* Write PAGEID */ /* And WS 2 */ @@ -634,12 +650,12 @@ _GLOBAL(set_context) * goes at the beginning of the data segment, which is page-aligned. */ .data - .align 12 + .align PAGE_SHIFT .globl sdata sdata: .globl empty_zero_page empty_zero_page: - .space 4096 + .space PAGE_SIZE /* * To support >32-bit physical addresses, we use an 8KB pgdir. diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S index 590304c24dad..11b549acc034 100644 --- a/arch/powerpc/kernel/head_fsl_booke.S +++ b/arch/powerpc/kernel/head_fsl_booke.S @@ -92,6 +92,7 @@ _ENTRY(_start); * if needed */ +_ENTRY(__early_start) /* 1. 
Find the index of the entry we're executing in */ bl invstr /* Find our address */ invstr: mflr r6 /* Make it accessible */ @@ -235,36 +236,40 @@ skpinv: addi r6,r6,1 /* Increment */ tlbivax 0,r9 TLBSYNC +/* The mapping only needs to be cache-coherent on SMP */ +#ifdef CONFIG_SMP +#define M_IF_SMP MAS2_M +#else +#define M_IF_SMP 0 +#endif + /* 6. Setup KERNELBASE mapping in TLB1[0] */ lis r6,0x1000 /* Set MAS0(TLBSEL) = TLB1(1), ESEL = 0 */ mtspr SPRN_MAS0,r6 lis r6,(MAS1_VALID|MAS1_IPROT)@h ori r6,r6,(MAS1_TSIZE(BOOKE_PAGESZ_64M))@l mtspr SPRN_MAS1,r6 - li r7,0 - lis r6,PAGE_OFFSET@h - ori r6,r6,PAGE_OFFSET@l - rlwimi r6,r7,0,20,31 + lis r6,MAS2_VAL(PAGE_OFFSET, BOOKE_PAGESZ_64M, M_IF_SMP)@h + ori r6,r6,MAS2_VAL(PAGE_OFFSET, BOOKE_PAGESZ_64M, M_IF_SMP)@l mtspr SPRN_MAS2,r6 mtspr SPRN_MAS3,r8 tlbwe /* 7. Jump to KERNELBASE mapping */ - lis r6,KERNELBASE@h - ori r6,r6,KERNELBASE@l - rlwimi r6,r7,0,20,31 + lis r6,(KERNELBASE & ~0xfff)@h + ori r6,r6,(KERNELBASE & ~0xfff)@l lis r7,MSR_KERNEL@h ori r7,r7,MSR_KERNEL@l bl 1f /* Find our address */ 1: mflr r9 rlwimi r6,r9,0,20,31 - addi r6,r6,24 + addi r6,r6,(2f - 1b) mtspr SPRN_SRR0,r6 mtspr SPRN_SRR1,r7 rfi /* start execution out of TLB1[0] entry */ /* 8. Clear out the temp mapping */ - lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */ +2: lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */ rlwimi r7,r5,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r5) */ mtspr SPRN_MAS0,r7 tlbre @@ -344,6 +349,15 @@ skpinv: addi r6,r6,1 /* Increment */ mtspr SPRN_DBSR,r2 #endif +#ifdef CONFIG_SMP + /* Check to see if we're the second processor, and jump + * to the secondary_start code if so + */ + mfspr r24,SPRN_PIR + cmpwi r24,0 + bne __secondary_start +#endif + /* * This is where the main kernel code starts. */ @@ -685,12 +699,13 @@ interrupt_base: /* SPE Floating Point Data */ #ifdef CONFIG_SPE EXCEPTION(0x2030, SPEFloatingPointData, SPEFloatingPointException, EXC_XFER_EE); -#else - EXCEPTION(0x2040, SPEFloatingPointData, unknown_exception, EXC_XFER_EE) -#endif /* CONFIG_SPE */ /* SPE Floating Point Round */ + EXCEPTION(0x2050, SPEFloatingPointRound, SPEFloatingPointRoundException, EXC_XFER_EE) +#else + EXCEPTION(0x2040, SPEFloatingPointData, unknown_exception, EXC_XFER_EE) EXCEPTION(0x2050, SPEFloatingPointRound, unknown_exception, EXC_XFER_EE) +#endif /* CONFIG_SPE */ /* Performance Monitor */ EXCEPTION(0x2060, PerformanceMonitor, performance_monitor_exception, EXC_XFER_STD) @@ -735,6 +750,9 @@ finish_tlb_load: #else rlwimi r12, r11, 26, 27, 31 /* extract WIMGE from pte */ #endif +#ifdef CONFIG_SMP + ori r12, r12, MAS2_M +#endif mtspr SPRN_MAS2, r12 li r10, (_PAGE_HWEXEC | _PAGE_PRESENT) @@ -746,15 +764,15 @@ finish_tlb_load: iseleq r12, r12, r10 #ifdef CONFIG_PTE_64BIT -2: rlwimi r12, r13, 24, 0, 7 /* grab RPN[32:39] */ + rlwimi r12, r13, 24, 0, 7 /* grab RPN[32:39] */ rlwimi r12, r11, 24, 8, 19 /* grab RPN[40:51] */ mtspr SPRN_MAS3, r12 -BEGIN_FTR_SECTION +BEGIN_MMU_FTR_SECTION srwi r10, r13, 8 /* grab RPN[8:31] */ mtspr SPRN_MAS7, r10 -END_FTR_SECTION_IFSET(CPU_FTR_BIG_PHYS) +END_MMU_FTR_SECTION_IFSET(MMU_FTR_BIG_PHYS) #else -2: rlwimi r11, r12, 0, 20, 31 /* Extract RPN from PTE and merge with perms */ + rlwimi r11, r12, 0, 20, 31 /* Extract RPN from PTE and merge with perms */ mtspr SPRN_MAS3, r11 #endif #ifdef CONFIG_E200 @@ -1037,6 +1055,63 @@ _GLOBAL(flush_dcache_L1) blr +#ifdef CONFIG_SMP +/* When we get here, r24 needs to hold the CPU # */ + .globl __secondary_start +__secondary_start: + lis r3,__secondary_hold_acknowledge@h + ori r3,r3,__secondary_hold_acknowledge@l + stw 
r24,0(r3) + + li r3,0 + mr r4,r24 /* Why? */ + bl call_setup_cpu + + lis r3,tlbcam_index@ha + lwz r3,tlbcam_index@l(r3) + mtctr r3 + li r26,0 /* r26 safe? */ + + /* Load each CAM entry */ +1: mr r3,r26 + bl loadcam_entry + addi r26,r26,1 + bdnz 1b + + /* get current_thread_info and current */ + lis r1,secondary_ti@ha + lwz r1,secondary_ti@l(r1) + lwz r2,TI_TASK(r1) + + /* stack */ + addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD + li r0,0 + stw r0,0(r1) + + /* ptr to current thread */ + addi r4,r2,THREAD /* address of our thread_struct */ + mtspr SPRN_SPRG3,r4 + + /* Setup the defaults for TLB entries */ + li r4,(MAS4_TSIZED(BOOKE_PAGESZ_4K))@l + mtspr SPRN_MAS4,r4 + + /* Jump to start_secondary */ + lis r4,MSR_KERNEL@h + ori r4,r4,MSR_KERNEL@l + lis r3,start_secondary@h + ori r3,r3,start_secondary@l + mtspr SPRN_SRR0,r3 + mtspr SPRN_SRR1,r4 + sync + rfi + sync + + .globl __secondary_hold_acknowledge +__secondary_hold_acknowledge: + .long -1 +#endif + /* * We put a few things here that have to be page-aligned. This stuff * goes at the beginning of the data segment, which is page-aligned. diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c index 64299d28f364..6e3f62493659 100644 --- a/arch/powerpc/kernel/ibmebus.c +++ b/arch/powerpc/kernel/ibmebus.c @@ -47,7 +47,7 @@ #include <asm/abs_addr.h> static struct device ibmebus_bus_device = { /* fake "parent" device */ - .bus_id = "ibmebus", + .init_name = "ibmebus", }; struct bus_type ibmebus_bus_type; @@ -231,6 +231,7 @@ void ibmebus_free_irq(u32 ist, void *dev_id) unsigned int irq = irq_find_mapping(NULL, ist); free_irq(irq, dev_id); + irq_dispose_mapping(irq); } EXPORT_SYMBOL(ibmebus_free_irq); diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c index 31982d05d81a..88d9c1d5e5fb 100644 --- a/arch/powerpc/kernel/idle.c +++ b/arch/powerpc/kernel/idle.c @@ -69,10 +69,15 @@ void cpu_idle(void) smp_mb(); local_irq_disable(); + /* Don't trace irqs off for idle */ + stop_critical_timings(); + /* check again after disabling irqs */ if (!need_resched() && !cpu_should_die()) ppc_md.power_save(); + start_critical_timings(); + local_irq_enable(); set_thread_flag(TIF_POLLING_NRFLAG); diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c index ac2a21f45c75..b3abebb7ee64 100644 --- a/arch/powerpc/kernel/machine_kexec.c +++ b/arch/powerpc/kernel/machine_kexec.c @@ -13,13 +13,17 @@ #include <linux/reboot.h> #include <linux/threads.h> #include <linux/lmb.h> +#include <linux/of.h> #include <asm/machdep.h> #include <asm/prom.h> +#include <asm/sections.h> void machine_crash_shutdown(struct pt_regs *regs) { if (ppc_md.machine_crash_shutdown) ppc_md.machine_crash_shutdown(regs); + else + default_machine_crash_shutdown(regs); } /* @@ -31,11 +35,8 @@ int machine_kexec_prepare(struct kimage *image) { if (ppc_md.machine_kexec_prepare) return ppc_md.machine_kexec_prepare(image); - /* - * Fail if platform doesn't provide its own machine_kexec_prepare - * implementation. - */ - return -ENOSYS; + else + return default_machine_kexec_prepare(image); } void machine_kexec_cleanup(struct kimage *image) @@ -52,13 +53,11 @@ void machine_kexec(struct kimage *image) { if (ppc_md.machine_kexec) ppc_md.machine_kexec(image); - else { - /* - * Fall back to normal restart if platform doesn't provide - * its own kexec function, and user insist to kexec... - */ - machine_restart(NULL); - } + else + default_machine_kexec(image); + + /* Fall back to normal restart if we're still alive. 
*/ + machine_restart(NULL); for(;;); } @@ -118,3 +117,71 @@ int overlaps_crashkernel(unsigned long start, unsigned long size) { return (start + size) > crashk_res.start && start <= crashk_res.end; } + +/* Values we need to export to the second kernel via the device tree. */ +static unsigned long kernel_end; +static unsigned long crashk_size; + +static struct property kernel_end_prop = { + .name = "linux,kernel-end", + .length = sizeof(unsigned long), + .value = &kernel_end, +}; + +static struct property crashk_base_prop = { + .name = "linux,crashkernel-base", + .length = sizeof(unsigned long), + .value = &crashk_res.start, +}; + +static struct property crashk_size_prop = { + .name = "linux,crashkernel-size", + .length = sizeof(unsigned long), + .value = &crashk_size, +}; + +static void __init export_crashk_values(struct device_node *node) +{ + struct property *prop; + + /* There might be existing crash kernel properties, but we can't + * be sure what's in them, so remove them. */ + prop = of_find_property(node, "linux,crashkernel-base", NULL); + if (prop) + prom_remove_property(node, prop); + + prop = of_find_property(node, "linux,crashkernel-size", NULL); + if (prop) + prom_remove_property(node, prop); + + if (crashk_res.start != 0) { + prom_add_property(node, &crashk_base_prop); + crashk_size = crashk_res.end - crashk_res.start + 1; + prom_add_property(node, &crashk_size_prop); + } +} + +static int __init kexec_setup(void) +{ + struct device_node *node; + struct property *prop; + + node = of_find_node_by_path("/chosen"); + if (!node) + return -ENOENT; + + /* remove any stale properties so ours can be found */ + prop = of_find_property(node, kernel_end_prop.name, NULL); + if (prop) + prom_remove_property(node, prop); + + /* information needed by userspace when using default_machine_kexec */ + kernel_end = __pa(_end); + prom_add_property(node, &kernel_end_prop); + + export_crashk_values(node); + + of_node_put(node); + return 0; +} +late_initcall(kexec_setup); diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c index 3c4ca046e854..49e705fcee6d 100644 --- a/arch/powerpc/kernel/machine_kexec_64.c +++ b/arch/powerpc/kernel/machine_kexec_64.c @@ -289,7 +289,7 @@ void default_machine_kexec(struct kimage *image) } /* Values we need to export to the second kernel via the device tree. 
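kexec_setup() above publishes linux,kernel-end (and, when set, the crashkernel base/size) under /chosen so that the kexec userspace tools and the next kernel can locate them. A rough illustration of reading such a property back on the running system — the /proc/device-tree path and the assumption that the value is a native unsigned long written by this kernel follow from the code above; the rest is a made-up example:

#include <stdio.h>

int main(void)
{
	unsigned long kernel_end = 0;
	FILE *f = fopen("/proc/device-tree/chosen/linux,kernel-end", "rb");

	if (!f)
		return 1;
	if (fread(&kernel_end, sizeof(kernel_end), 1, f) == 1)
		printf("linux,kernel-end = 0x%lx\n", kernel_end);
	fclose(f);
	return 0;
}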
*/ -static unsigned long htab_base, kernel_end; +static unsigned long htab_base; static struct property htab_base_prop = { .name = "linux,htab-base", @@ -303,25 +303,20 @@ static struct property htab_size_prop = { .value = &htab_size_bytes, }; -static struct property kernel_end_prop = { - .name = "linux,kernel-end", - .length = sizeof(unsigned long), - .value = &kernel_end, -}; - -static void __init export_htab_values(void) +static int __init export_htab_values(void) { struct device_node *node; struct property *prop; + /* On machines with no htab htab_address is NULL */ + if (!htab_address) + return -ENODEV; + node = of_find_node_by_path("/chosen"); if (!node) - return; + return -ENODEV; /* remove any stale propertys so ours can be found */ - prop = of_find_property(node, kernel_end_prop.name, NULL); - if (prop) - prom_remove_property(node, prop); prop = of_find_property(node, htab_base_prop.name, NULL); if (prop) prom_remove_property(node, prop); @@ -329,68 +324,11 @@ static void __init export_htab_values(void) if (prop) prom_remove_property(node, prop); - /* information needed by userspace when using default_machine_kexec */ - kernel_end = __pa(_end); - prom_add_property(node, &kernel_end_prop); - - /* On machines with no htab htab_address is NULL */ - if (NULL == htab_address) - goto out; - htab_base = __pa(htab_address); prom_add_property(node, &htab_base_prop); prom_add_property(node, &htab_size_prop); - out: - of_node_put(node); -} - -static struct property crashk_base_prop = { - .name = "linux,crashkernel-base", - .length = sizeof(unsigned long), - .value = &crashk_res.start, -}; - -static unsigned long crashk_size; - -static struct property crashk_size_prop = { - .name = "linux,crashkernel-size", - .length = sizeof(unsigned long), - .value = &crashk_size, -}; - -static void __init export_crashk_values(void) -{ - struct device_node *node; - struct property *prop; - - node = of_find_node_by_path("/chosen"); - if (!node) - return; - - /* There might be existing crash kernel properties, but we can't - * be sure what's in them, so remove them. 
*/ - prop = of_find_property(node, "linux,crashkernel-base", NULL); - if (prop) - prom_remove_property(node, prop); - - prop = of_find_property(node, "linux,crashkernel-size", NULL); - if (prop) - prom_remove_property(node, prop); - - if (crashk_res.start != 0) { - prom_add_property(node, &crashk_base_prop); - crashk_size = crashk_res.end - crashk_res.start + 1; - prom_add_property(node, &crashk_size_prop); - } - of_node_put(node); -} - -static int __init kexec_setup(void) -{ - export_htab_values(); - export_crashk_values(); return 0; } -__initcall(kexec_setup); +late_initcall(export_htab_values); diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S index 5c33bc14bd9f..15f28e0de78d 100644 --- a/arch/powerpc/kernel/misc_32.S +++ b/arch/powerpc/kernel/misc_32.S @@ -29,6 +29,7 @@ #include <asm/asm-offsets.h> #include <asm/processor.h> #include <asm/kexec.h> +#include <asm/bug.h> .text @@ -271,231 +272,6 @@ _GLOBAL(real_writeb) #endif /* CONFIG_40x */ -/* - * Flush MMU TLB - */ -#ifndef CONFIG_FSL_BOOKE -_GLOBAL(_tlbil_all) -_GLOBAL(_tlbil_pid) -#endif -_GLOBAL(_tlbia) -#if defined(CONFIG_40x) - sync /* Flush to memory before changing mapping */ - tlbia - isync /* Flush shadow TLB */ -#elif defined(CONFIG_44x) - li r3,0 - sync - - /* Load high watermark */ - lis r4,tlb_44x_hwater@ha - lwz r5,tlb_44x_hwater@l(r4) - -1: tlbwe r3,r3,PPC44x_TLB_PAGEID - addi r3,r3,1 - cmpw 0,r3,r5 - ble 1b - - isync -#elif defined(CONFIG_FSL_BOOKE) - /* Invalidate all entries in TLB0 */ - li r3, 0x04 - tlbivax 0,3 - /* Invalidate all entries in TLB1 */ - li r3, 0x0c - tlbivax 0,3 - msync -#ifdef CONFIG_SMP - tlbsync -#endif /* CONFIG_SMP */ -#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */ -#if defined(CONFIG_SMP) - rlwinm r8,r1,0,0,(31-THREAD_SHIFT) - lwz r8,TI_CPU(r8) - oris r8,r8,10 - mfmsr r10 - SYNC - rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */ - rlwinm r0,r0,0,28,26 /* clear DR */ - mtmsr r0 - SYNC_601 - isync - lis r9,mmu_hash_lock@h - ori r9,r9,mmu_hash_lock@l - tophys(r9,r9) -10: lwarx r7,0,r9 - cmpwi 0,r7,0 - bne- 10b - stwcx. r8,0,r9 - bne- 10b - sync - tlbia - sync - TLBSYNC - li r0,0 - stw r0,0(r9) /* clear mmu_hash_lock */ - mtmsr r10 - SYNC_601 - isync -#else /* CONFIG_SMP */ - sync - tlbia - sync -#endif /* CONFIG_SMP */ -#endif /* ! defined(CONFIG_40x) */ - blr - -/* - * Flush MMU TLB for a particular address - */ -#ifndef CONFIG_FSL_BOOKE -_GLOBAL(_tlbil_va) -#endif -_GLOBAL(_tlbie) -#if defined(CONFIG_40x) - /* We run the search with interrupts disabled because we have to change - * the PID and I don't want to preempt when that happens. - */ - mfmsr r5 - mfspr r6,SPRN_PID - wrteei 0 - mtspr SPRN_PID,r4 - tlbsx. r3, 0, r3 - mtspr SPRN_PID,r6 - wrtee r5 - bne 10f - sync - /* There are only 64 TLB entries, so r3 < 64, which means bit 25 is clear. - * Since 25 is the V bit in the TLB_TAG, loading this value will invalidate - * the TLB entry. */ - tlbwe r3, r3, TLB_TAG - isync -10: - -#elif defined(CONFIG_44x) - mfspr r5,SPRN_MMUCR - rlwimi r5,r4,0,24,31 /* Set TID */ - - /* We have to run the search with interrupts disabled, even critical - * and debug interrupts (in fact the only critical exceptions we have - * are debug and machine check). Otherwise an interrupt which causes - * a TLB miss can clobber the MMUCR between the mtspr and the tlbsx. */ - mfmsr r4 - lis r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@ha - addi r6,r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@l - andc r6,r4,r6 - mtmsr r6 - mtspr SPRN_MMUCR,r5 - tlbsx. 
r3, 0, r3 - mtmsr r4 - bne 10f - sync - /* There are only 64 TLB entries, so r3 < 64, - * which means bit 22, is clear. Since 22 is - * the V bit in the TLB_PAGEID, loading this - * value will invalidate the TLB entry. - */ - tlbwe r3, r3, PPC44x_TLB_PAGEID - isync -10: -#elif defined(CONFIG_FSL_BOOKE) - rlwinm r4, r3, 0, 0, 19 - ori r5, r4, 0x08 /* TLBSEL = 1 */ - tlbivax 0, r4 - tlbivax 0, r5 - msync -#if defined(CONFIG_SMP) - tlbsync -#endif /* CONFIG_SMP */ -#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */ -#if defined(CONFIG_SMP) - rlwinm r8,r1,0,0,(31-THREAD_SHIFT) - lwz r8,TI_CPU(r8) - oris r8,r8,11 - mfmsr r10 - SYNC - rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */ - rlwinm r0,r0,0,28,26 /* clear DR */ - mtmsr r0 - SYNC_601 - isync - lis r9,mmu_hash_lock@h - ori r9,r9,mmu_hash_lock@l - tophys(r9,r9) -10: lwarx r7,0,r9 - cmpwi 0,r7,0 - bne- 10b - stwcx. r8,0,r9 - bne- 10b - eieio - tlbie r3 - sync - TLBSYNC - li r0,0 - stw r0,0(r9) /* clear mmu_hash_lock */ - mtmsr r10 - SYNC_601 - isync -#else /* CONFIG_SMP */ - tlbie r3 - sync -#endif /* CONFIG_SMP */ -#endif /* ! CONFIG_40x */ - blr - -#if defined(CONFIG_FSL_BOOKE) -/* - * Flush MMU TLB, but only on the local processor (no broadcast) - */ -_GLOBAL(_tlbil_all) -#define MMUCSR0_TLBFI (MMUCSR0_TLB0FI | MMUCSR0_TLB1FI | \ - MMUCSR0_TLB2FI | MMUCSR0_TLB3FI) - li r3,(MMUCSR0_TLBFI)@l - mtspr SPRN_MMUCSR0, r3 -1: - mfspr r3,SPRN_MMUCSR0 - andi. r3,r3,MMUCSR0_TLBFI@l - bne 1b - blr - -/* - * Flush MMU TLB for a particular process id, but only on the local processor - * (no broadcast) - */ -_GLOBAL(_tlbil_pid) -/* we currently do an invalidate all since we don't have per pid invalidate */ - li r3,(MMUCSR0_TLBFI)@l - mtspr SPRN_MMUCSR0, r3 -1: - mfspr r3,SPRN_MMUCSR0 - andi. r3,r3,MMUCSR0_TLBFI@l - bne 1b - msync - isync - blr - -/* - * Flush MMU TLB for a particular address, but only on the local processor - * (no broadcast) - */ -_GLOBAL(_tlbil_va) - mfmsr r10 - wrteei 0 - slwi r4,r4,16 - mtspr SPRN_MAS6,r4 /* assume AS=0 for now */ - tlbsx 0,r3 - mfspr r4,SPRN_MAS1 /* check valid */ - andis. r3,r4,MAS1_VALID@h - beq 1f - rlwinm r4,r4,0,1,31 - mtspr SPRN_MAS1,r4 - tlbwe - msync - isync -1: wrtee r10 - blr -#endif /* CONFIG_FSL_BOOKE */ - /* * Flush instruction cache. 
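The next hunks in misc_32.S replace the hard-coded 4096 byte counts and 0..19 masks with PAGE_SIZE and 31-PAGE_SHIFT expressions so the cache loops stay correct when the kernel is built with a larger page size. In C terms, the patched rlwinm/li pair sets up the following loop; the constants are assumed example values, the kernel takes them from asm/page.h and asm/cache.h:

#include <stddef.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define L1_CACHE_BYTES	32

static void flush_dcache_page_lines(unsigned long addr,
				    void (*flush_line)(unsigned long))
{
	unsigned long base = addr & ~(PAGE_SIZE - 1);	  /* rlwinm r3,r3,0,0,31-PAGE_SHIFT */
	unsigned long lines = PAGE_SIZE / L1_CACHE_BYTES; /* li r4,PAGE_SIZE/L1_CACHE_BYTES */
	unsigned long i;

	for (i = 0; i < lines; i++)
		flush_line(base + i * L1_CACHE_BYTES);	  /* dcbst / addi / bdnz loop */
}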
@@ -650,8 +426,8 @@ _GLOBAL(__flush_dcache_icache) BEGIN_FTR_SECTION blr END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE) - rlwinm r3,r3,0,0,19 /* Get page base address */ - li r4,4096/L1_CACHE_BYTES /* Number of lines in a page */ + rlwinm r3,r3,0,0,31-PAGE_SHIFT /* Get page base address */ + li r4,PAGE_SIZE/L1_CACHE_BYTES /* Number of lines in a page */ mtctr r4 mr r6,r3 0: dcbst 0,r3 /* Write line to ram */ @@ -691,8 +467,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE) rlwinm r0,r10,0,28,26 /* clear DR */ mtmsr r0 isync - rlwinm r3,r3,0,0,19 /* Get page base address */ - li r4,4096/L1_CACHE_BYTES /* Number of lines in a page */ + rlwinm r3,r3,0,0,31-PAGE_SHIFT /* Get page base address */ + li r4,PAGE_SIZE/L1_CACHE_BYTES /* Number of lines in a page */ mtctr r4 mr r6,r3 0: dcbst 0,r3 /* Write line to ram */ @@ -716,7 +492,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE) * void clear_pages(void *page, int order) ; */ _GLOBAL(clear_pages) - li r0,4096/L1_CACHE_BYTES + li r0,PAGE_SIZE/L1_CACHE_BYTES slw r0,r0,r4 mtctr r0 #ifdef CONFIG_8xx @@ -774,7 +550,7 @@ _GLOBAL(copy_page) dcbt r5,r4 li r11,L1_CACHE_BYTES+4 #endif /* MAX_COPY_PREFETCH */ - li r0,4096/L1_CACHE_BYTES - MAX_COPY_PREFETCH + li r0,PAGE_SIZE/L1_CACHE_BYTES - MAX_COPY_PREFETCH crclr 4*cr0+eq 2: mtctr r0 diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c index 7ff292475269..43e7e3a7f130 100644 --- a/arch/powerpc/kernel/module.c +++ b/arch/powerpc/kernel/module.c @@ -78,6 +78,12 @@ int module_finalize(const Elf_Ehdr *hdr, (void *)sect->sh_addr, (void *)sect->sh_addr + sect->sh_size); + sect = find_section(hdr, sechdrs, "__mmu_ftr_fixup"); + if (sect != NULL) + do_feature_fixups(cur_cpu_spec->mmu_features, + (void *)sect->sh_addr, + (void *)sect->sh_addr + sect->sh_size); + #ifdef CONFIG_PPC64 sect = find_section(hdr, sechdrs, "__fw_ftr_fixup"); if (sect != NULL) diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c index 2df91a03462a..f832773fc28e 100644 --- a/arch/powerpc/kernel/module_32.c +++ b/arch/powerpc/kernel/module_32.c @@ -22,6 +22,7 @@ #include <linux/fs.h> #include <linux/string.h> #include <linux/kernel.h> +#include <linux/ftrace.h> #include <linux/cache.h> #include <linux/bug.h> #include <linux/sort.h> @@ -53,6 +54,9 @@ static unsigned int count_relocs(const Elf32_Rela *rela, unsigned int num) r_addend = rela[i].r_addend; } +#ifdef CONFIG_DYNAMIC_FTRACE + _count_relocs++; /* add one for ftrace_caller */ +#endif return _count_relocs; } @@ -306,5 +310,11 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, return -ENOEXEC; } } +#ifdef CONFIG_DYNAMIC_FTRACE + module->arch.tramp = + do_plt_call(module->module_core, + (unsigned long)ftrace_caller, + sechdrs, module); +#endif return 0; } diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c index 1af2377e4992..8992b031a7b6 100644 --- a/arch/powerpc/kernel/module_64.c +++ b/arch/powerpc/kernel/module_64.c @@ -20,6 +20,7 @@ #include <linux/moduleloader.h> #include <linux/err.h> #include <linux/vmalloc.h> +#include <linux/ftrace.h> #include <linux/bug.h> #include <asm/module.h> #include <asm/firmware.h> @@ -163,6 +164,11 @@ static unsigned long get_stubs_size(const Elf64_Ehdr *hdr, } } +#ifdef CONFIG_DYNAMIC_FTRACE + /* make the trampoline to the ftrace_caller */ + relocs++; +#endif + DEBUGP("Looks like a total of %lu stubs, max\n", relocs); return relocs * sizeof(struct ppc64_stub_entry); } @@ -441,5 +447,12 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, } } +#ifdef CONFIG_DYNAMIC_FTRACE + me->arch.toc 
= my_r2(sechdrs, me); + me->arch.tramp = stub_for_addr(sechdrs, + (unsigned long)ftrace_caller, + me); +#endif + return 0; } diff --git a/arch/powerpc/kernel/of_device.c b/arch/powerpc/kernel/of_device.c index f3c9cae01dd5..fa983a59c4ce 100644 --- a/arch/powerpc/kernel/of_device.c +++ b/arch/powerpc/kernel/of_device.c @@ -14,7 +14,6 @@ static void of_device_make_bus_id(struct of_device *dev) { static atomic_t bus_no_reg_magic; struct device_node *node = dev->node; - char *name = dev->dev.bus_id; const u32 *reg; u64 addr; int magic; @@ -27,14 +26,12 @@ static void of_device_make_bus_id(struct of_device *dev) reg = of_get_property(node, "dcr-reg", NULL); if (reg) { #ifdef CONFIG_PPC_DCR_NATIVE - snprintf(name, BUS_ID_SIZE, "d%x.%s", - *reg, node->name); + dev_set_name(&dev->dev, "d%x.%s", *reg, node->name); #else /* CONFIG_PPC_DCR_NATIVE */ addr = of_translate_dcr_address(node, *reg, NULL); if (addr != OF_BAD_ADDR) { - snprintf(name, BUS_ID_SIZE, - "D%llx.%s", (unsigned long long)addr, - node->name); + dev_set_name(&dev->dev, "D%llx.%s", + (unsigned long long)addr, node->name); return; } #endif /* !CONFIG_PPC_DCR_NATIVE */ @@ -48,9 +45,8 @@ static void of_device_make_bus_id(struct of_device *dev) if (reg) { addr = of_translate_address(node, reg); if (addr != OF_BAD_ADDR) { - snprintf(name, BUS_ID_SIZE, - "%llx.%s", (unsigned long long)addr, - node->name); + dev_set_name(&dev->dev, "%llx.%s", + (unsigned long long)addr, node->name); return; } } @@ -60,7 +56,7 @@ static void of_device_make_bus_id(struct of_device *dev) * counter (and pray...) */ magic = atomic_add_return(1, &bus_no_reg_magic); - snprintf(name, BUS_ID_SIZE, "%s.%d", node->name, magic - 1); + dev_set_name(&dev->dev, "%s.%d", node->name, magic - 1); } struct of_device *of_device_alloc(struct device_node *np, @@ -80,7 +76,7 @@ struct of_device *of_device_alloc(struct device_node *np, dev->dev.archdata.of_node = np; if (bus_id) - strlcpy(dev->dev.bus_id, bus_id, BUS_ID_SIZE); + dev_set_name(&dev->dev, bus_id); else of_device_make_bus_id(dev); diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c index 48a347133f41..c744b327bcab 100644 --- a/arch/powerpc/kernel/paca.c +++ b/arch/powerpc/kernel/paca.c @@ -37,6 +37,7 @@ struct lppaca lppaca[] = { .end_of_quantum = 0xfffffffffffffffful, .slb_count = 64, .vmxregs_in_use = 0, + .page_ins = 0, }, }; diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index f36936d9fda3..2538030954d8 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c @@ -37,13 +37,7 @@ #include <asm/machdep.h> #include <asm/ppc-pci.h> #include <asm/firmware.h> - -#ifdef DEBUG -#include <asm/udbg.h> -#define DBG(fmt...) printk(fmt) -#else -#define DBG(fmt...) 
-#endif +#include <asm/eeh.h> static DEFINE_SPINLOCK(hose_spinlock); @@ -53,8 +47,9 @@ static int global_phb_number; /* Global phb counter */ /* ISA Memory physical address */ resource_size_t isa_mem_base; -/* Default PCI flags is 0 */ -unsigned int ppc_pci_flags; +/* Default PCI flags is 0 on ppc32, modified at boot on ppc64 */ +unsigned int ppc_pci_flags = 0; + static struct dma_mapping_ops *pci_dma_ops; @@ -165,8 +160,6 @@ EXPORT_SYMBOL(pci_domain_nr); */ struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node) { - if (!have_of) - return NULL; while(node) { struct pci_controller *hose, *tmp; list_for_each_entry_safe(hose, tmp, &hose_list, list_node) @@ -208,26 +201,6 @@ char __devinit *pcibios_setup(char *str) return str; } -void __devinit pcibios_setup_new_device(struct pci_dev *dev) -{ - struct dev_archdata *sd = &dev->dev.archdata; - - sd->of_node = pci_device_to_OF_node(dev); - - DBG("PCI: device %s OF node: %s\n", pci_name(dev), - sd->of_node ? sd->of_node->full_name : "<none>"); - - sd->dma_ops = pci_dma_ops; -#ifdef CONFIG_PPC32 - sd->dma_data = (void *)PCI_DRAM_OFFSET; -#endif - set_dev_node(&dev->dev, pcibus_to_node(dev->bus)); - - if (ppc_md.pci_dma_dev_setup) - ppc_md.pci_dma_dev_setup(dev); -} -EXPORT_SYMBOL(pcibios_setup_new_device); - /* * Reads the interrupt pin to determine if interrupt is use by card. * If the interrupt is used, then gets the interrupt line from the @@ -252,7 +225,7 @@ int pci_read_irq_line(struct pci_dev *pci_dev) return -1; #endif - DBG("Try to map irq for %s...\n", pci_name(pci_dev)); + pr_debug("PCI: Try to map irq for %s...\n", pci_name(pci_dev)); #ifdef DEBUG memset(&oirq, 0xff, sizeof(oirq)); @@ -276,26 +249,26 @@ int pci_read_irq_line(struct pci_dev *pci_dev) line == 0xff || line == 0) { return -1; } - DBG(" -> no map ! Using line %d (pin %d) from PCI config\n", - line, pin); + pr_debug(" No map ! Using line %d (pin %d) from PCI config\n", + line, pin); virq = irq_create_mapping(NULL, line); if (virq != NO_IRQ) set_irq_type(virq, IRQ_TYPE_LEVEL_LOW); } else { - DBG(" -> got one, spec %d cells (0x%08x 0x%08x...) on %s\n", - oirq.size, oirq.specifier[0], oirq.specifier[1], + pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) 
on %s\n", + oirq.size, oirq.specifier[0], oirq.specifier[1], oirq.controller->full_name); virq = irq_create_of_mapping(oirq.controller, oirq.specifier, oirq.size); } if(virq == NO_IRQ) { - DBG(" -> failed to map !\n"); + pr_debug(" Failed to map !\n"); return -1; } - DBG(" -> mapped to linux irq %d\n", virq); + pr_debug(" Mapped to linux irq %d\n", virq); pci_dev->irq = virq; @@ -397,13 +370,10 @@ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp, } /* XXX would be nice to have a way to ask for write-through */ - prot |= _PAGE_NO_CACHE; if (write_combine) - prot &= ~_PAGE_GUARDED; + return pgprot_noncached_wc(prot); else - prot |= _PAGE_GUARDED; - - return __pgprot(prot); + return pgprot_noncached(prot); } /* @@ -414,19 +384,17 @@ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp, pgprot_t pci_phys_mem_access_prot(struct file *file, unsigned long pfn, unsigned long size, - pgprot_t protection) + pgprot_t prot) { struct pci_dev *pdev = NULL; struct resource *found = NULL; - unsigned long prot = pgprot_val(protection); resource_size_t offset = ((resource_size_t)pfn) << PAGE_SHIFT; int i; if (page_is_ram(pfn)) - return __pgprot(prot); - - prot |= _PAGE_NO_CACHE | _PAGE_GUARDED; + return prot; + prot = pgprot_noncached(prot); for_each_pci_dev(pdev) { for (i = 0; i <= PCI_ROM_RESOURCE; i++) { struct resource *rp = &pdev->resource[i]; @@ -447,14 +415,14 @@ pgprot_t pci_phys_mem_access_prot(struct file *file, } if (found) { if (found->flags & IORESOURCE_PREFETCH) - prot &= ~_PAGE_GUARDED; + prot = pgprot_noncached_wc(prot); pci_dev_put(pdev); } - DBG("non-PCI map for %llx, prot: %lx\n", - (unsigned long long)offset, prot); + pr_debug("PCI: Non-PCI map for %llx, prot: %lx\n", + (unsigned long long)offset, pgprot_val(prot)); - return __pgprot(prot); + return prot; } @@ -610,8 +578,7 @@ int pci_mmap_legacy_page_range(struct pci_bus *bus, pr_debug(" -> mapping phys %llx\n", (unsigned long long)offset); vma->vm_pgoff = offset >> PAGE_SHIFT; - vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) - | _PAGE_NO_CACHE | _PAGE_GUARDED); + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, vma->vm_end - vma->vm_start, vma->vm_page_prot); @@ -853,15 +820,12 @@ void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose, int pci_proc_domain(struct pci_bus *bus) { struct pci_controller *hose = pci_bus_to_host(bus); -#ifdef CONFIG_PPC64 - return hose->buid != 0; -#else + if (!(ppc_pci_flags & PPC_PCI_ENABLE_PROC_DOMAINS)) return 0; if (ppc_pci_flags & PPC_PCI_COMPAT_DOMAIN_0) return hose->global_number != 0; return 1; -#endif } void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region, @@ -1083,27 +1047,50 @@ static void __devinit pcibios_fixup_bridge(struct pci_bus *bus) } } -static void __devinit __pcibios_fixup_bus(struct pci_bus *bus) +void __devinit pcibios_setup_bus_self(struct pci_bus *bus) { - struct pci_dev *dev = bus->self; - - pr_debug("PCI: Fixup bus %d (%s)\n", bus->number, dev ? pci_name(dev) : "PHB"); - - /* Fixup PCI<->PCI bridges. Host bridges are handled separately, for - * now differently between 32 and 64 bits. - */ - if (dev != NULL) + /* Fix up the bus resources for P2P bridges */ + if (bus->self != NULL) pcibios_fixup_bridge(bus); - /* Additional setup that is different between 32 and 64 bits for now */ - pcibios_do_bus_setup(bus); - - /* Platform specific bus fixups */ + /* Platform specific bus fixups. 
This is currently only used + * by fsl_pci and I'm hoping to get rid of it at some point + */ if (ppc_md.pcibios_fixup_bus) ppc_md.pcibios_fixup_bus(bus); - /* Read default IRQs and fixup if necessary */ + /* Setup bus DMA mappings */ + if (ppc_md.pci_dma_bus_setup) + ppc_md.pci_dma_bus_setup(bus); +} + +void __devinit pcibios_setup_bus_devices(struct pci_bus *bus) +{ + struct pci_dev *dev; + + pr_debug("PCI: Fixup bus devices %d (%s)\n", + bus->number, bus->self ? pci_name(bus->self) : "PHB"); + list_for_each_entry(dev, &bus->devices, bus_list) { + struct dev_archdata *sd = &dev->dev.archdata; + + /* Setup OF node pointer in archdata */ + sd->of_node = pci_device_to_OF_node(dev); + + /* Fixup NUMA node as it may not be setup yet by the generic + * code and is needed by the DMA init + */ + set_dev_node(&dev->dev, pcibus_to_node(dev->bus)); + + /* Hook up default DMA ops */ + sd->dma_ops = pci_dma_ops; + sd->dma_data = (void *)PCI_DRAM_OFFSET; + + /* Additional platform DMA/iommu setup */ + if (ppc_md.pci_dma_dev_setup) + ppc_md.pci_dma_dev_setup(dev); + + /* Read default IRQs and fixup if necessary */ pci_read_irq_line(dev); if (ppc_md.pci_irq_fixup) ppc_md.pci_irq_fixup(dev); @@ -1113,22 +1100,19 @@ static void __devinit __pcibios_fixup_bus(struct pci_bus *bus) void __devinit pcibios_fixup_bus(struct pci_bus *bus) { /* When called from the generic PCI probe, read PCI<->PCI bridge - * bases before proceeding + * bases. This is -not- called when generating the PCI tree from + * the OF device-tree. */ if (bus->self != NULL) pci_read_bridge_bases(bus); - __pcibios_fixup_bus(bus); -} -EXPORT_SYMBOL(pcibios_fixup_bus); -/* When building a bus from the OF tree rather than probing, we need a - * slightly different version of the fixup which doesn't read the - * bridge bases using config space accesses - */ -void __devinit pcibios_fixup_of_probed_bus(struct pci_bus *bus) -{ - __pcibios_fixup_bus(bus); + /* Now fixup the bus bus */ + pcibios_setup_bus_self(bus); + + /* Now fixup devices on that bus */ + pcibios_setup_bus_devices(bus); } +EXPORT_SYMBOL(pcibios_fixup_bus); static int skip_isa_ioresource_align(struct pci_dev *dev) { @@ -1198,10 +1182,10 @@ static int __init reparent_resources(struct resource *parent, *pp = NULL; for (p = res->child; p != NULL; p = p->sibling) { p->parent = res; - DBG(KERN_INFO "PCI: reparented %s [%llx..%llx] under %s\n", - p->name, - (unsigned long long)p->start, - (unsigned long long)p->end, res->name); + pr_debug("PCI: Reparented %s [%llx..%llx] under %s\n", + p->name, + (unsigned long long)p->start, + (unsigned long long)p->end, res->name); } return 0; } @@ -1245,9 +1229,12 @@ void pcibios_allocate_bus_resources(struct pci_bus *bus) int i; struct resource *res, *pr; + pr_debug("PCI: Allocating bus resources for %04x:%02x...\n", + pci_domain_nr(bus), bus->number); + for (i = 0; i < PCI_BUS_NUM_RESOURCES; ++i) { if ((res = bus->resource[i]) == NULL || !res->flags - || res->start > res->end) + || res->start > res->end || res->parent) continue; if (bus->parent == NULL) pr = (res->flags & IORESOURCE_IO) ? @@ -1271,14 +1258,14 @@ void pcibios_allocate_bus_resources(struct pci_bus *bus) } } - DBG("PCI: %s (bus %d) bridge rsrc %d: %016llx-%016llx " - "[0x%x], parent %p (%s)\n", - bus->self ? pci_name(bus->self) : "PHB", - bus->number, i, - (unsigned long long)res->start, - (unsigned long long)res->end, - (unsigned int)res->flags, - pr, (pr && pr->name) ? 
pr->name : "nil"); + pr_debug("PCI: %s (bus %d) bridge rsrc %d: %016llx-%016llx " + "[0x%x], parent %p (%s)\n", + bus->self ? pci_name(bus->self) : "PHB", + bus->number, i, + (unsigned long long)res->start, + (unsigned long long)res->end, + (unsigned int)res->flags, + pr, (pr && pr->name) ? pr->name : "nil"); if (pr && !(pr->flags & IORESOURCE_UNSET)) { if (request_resource(pr, res) == 0) @@ -1305,11 +1292,11 @@ static inline void __devinit alloc_resource(struct pci_dev *dev, int idx) { struct resource *pr, *r = &dev->resource[idx]; - DBG("PCI: Allocating %s: Resource %d: %016llx..%016llx [%x]\n", - pci_name(dev), idx, - (unsigned long long)r->start, - (unsigned long long)r->end, - (unsigned int)r->flags); + pr_debug("PCI: Allocating %s: Resource %d: %016llx..%016llx [%x]\n", + pci_name(dev), idx, + (unsigned long long)r->start, + (unsigned long long)r->end, + (unsigned int)r->flags); pr = pci_find_parent_resource(dev, r); if (!pr || (pr->flags & IORESOURCE_UNSET) || @@ -1317,10 +1304,11 @@ static inline void __devinit alloc_resource(struct pci_dev *dev, int idx) printk(KERN_WARNING "PCI: Cannot allocate resource region %d" " of device %s, will remap\n", idx, pci_name(dev)); if (pr) - DBG("PCI: parent is %p: %016llx-%016llx [%x]\n", pr, - (unsigned long long)pr->start, - (unsigned long long)pr->end, - (unsigned int)pr->flags); + pr_debug("PCI: parent is %p: %016llx-%016llx [%x]\n", + pr, + (unsigned long long)pr->start, + (unsigned long long)pr->end, + (unsigned int)pr->flags); /* We'll assign a new address later */ r->flags |= IORESOURCE_UNSET; r->end -= r->start; @@ -1358,7 +1346,8 @@ static void __init pcibios_allocate_resources(int pass) * but keep it unregistered. */ u32 reg; - DBG("PCI: Switching off ROM of %s\n", pci_name(dev)); + pr_debug("PCI: Switching off ROM of %s\n", + pci_name(dev)); r->flags &= ~IORESOURCE_ROM_ENABLE; pci_read_config_dword(dev, dev->rom_base_reg, ®); pci_write_config_dword(dev, dev->rom_base_reg, @@ -1383,7 +1372,7 @@ void __init pcibios_resource_survey(void) } if (!(ppc_pci_flags & PPC_PCI_PROBE_ONLY)) { - DBG("PCI: Assigning unassigned resouces...\n"); + pr_debug("PCI: Assigning unassigned resouces...\n"); pci_assign_unassigned_resources(); } @@ -1393,9 +1382,11 @@ void __init pcibios_resource_survey(void) } #ifdef CONFIG_HOTPLUG -/* This is used by the pSeries hotplug driver to allocate resource + +/* This is used by the PCI hotplug driver to allocate resource * of newly plugged busses. We can try to consolidate with the - * rest of the code later, for now, keep it as-is + * rest of the code later, for now, keep it as-is as our main + * resource allocation function doesn't deal with sub-trees yet. 
*/ void __devinit pcibios_claim_one_bus(struct pci_bus *bus) { @@ -1410,6 +1401,14 @@ void __devinit pcibios_claim_one_bus(struct pci_bus *bus) if (r->parent || !r->start || !r->flags) continue; + + pr_debug("PCI: Claiming %s: " + "Resource %d: %016llx..%016llx [%x]\n", + pci_name(dev), i, + (unsigned long long)r->start, + (unsigned long long)r->end, + (unsigned int)r->flags); + pci_claim_resource(dev, i); } } @@ -1418,6 +1417,31 @@ void __devinit pcibios_claim_one_bus(struct pci_bus *bus) pcibios_claim_one_bus(child_bus); } EXPORT_SYMBOL_GPL(pcibios_claim_one_bus); + + +/* pcibios_finish_adding_to_bus + * + * This is to be called by the hotplug code after devices have been + * added to a bus, this include calling it for a PHB that is just + * being added + */ +void pcibios_finish_adding_to_bus(struct pci_bus *bus) +{ + pr_debug("PCI: Finishing adding to hotplug bus %04x:%02x\n", + pci_domain_nr(bus), bus->number); + + /* Allocate bus and devices resources */ + pcibios_allocate_bus_resources(bus); + pcibios_claim_one_bus(bus); + + /* Add new devices to global lists. Register in proc, sysfs. */ + pci_bus_add_devices(bus); + + /* Fixup EEH */ + eeh_add_device_tree_late(bus); +} +EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus); + #endif /* CONFIG_HOTPLUG */ int pcibios_enable_device(struct pci_dev *dev, int mask) @@ -1428,3 +1452,61 @@ int pcibios_enable_device(struct pci_dev *dev, int mask) return pci_enable_resources(dev, mask); } + +void __devinit pcibios_setup_phb_resources(struct pci_controller *hose) +{ + struct pci_bus *bus = hose->bus; + struct resource *res; + int i; + + /* Hookup PHB IO resource */ + bus->resource[0] = res = &hose->io_resource; + + if (!res->flags) { + printk(KERN_WARNING "PCI: I/O resource not set for host" + " bridge %s (domain %d)\n", + hose->dn->full_name, hose->global_number); +#ifdef CONFIG_PPC32 + /* Workaround for lack of IO resource only on 32-bit */ + res->start = (unsigned long)hose->io_base_virt - isa_io_base; + res->end = res->start + IO_SPACE_LIMIT; + res->flags = IORESOURCE_IO; +#endif /* CONFIG_PPC32 */ + } + + pr_debug("PCI: PHB IO resource = %016llx-%016llx [%lx]\n", + (unsigned long long)res->start, + (unsigned long long)res->end, + (unsigned long)res->flags); + + /* Hookup PHB Memory resources */ + for (i = 0; i < 3; ++i) { + res = &hose->mem_resources[i]; + if (!res->flags) { + if (i > 0) + continue; + printk(KERN_ERR "PCI: Memory resource 0 not set for " + "host bridge %s (domain %d)\n", + hose->dn->full_name, hose->global_number); +#ifdef CONFIG_PPC32 + /* Workaround for lack of MEM resource only on 32-bit */ + res->start = hose->pci_mem_offset; + res->end = (resource_size_t)-1LL; + res->flags = IORESOURCE_MEM; +#endif /* CONFIG_PPC32 */ + } + bus->resource[i+1] = res; + + pr_debug("PCI: PHB MEM resource %d = %016llx-%016llx [%lx]\n", i, + (unsigned long long)res->start, + (unsigned long long)res->end, + (unsigned long)res->flags); + } + + pr_debug("PCI: PHB MEM offset = %016llx\n", + (unsigned long long)hose->pci_mem_offset); + pr_debug("PCI: PHB IO offset = %08lx\n", + (unsigned long)hose->io_base_virt - _IO_BASE); + +} + diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c index 131b1dfa68c6..132cd80afa21 100644 --- a/arch/powerpc/kernel/pci_32.c +++ b/arch/powerpc/kernel/pci_32.c @@ -26,12 +26,6 @@ #undef DEBUG -#ifdef DEBUG -#define DBG(x...) printk(x) -#else -#define DBG(x...) 
-#endif - unsigned long isa_io_base = 0; unsigned long pci_dram_offset = 0; int pcibios_assign_bus_offset = 1; @@ -272,17 +266,14 @@ pci_busdev_to_OF_node(struct pci_bus *bus, int devfn) { struct device_node *parent, *np; - if (!have_of) - return NULL; - - DBG("pci_busdev_to_OF_node(%d,0x%x)\n", bus->number, devfn); + pr_debug("pci_busdev_to_OF_node(%d,0x%x)\n", bus->number, devfn); parent = scan_OF_for_pci_bus(bus); if (parent == NULL) return NULL; - DBG(" parent is %s\n", parent ? parent->full_name : "<NULL>"); + pr_debug(" parent is %s\n", parent ? parent->full_name : "<NULL>"); np = scan_OF_for_pci_dev(parent, devfn); of_node_put(parent); - DBG(" result is %s\n", np ? np->full_name : "<NULL>"); + pr_debug(" result is %s\n", np ? np->full_name : "<NULL>"); /* XXX most callers don't release the returned node * mostly because ppc64 doesn't increase the refcount, @@ -315,8 +306,6 @@ pci_device_from_OF_node(struct device_node* node, u8* bus, u8* devfn) struct pci_controller* hose; struct pci_dev* dev = NULL; - if (!have_of) - return -ENODEV; /* Make sure it's really a PCI device */ hose = pci_find_hose_for_OF_device(node); if (!hose || !hose->dn) @@ -379,10 +368,41 @@ void pcibios_make_OF_bus_map(void) } #endif /* CONFIG_PPC_OF */ +static void __devinit pcibios_scan_phb(struct pci_controller *hose) +{ + struct pci_bus *bus; + struct device_node *node = hose->dn; + unsigned long io_offset; + struct resource *res = &hose->io_resource; + + pr_debug("PCI: Scanning PHB %s\n", + node ? node->full_name : "<NO NAME>"); + + /* Create an empty bus for the toplevel */ + bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, hose); + if (bus == NULL) { + printk(KERN_ERR "Failed to create bus for PCI domain %04x\n", + hose->global_number); + return; + } + bus->secondary = hose->first_busno; + hose->bus = bus; + + /* Fixup IO space offset */ + io_offset = (unsigned long)hose->io_base_virt - isa_io_base; + res->start = (res->start + io_offset) & 0xffffffffu; + res->end = (res->end + io_offset) & 0xffffffffu; + + /* Wire up PHB bus resources */ + pcibios_setup_phb_resources(hose); + + /* Scan children */ + hose->last_busno = bus->subordinate = pci_scan_child_bus(bus); +} + static int __init pcibios_init(void) { struct pci_controller *hose, *tmp; - struct pci_bus *bus; int next_busno = 0; printk(KERN_INFO "PCI: Probing PCI hardware\n"); @@ -395,12 +415,8 @@ static int __init pcibios_init(void) if (pci_assign_all_buses) hose->first_busno = next_busno; hose->last_busno = 0xff; - bus = pci_scan_bus_parented(hose->parent, hose->first_busno, - hose->ops, hose); - if (bus) { - pci_bus_add_devices(bus); - hose->last_busno = bus->subordinate; - } + pcibios_scan_phb(hose); + pci_bus_add_devices(hose->bus); if (pci_assign_all_buses || next_busno <= hose->last_busno) next_busno = hose->last_busno + pcibios_assign_bus_offset; } @@ -410,7 +426,7 @@ static int __init pcibios_init(void) * numbers vs. kernel bus numbers since we may have to * remap them. 
*/ - if (pci_assign_all_buses && have_of) + if (pci_assign_all_buses) pcibios_make_OF_bus_map(); /* Call common code to handle resource allocation */ @@ -425,54 +441,6 @@ static int __init pcibios_init(void) subsys_initcall(pcibios_init); -void __devinit pcibios_do_bus_setup(struct pci_bus *bus) -{ - struct pci_controller *hose = (struct pci_controller *) bus->sysdata; - unsigned long io_offset; - struct resource *res; - int i; - struct pci_dev *dev; - - /* Hookup PHB resources */ - io_offset = (unsigned long)hose->io_base_virt - isa_io_base; - if (bus->parent == NULL) { - /* This is a host bridge - fill in its resources */ - hose->bus = bus; - - bus->resource[0] = res = &hose->io_resource; - if (!res->flags) { - if (io_offset) - printk(KERN_ERR "I/O resource not set for host" - " bridge %d\n", hose->global_number); - res->start = 0; - res->end = IO_SPACE_LIMIT; - res->flags = IORESOURCE_IO; - } - res->start = (res->start + io_offset) & 0xffffffffu; - res->end = (res->end + io_offset) & 0xffffffffu; - - for (i = 0; i < 3; ++i) { - res = &hose->mem_resources[i]; - if (!res->flags) { - if (i > 0) - continue; - printk(KERN_ERR "Memory resource not set for " - "host bridge %d\n", hose->global_number); - res->start = hose->pci_mem_offset; - res->end = ~0U; - res->flags = IORESOURCE_MEM; - } - bus->resource[i+1] = res; - } - } - - if (ppc_md.pci_dma_bus_setup) - ppc_md.pci_dma_bus_setup(bus); - - list_for_each_entry(dev, &bus->devices, bus_list) - pcibios_setup_new_device(dev); -} - /* the next one is stolen from the alpha port... */ void __init pcibios_update_irq(struct pci_dev *dev, int irq) diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c index 3502b9101e6b..39fadc6e1492 100644 --- a/arch/powerpc/kernel/pci_64.c +++ b/arch/powerpc/kernel/pci_64.c @@ -32,13 +32,6 @@ #include <asm/machdep.h> #include <asm/ppc-pci.h> -#ifdef DEBUG -#include <asm/udbg.h> -#define DBG(fmt...) printk(fmt) -#else -#define DBG(fmt...) -#endif - unsigned long pci_probe_only = 1; /* pci_io_base -- the base address from which io bars are offsets. 
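
Aside, not part of the patch: with the file-local DBG() wrappers removed above, all of these messages now go through pr_debug(), which compiles to nothing unless DEBUG is defined for the file (or dynamic debug support is enabled). A minimal sketch, assuming the usual kernel convention, of how one would re-enable the messages for a single file; example_debug() is purely illustrative:

	#define DEBUG			/* must come before the includes; the file currently has "#undef DEBUG" */
	#include <linux/kernel.h>

	static void example_debug(void)
	{
		/* compiled in (and printed) only because DEBUG is defined above */
		pr_debug("PCI: example debug message\n");
	}
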
@@ -102,7 +95,7 @@ static void pci_parse_of_addrs(struct device_node *node, struct pci_dev *dev) addrs = of_get_property(node, "assigned-addresses", &proplen); if (!addrs) return; - DBG(" parse addresses (%d bytes) @ %p\n", proplen, addrs); + pr_debug(" parse addresses (%d bytes) @ %p\n", proplen, addrs); for (; proplen >= 20; proplen -= 20, addrs += 5) { flags = pci_parse_of_flags(addrs[0]); if (!flags) @@ -112,8 +105,9 @@ static void pci_parse_of_addrs(struct device_node *node, struct pci_dev *dev) if (!size) continue; i = addrs[0] & 0xff; - DBG(" base: %llx, size: %llx, i: %x\n", - (unsigned long long)base, (unsigned long long)size, i); + pr_debug(" base: %llx, size: %llx, i: %x\n", + (unsigned long long)base, + (unsigned long long)size, i); if (PCI_BASE_ADDRESS_0 <= i && i <= PCI_BASE_ADDRESS_5) { res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2]; @@ -144,7 +138,7 @@ struct pci_dev *of_create_pci_dev(struct device_node *node, if (type == NULL) type = ""; - DBG(" create device, devfn: %x, type: %s\n", devfn, type); + pr_debug(" create device, devfn: %x, type: %s\n", devfn, type); dev->bus = bus; dev->sysdata = node; @@ -165,8 +159,8 @@ struct pci_dev *of_create_pci_dev(struct device_node *node, dev->class = get_int_prop(node, "class-code", 0); dev->revision = get_int_prop(node, "revision-id", 0); - DBG(" class: 0x%x\n", dev->class); - DBG(" revision: 0x%x\n", dev->revision); + pr_debug(" class: 0x%x\n", dev->class); + pr_debug(" revision: 0x%x\n", dev->revision); dev->current_state = 4; /* unknown power state */ dev->error_state = pci_channel_io_normal; @@ -187,7 +181,7 @@ struct pci_dev *of_create_pci_dev(struct device_node *node, pci_parse_of_addrs(node, dev); - DBG(" adding to system ...\n"); + pr_debug(" adding to system ...\n"); pci_device_add(dev, bus); @@ -195,19 +189,20 @@ struct pci_dev *of_create_pci_dev(struct device_node *node, } EXPORT_SYMBOL(of_create_pci_dev); -void __devinit of_scan_bus(struct device_node *node, - struct pci_bus *bus) +static void __devinit __of_scan_bus(struct device_node *node, + struct pci_bus *bus, int rescan_existing) { struct device_node *child; const u32 *reg; int reglen, devfn; struct pci_dev *dev; - DBG("of_scan_bus(%s) bus no %d... \n", node->full_name, bus->number); + pr_debug("of_scan_bus(%s) bus no %d... \n", + node->full_name, bus->number); /* Scan direct children */ for_each_child_of_node(node, child) { - DBG(" * %s\n", child->full_name); + pr_debug(" * %s\n", child->full_name); reg = of_get_property(child, "reg", ®len); if (reg == NULL || reglen < 20) continue; @@ -217,11 +212,15 @@ void __devinit of_scan_bus(struct device_node *node, dev = of_create_pci_dev(child, bus, devfn); if (!dev) continue; - DBG(" dev header type: %x\n", dev->hdr_type); + pr_debug(" dev header type: %x\n", dev->hdr_type); } - /* Ally all fixups */ - pcibios_fixup_of_probed_bus(bus); + /* Apply all fixups necessary. 
We don't fixup the bus "self" + * for an existing bridge that is being rescanned + */ + if (!rescan_existing) + pcibios_setup_bus_self(bus); + pcibios_setup_bus_devices(bus); /* Now scan child busses */ list_for_each_entry(dev, &bus->devices, bus_list) { @@ -233,7 +232,20 @@ void __devinit of_scan_bus(struct device_node *node, } } } -EXPORT_SYMBOL(of_scan_bus); + +void __devinit of_scan_bus(struct device_node *node, + struct pci_bus *bus) +{ + __of_scan_bus(node, bus, 0); +} +EXPORT_SYMBOL_GPL(of_scan_bus); + +void __devinit of_rescan_bus(struct device_node *node, + struct pci_bus *bus) +{ + __of_scan_bus(node, bus, 1); +} +EXPORT_SYMBOL_GPL(of_rescan_bus); void __devinit of_scan_pci_bridge(struct device_node *node, struct pci_dev *dev) @@ -245,7 +257,7 @@ void __devinit of_scan_pci_bridge(struct device_node *node, unsigned int flags; u64 size; - DBG("of_scan_pci_bridge(%s)\n", node->full_name); + pr_debug("of_scan_pci_bridge(%s)\n", node->full_name); /* parse bus-range property */ busrange = of_get_property(node, "bus-range", &len); @@ -309,12 +321,12 @@ void __devinit of_scan_pci_bridge(struct device_node *node, } sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus), bus->number); - DBG(" bus name: %s\n", bus->name); + pr_debug(" bus name: %s\n", bus->name); mode = PCI_PROBE_NORMAL; if (ppc_md.pci_probe_mode) mode = ppc_md.pci_probe_mode(bus); - DBG(" probe mode: %d\n", mode); + pr_debug(" probe mode: %d\n", mode); if (mode == PCI_PROBE_DEVTREE) of_scan_bus(node, bus); @@ -327,9 +339,10 @@ void __devinit scan_phb(struct pci_controller *hose) { struct pci_bus *bus; struct device_node *node = hose->dn; - int i, mode; + int mode; - DBG("PCI: Scanning PHB %s\n", node ? node->full_name : "<NO NAME>"); + pr_debug("PCI: Scanning PHB %s\n", + node ? node->full_name : "<NO NAME>"); /* Create an empty bus for the toplevel */ bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, node); @@ -345,26 +358,13 @@ void __devinit scan_phb(struct pci_controller *hose) pcibios_map_io_space(bus); /* Wire up PHB bus resources */ - DBG("PCI: PHB IO resource = %016lx-%016lx [%lx]\n", - hose->io_resource.start, hose->io_resource.end, - hose->io_resource.flags); - bus->resource[0] = &hose->io_resource; - for (i = 0; i < 3; ++i) { - DBG("PCI: PHB MEM resource %d = %016lx-%016lx [%lx]\n", i, - hose->mem_resources[i].start, - hose->mem_resources[i].end, - hose->mem_resources[i].flags); - bus->resource[i+1] = &hose->mem_resources[i]; - } - DBG("PCI: PHB MEM offset = %016lx\n", hose->pci_mem_offset); - DBG("PCI: PHB IO offset = %08lx\n", - (unsigned long)hose->io_base_virt - _IO_BASE); + pcibios_setup_phb_resources(hose); /* Get probe mode and perform scan */ mode = PCI_PROBE_NORMAL; if (node && ppc_md.pci_probe_mode) mode = ppc_md.pci_probe_mode(bus); - DBG(" probe mode: %d\n", mode); + pr_debug(" probe mode: %d\n", mode); if (mode == PCI_PROBE_DEVTREE) { bus->subordinate = hose->last_busno; of_scan_bus(node, bus); @@ -380,7 +380,7 @@ static int __init pcibios_init(void) printk(KERN_INFO "PCI: Probing PCI hardware\n"); - /* For now, override phys_mem_access_prot. If we need it, + /* For now, override phys_mem_access_prot. 
If we need it,g * later, we may move that initialization to each ppc_md */ ppc_md.phys_mem_access_prot = pci_phys_mem_access_prot; @@ -388,6 +388,11 @@ static int __init pcibios_init(void) if (pci_probe_only) ppc_pci_flags |= PPC_PCI_PROBE_ONLY; + /* On ppc64, we always enable PCI domains and we keep domain 0 + * backward compatible in /proc for video cards + */ + ppc_pci_flags |= PPC_PCI_ENABLE_PROC_DOMAINS | PPC_PCI_COMPAT_DOMAIN_0; + /* Scan all of the recorded PCI controllers. */ list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { scan_phb(hose); @@ -422,8 +427,8 @@ int pcibios_unmap_io_space(struct pci_bus *bus) if (bus->self) { struct resource *res = bus->resource[0]; - DBG("IO unmapping for PCI-PCI bridge %s\n", - pci_name(bus->self)); + pr_debug("IO unmapping for PCI-PCI bridge %s\n", + pci_name(bus->self)); __flush_hash_table_range(&init_mm, res->start + _IO_BASE, res->end + _IO_BASE + 1); @@ -437,8 +442,8 @@ int pcibios_unmap_io_space(struct pci_bus *bus) if (hose->io_base_alloc == 0) return 0; - DBG("IO unmapping for PHB %s\n", hose->dn->full_name); - DBG(" alloc=0x%p\n", hose->io_base_alloc); + pr_debug("IO unmapping for PHB %s\n", hose->dn->full_name); + pr_debug(" alloc=0x%p\n", hose->io_base_alloc); /* This is a PHB, we fully unmap the IO area */ vunmap(hose->io_base_alloc); @@ -463,11 +468,11 @@ int __devinit pcibios_map_io_space(struct pci_bus *bus) * thus HPTEs will be faulted in when needed */ if (bus->self) { - DBG("IO mapping for PCI-PCI bridge %s\n", - pci_name(bus->self)); - DBG(" virt=0x%016lx...0x%016lx\n", - bus->resource[0]->start + _IO_BASE, - bus->resource[0]->end + _IO_BASE); + pr_debug("IO mapping for PCI-PCI bridge %s\n", + pci_name(bus->self)); + pr_debug(" virt=0x%016lx...0x%016lx\n", + bus->resource[0]->start + _IO_BASE, + bus->resource[0]->end + _IO_BASE); return 0; } @@ -496,11 +501,11 @@ int __devinit pcibios_map_io_space(struct pci_bus *bus) hose->io_base_virt = (void __iomem *)(area->addr + hose->io_base_phys - phys_page); - DBG("IO mapping for PHB %s\n", hose->dn->full_name); - DBG(" phys=0x%016lx, virt=0x%p (alloc=0x%p)\n", - hose->io_base_phys, hose->io_base_virt, hose->io_base_alloc); - DBG(" size=0x%016lx (alloc=0x%016lx)\n", - hose->pci_io_size, size_page); + pr_debug("IO mapping for PHB %s\n", hose->dn->full_name); + pr_debug(" phys=0x%016lx, virt=0x%p (alloc=0x%p)\n", + hose->io_base_phys, hose->io_base_virt, hose->io_base_alloc); + pr_debug(" size=0x%016lx (alloc=0x%016lx)\n", + hose->pci_io_size, size_page); /* Establish the mapping */ if (__ioremap_at(phys_page, area->addr, size_page, @@ -512,24 +517,13 @@ int __devinit pcibios_map_io_space(struct pci_bus *bus) hose->io_resource.start += io_virt_offset; hose->io_resource.end += io_virt_offset; - DBG(" hose->io_resource=0x%016lx...0x%016lx\n", - hose->io_resource.start, hose->io_resource.end); + pr_debug(" hose->io_resource=0x%016lx...0x%016lx\n", + hose->io_resource.start, hose->io_resource.end); return 0; } EXPORT_SYMBOL_GPL(pcibios_map_io_space); -void __devinit pcibios_do_bus_setup(struct pci_bus *bus) -{ - struct pci_dev *dev; - - if (ppc_md.pci_dma_bus_setup) - ppc_md.pci_dma_bus_setup(bus); - - list_for_each_entry(dev, &bus->devices, bus_list) - pcibios_setup_new_device(dev); -} - unsigned long pci_address_to_pio(phys_addr_t address) { struct pci_controller *hose, *tmp; diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c index 260089dccfb0..dcec1325d340 100644 --- a/arch/powerpc/kernel/ppc_ksyms.c +++ b/arch/powerpc/kernel/ppc_ksyms.c @@ -116,12 
+116,6 @@ EXPORT_SYMBOL(giveup_spe); #ifndef CONFIG_PPC64 EXPORT_SYMBOL(flush_instruction_cache); -EXPORT_SYMBOL(flush_tlb_kernel_range); -EXPORT_SYMBOL(flush_tlb_page); -EXPORT_SYMBOL(_tlbie); -#if defined(CONFIG_4xx) || defined(CONFIG_8xx) || defined(CONFIG_FSL_BOOKE) -EXPORT_SYMBOL(_tlbil_va); -#endif #endif EXPORT_SYMBOL(__flush_icache_range); EXPORT_SYMBOL(flush_dcache_range); @@ -174,8 +168,7 @@ EXPORT_SYMBOL(cacheable_memcpy); #endif #ifdef CONFIG_PPC32 -EXPORT_SYMBOL(next_mmu_context); -EXPORT_SYMBOL(set_context); +EXPORT_SYMBOL(switch_mmu_context); #endif #ifdef CONFIG_PPC_STD_MMU_32 diff --git a/arch/powerpc/xmon/setjmp.S b/arch/powerpc/kernel/ppc_save_regs.S index 04c0b305ad4a..5113bd2285e1 100644 --- a/arch/powerpc/xmon/setjmp.S +++ b/arch/powerpc/kernel/ppc_save_regs.S @@ -22,7 +22,7 @@ * that will be different for 32-bit and 64-bit, because of the * different ABIs, though). */ -_GLOBAL(xmon_save_regs) +_GLOBAL(ppc_save_regs) PPC_STL r0,0*SZL(r3) PPC_STL r2,2*SZL(r3) PPC_STL r3,3*SZL(r3) diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 957bded0020d..51b201ddf9a1 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -467,6 +467,8 @@ static struct regbit { {MSR_VEC, "VEC"}, {MSR_VSX, "VSX"}, {MSR_ME, "ME"}, + {MSR_CE, "CE"}, + {MSR_DE, "DE"}, {MSR_IR, "IR"}, {MSR_DR, "DR"}, {0, NULL} @@ -998,7 +1000,7 @@ unsigned long get_wchan(struct task_struct *p) return 0; } -static int kstack_depth_to_print = 64; +static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH; void show_stack(struct task_struct *tsk, unsigned long *stack) { diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 3a2dc7e6586a..6f73c739f1e2 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -1160,6 +1160,8 @@ static inline void __init phyp_dump_reserve_mem(void) {} void __init early_init_devtree(void *params) { + unsigned long limit; + DBG(" -> early_init_devtree(%p)\n", params); /* Setup flat device-tree pointer */ @@ -1200,7 +1202,19 @@ void __init early_init_devtree(void *params) early_reserve_mem(); phyp_dump_reserve_mem(); - lmb_enforce_memory_limit(memory_limit); + limit = memory_limit; + if (! limit) { + unsigned long memsize; + + /* Ensure that total memory size is page-aligned, because + * otherwise mark_bootmem() gets upset. */ + lmb_analyze(); + memsize = lmb_phys_mem_size(); + if ((memsize & PAGE_MASK) != memsize) + limit = memsize & PAGE_MASK; + } + lmb_enforce_memory_limit(limit); + lmb_analyze(); DBG("Phys. mem: %lx\n", lmb_phys_mem_size()); @@ -1271,6 +1285,37 @@ struct device_node *of_find_node_by_phandle(phandle handle) EXPORT_SYMBOL(of_find_node_by_phandle); /** + * of_find_next_cache_node - Find a node's subsidiary cache + * @np: node of type "cpu" or "cache" + * + * Returns a node pointer with refcount incremented, use + * of_node_put() on it when done. Caller should hold a reference + * to np. + */ +struct device_node *of_find_next_cache_node(struct device_node *np) +{ + struct device_node *child; + const phandle *handle; + + handle = of_get_property(np, "l2-cache", NULL); + if (!handle) + handle = of_get_property(np, "next-level-cache", NULL); + + if (handle) + return of_find_node_by_phandle(*handle); + + /* OF on pmac has nodes instead of properties named "l2-cache" + * beneath CPU nodes. 
+ */ + if (!strcmp(np->type, "cpu")) + for_each_child_of_node(np, child) + if (!strcmp(child->type, "cache")) + return child; + + return NULL; +} + +/** * of_find_all_nodes - Get next node in global list * @prev: Previous node or NULL to start iteration * of_node_put() will be called on it diff --git a/arch/powerpc/kernel/prom_parse.c b/arch/powerpc/kernel/prom_parse.c index a11d68976dc8..8c1335566089 100644 --- a/arch/powerpc/kernel/prom_parse.c +++ b/arch/powerpc/kernel/prom_parse.c @@ -734,10 +734,7 @@ void of_irq_map_init(unsigned int flags) if (flags & OF_IMAP_NO_PHANDLE) { struct device_node *np; - for(np = NULL; (np = of_find_all_nodes(np)) != NULL;) { - if (of_get_property(np, "interrupt-controller", NULL) - == NULL) - continue; + for_each_node_with_property(np, "interrupt-controller") { /* Skip /chosen/interrupt-controller */ if (strcmp(np->name, "chosen") == 0) continue; diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index 1f8505c23548..fdfe14c4bdef 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -566,6 +566,32 @@ int rtas_get_sensor(int sensor, int index, int *state) } EXPORT_SYMBOL(rtas_get_sensor); +bool rtas_indicator_present(int token, int *maxindex) +{ + int proplen, count, i; + const struct indicator_elem { + u32 token; + u32 maxindex; + } *indicators; + + indicators = of_get_property(rtas.dev, "rtas-indicators", &proplen); + if (!indicators) + return false; + + count = proplen / sizeof(struct indicator_elem); + + for (i = 0; i < count; i++) { + if (indicators[i].token != token) + continue; + if (maxindex) + *maxindex = indicators[i].maxindex; + return true; + } + + return false; +} +EXPORT_SYMBOL(rtas_indicator_present); + int rtas_set_indicator(int indicator, int index, int new_value) { int token = rtas_token("set-indicator"); diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c index 589a2797eac2..8869001ab5d7 100644 --- a/arch/powerpc/kernel/rtas_pci.c +++ b/arch/powerpc/kernel/rtas_pci.c @@ -301,51 +301,3 @@ void __init find_and_init_phbs(void) #endif /* CONFIG_PPC32 */ } } - -/* RPA-specific bits for removing PHBs */ -int pcibios_remove_root_bus(struct pci_controller *phb) -{ - struct pci_bus *b = phb->bus; - struct resource *res; - int rc, i; - - res = b->resource[0]; - if (!res->flags) { - printk(KERN_ERR "%s: no IO resource for PHB %s\n", __func__, - b->name); - return 1; - } - - rc = pcibios_unmap_io_space(b); - if (rc) { - printk(KERN_ERR "%s: failed to unmap IO on bus %s\n", - __func__, b->name); - return 1; - } - - if (release_resource(res)) { - printk(KERN_ERR "%s: failed to release IO on bus %s\n", - __func__, b->name); - return 1; - } - - for (i = 1; i < 3; ++i) { - res = b->resource[i]; - if (!res->flags && i == 0) { - printk(KERN_ERR "%s: no MEM resource for PHB %s\n", - __func__, b->name); - return 1; - } - if (res->flags && release_resource(res)) { - printk(KERN_ERR - "%s: failed to release IO %d on bus %s\n", - __func__, i, b->name); - return 1; - } - } - - pcibios_free_controller(phb); - - return 0; -} -EXPORT_SYMBOL(pcibios_remove_root_bus); diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index c1a27626a940..9e1ca745d8f0 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c @@ -38,6 +38,7 @@ #include <asm/time.h> #include <asm/serial.h> #include <asm/udbg.h> +#include <asm/mmu_context.h> #include "setup.h" @@ -49,12 +50,12 @@ int boot_cpuid; EXPORT_SYMBOL_GPL(boot_cpuid); int boot_cpuid_phys; +int smp_hw_index[NR_CPUS]; + 
unsigned long ISA_DMA_THRESHOLD; unsigned int DMA_MODE_READ; unsigned int DMA_MODE_WRITE; -int have_of = 1; - #ifdef CONFIG_VGA_CONSOLE unsigned long vgacon_remap_base; EXPORT_SYMBOL(vgacon_remap_base); @@ -97,6 +98,10 @@ notrace unsigned long __init early_init(unsigned long dt_ptr) PTRRELOC(&__start___ftr_fixup), PTRRELOC(&__stop___ftr_fixup)); + do_feature_fixups(spec->mmu_features, + PTRRELOC(&__start___mmu_ftr_fixup), + PTRRELOC(&__stop___mmu_ftr_fixup)); + do_lwsync_fixups(spec->cpu_features, PTRRELOC(&__start___lwsync_fixup), PTRRELOC(&__stop___lwsync_fixup)); @@ -121,6 +126,8 @@ notrace void __init machine_init(unsigned long dt_ptr) probe_machine(); + setup_kdump_trampoline(); + #ifdef CONFIG_6xx if (cpu_has_feature(CPU_FTR_CAN_DOZE) || cpu_has_feature(CPU_FTR_CAN_NAP)) @@ -326,4 +333,8 @@ void __init setup_arch(char **cmdline_p) if ( ppc_md.progress ) ppc_md.progress("arch: exit", 0x3eab); paging_init(); + + /* Initialize the MMU context management stuff */ + mmu_context_init(); + } diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 169d74cef157..d8bd2161e738 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -70,7 +70,6 @@ #define DBG(fmt...) #endif -int have_of = 1; int boot_cpuid = 0; u64 ppc64_pft_size; @@ -362,6 +361,8 @@ void __init setup_system(void) */ do_feature_fixups(cur_cpu_spec->cpu_features, &__start___ftr_fixup, &__stop___ftr_fixup); + do_feature_fixups(cur_cpu_spec->mmu_features, + &__start___mmu_ftr_fixup, &__stop___mmu_ftr_fixup); do_feature_fixups(powerpc_firmware_features, &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup); do_lwsync_fixups(cur_cpu_spec->cpu_features, @@ -606,8 +607,6 @@ void __init setup_per_cpu_areas(void) for_each_possible_cpu(i) { ptr = alloc_bootmem_pages_node(NODE_DATA(cpu_to_node(i)), size); - if (!ptr) - panic("Cannot allocate cpu data for CPU %d\n", i); paca[i].data_offset = ptr - __per_cpu_start; memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); diff --git a/arch/powerpc/kernel/smp-tbsync.c b/arch/powerpc/kernel/smp-tbsync.c index bc892e69b4f7..a5e54526403d 100644 --- a/arch/powerpc/kernel/smp-tbsync.c +++ b/arch/powerpc/kernel/smp-tbsync.c @@ -113,7 +113,7 @@ void __devinit smp_generic_give_timebase(void) { int i, score, score2, old, min=0, max=5000, offset=1000; - printk("Synchronizing timebase\n"); + pr_debug("Software timebase sync\n"); /* if this fails then this kernel won't work anyway... */ tbsync = kzalloc( sizeof(*tbsync), GFP_KERNEL ); @@ -123,13 +123,13 @@ void __devinit smp_generic_give_timebase(void) while (!tbsync->ack) barrier(); - printk("Got ack\n"); + pr_debug("Got ack\n"); /* binary search */ for (old = -1; old != offset ; offset = (min+max) / 2) { score = start_contest(kSetAndTest, offset, NUM_ITER); - printk("score %d, offset %d\n", score, offset ); + pr_debug("score %d, offset %d\n", score, offset ); if( score > 0 ) max = offset; @@ -140,8 +140,8 @@ void __devinit smp_generic_give_timebase(void) score = start_contest(kSetAndTest, min, NUM_ITER); score2 = start_contest(kSetAndTest, max, NUM_ITER); - printk("Min %d (score %d), Max %d (score %d)\n", - min, score, max, score2); + pr_debug("Min %d (score %d), Max %d (score %d)\n", + min, score, max, score2); score = abs(score); score2 = abs(score2); offset = (score < score2) ? 
min : max; @@ -155,7 +155,7 @@ void __devinit smp_generic_give_timebase(void) if (score2 <= score || score2 < 20) break; } - printk("Final offset: %d (%d/%d)\n", offset, score2, NUM_ITER ); + pr_debug("Final offset: %d (%d/%d)\n", offset, score2, NUM_ITER ); /* exiting */ tbsync->cmd = kExit; diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index ff9f7010097d..8ac3f721d235 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -57,7 +57,6 @@ #define DBG(fmt...) #endif -int smp_hw_index[NR_CPUS]; struct thread_info *secondary_ti; cpumask_t cpu_possible_map = CPU_MASK_NONE; @@ -123,6 +122,65 @@ void smp_message_recv(int msg) } } +static irqreturn_t call_function_action(int irq, void *data) +{ + generic_smp_call_function_interrupt(); + return IRQ_HANDLED; +} + +static irqreturn_t reschedule_action(int irq, void *data) +{ + /* we just need the return path side effect of checking need_resched */ + return IRQ_HANDLED; +} + +static irqreturn_t call_function_single_action(int irq, void *data) +{ + generic_smp_call_function_single_interrupt(); + return IRQ_HANDLED; +} + +static irqreturn_t debug_ipi_action(int irq, void *data) +{ + smp_message_recv(PPC_MSG_DEBUGGER_BREAK); + return IRQ_HANDLED; +} + +static irq_handler_t smp_ipi_action[] = { + [PPC_MSG_CALL_FUNCTION] = call_function_action, + [PPC_MSG_RESCHEDULE] = reschedule_action, + [PPC_MSG_CALL_FUNC_SINGLE] = call_function_single_action, + [PPC_MSG_DEBUGGER_BREAK] = debug_ipi_action, +}; + +const char *smp_ipi_name[] = { + [PPC_MSG_CALL_FUNCTION] = "ipi call function", + [PPC_MSG_RESCHEDULE] = "ipi reschedule", + [PPC_MSG_CALL_FUNC_SINGLE] = "ipi call function single", + [PPC_MSG_DEBUGGER_BREAK] = "ipi debugger", +}; + +/* optional function to request ipi, for controllers with >= 4 ipis */ +int smp_request_message_ipi(int virq, int msg) +{ + int err; + + if (msg < 0 || msg > PPC_MSG_DEBUGGER_BREAK) { + return -EINVAL; + } +#if !defined(CONFIG_DEBUGGER) && !defined(CONFIG_KEXEC) + if (msg == PPC_MSG_DEBUGGER_BREAK) { + return 1; + } +#endif + err = request_irq(virq, smp_ipi_action[msg], IRQF_DISABLED|IRQF_PERCPU, + smp_ipi_name[msg], 0); + WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n", + virq, smp_ipi_name[msg], err); + + return err; +} + void smp_send_reschedule(int cpu) { if (likely(smp_ops)) @@ -408,8 +466,7 @@ out: static struct device_node *cpu_to_l2cache(int cpu) { struct device_node *np; - const phandle *php; - phandle ph; + struct device_node *cache; if (!cpu_present(cpu)) return NULL; @@ -418,13 +475,11 @@ static struct device_node *cpu_to_l2cache(int cpu) if (np == NULL) return NULL; - php = of_get_property(np, "l2-cache", NULL); - if (php == NULL) - return NULL; - ph = *php; + cache = of_find_next_cache_node(np); + of_node_put(np); - return of_find_node_by_phandle(ph); + return cache; } /* Activate a secondary processor. 
*/ diff --git a/arch/powerpc/kernel/swsusp.c b/arch/powerpc/kernel/swsusp.c index 77b7b34b5955..560c96119501 100644 --- a/arch/powerpc/kernel/swsusp.c +++ b/arch/powerpc/kernel/swsusp.c @@ -34,6 +34,6 @@ void save_processor_state(void) void restore_processor_state(void) { #ifdef CONFIG_PPC32 - set_context(current->active_mm->context.id, current->active_mm->pgd); + switch_mmu_context(NULL, current->active_mm); #endif } diff --git a/arch/powerpc/kernel/swsusp_32.S b/arch/powerpc/kernel/swsusp_32.S index 77fc76607ab2..b47d8ceffb52 100644 --- a/arch/powerpc/kernel/swsusp_32.S +++ b/arch/powerpc/kernel/swsusp_32.S @@ -5,7 +5,7 @@ #include <asm/thread_info.h> #include <asm/ppc_asm.h> #include <asm/asm-offsets.h> - +#include <asm/mmu.h> /* * Structure for storing CPU registers on the save area. @@ -279,7 +279,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) mtibatl 3,r4 #endif -BEGIN_FTR_SECTION +BEGIN_MMU_FTR_SECTION li r4,0 mtspr SPRN_DBAT4U,r4 mtspr SPRN_DBAT4L,r4 @@ -297,7 +297,7 @@ BEGIN_FTR_SECTION mtspr SPRN_IBAT6L,r4 mtspr SPRN_IBAT7U,r4 mtspr SPRN_IBAT7L,r4 -END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS) +END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS) /* Flush all TLBs */ lis r4,0x1000 diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c index 20885a38237a..0c64f10087b9 100644 --- a/arch/powerpc/kernel/sysfs.c +++ b/arch/powerpc/kernel/sysfs.c @@ -566,7 +566,6 @@ static bool cache_is_unified(struct device_node *np) static struct cache_desc * __cpuinit create_cache_index_info(struct device_node *np, struct kobject *parent, int index, int level) { - const phandle *next_cache_phandle; struct device_node *next_cache; struct cache_desc *new, **end; @@ -591,11 +590,7 @@ static struct cache_desc * __cpuinit create_cache_index_info(struct device_node while (*end) end = &(*end)->next; - next_cache_phandle = of_get_property(np, "l2-cache", NULL); - if (!next_cache_phandle) - goto out; - - next_cache = of_find_node_by_phandle(*next_cache_phandle); + next_cache = of_find_next_cache_node(np); if (!next_cache) goto out; diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index e2ee66b5831d..e1f3a5140429 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -164,8 +164,6 @@ static u64 tb_to_ns_scale __read_mostly; static unsigned tb_to_ns_shift __read_mostly; static unsigned long boot_tb __read_mostly; -static struct gettimeofday_struct do_gtod; - extern struct timezone sys_tz; static long timezone_offset; @@ -415,31 +413,9 @@ void udelay(unsigned long usecs) } EXPORT_SYMBOL(udelay); - -/* - * There are two copies of tb_to_xs and stamp_xsec so that no - * lock is needed to access and use these values in - * do_gettimeofday. We alternate the copies and as long as a - * reasonable time elapses between changes, there will never - * be inconsistent values. ntpd has a minimum of one minute - * between updates. 
- */ static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec, u64 new_tb_to_xs) { - unsigned temp_idx; - struct gettimeofday_vars *temp_varp; - - temp_idx = (do_gtod.var_idx == 0); - temp_varp = &do_gtod.vars[temp_idx]; - - temp_varp->tb_to_xs = new_tb_to_xs; - temp_varp->tb_orig_stamp = new_tb_stamp; - temp_varp->stamp_xsec = new_stamp_xsec; - smp_mb(); - do_gtod.varp = temp_varp; - do_gtod.var_idx = temp_idx; - /* * tb_update_count is used to allow the userspace gettimeofday code * to assure itself that it sees a consistent view of the tb_to_xs and @@ -456,6 +432,7 @@ static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec, vdso_data->tb_to_xs = new_tb_to_xs; vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec; vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec; + vdso_data->stamp_xtime = xtime; smp_wmb(); ++(vdso_data->tb_update_count); } @@ -514,9 +491,7 @@ static int __init iSeries_tb_recal(void) tb_ticks_per_sec = new_tb_ticks_per_sec; calc_cputime_factors(); div128_by_32( XSEC_PER_SEC, 0, tb_ticks_per_sec, &divres ); - do_gtod.tb_ticks_per_sec = tb_ticks_per_sec; tb_to_xs = divres.result_low; - do_gtod.varp->tb_to_xs = tb_to_xs; vdso_data->tb_ticks_per_sec = tb_ticks_per_sec; vdso_data->tb_to_xs = tb_to_xs; } @@ -988,15 +963,6 @@ void __init time_init(void) sys_tz.tz_dsttime = 0; } - do_gtod.varp = &do_gtod.vars[0]; - do_gtod.var_idx = 0; - do_gtod.varp->tb_orig_stamp = tb_last_jiffy; - __get_cpu_var(last_jiffy) = tb_last_jiffy; - do_gtod.varp->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC; - do_gtod.tb_ticks_per_sec = tb_ticks_per_sec; - do_gtod.varp->tb_to_xs = tb_to_xs; - do_gtod.tb_to_us = tb_to_us; - vdso_data->tb_orig_stamp = tb_last_jiffy; vdso_data->tb_update_count = 0; vdso_data->tb_ticks_per_sec = tb_ticks_per_sec; diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index f5def6cf5cd6..5457e9575685 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -1160,37 +1160,85 @@ void CacheLockingException(struct pt_regs *regs, unsigned long address, #ifdef CONFIG_SPE void SPEFloatingPointException(struct pt_regs *regs) { + extern int do_spe_mathemu(struct pt_regs *regs); unsigned long spefscr; int fpexc_mode; int code = 0; + int err; + + preempt_disable(); + if (regs->msr & MSR_SPE) + giveup_spe(current); + preempt_enable(); spefscr = current->thread.spefscr; fpexc_mode = current->thread.fpexc_mode; - /* Hardware does not neccessarily set sticky - * underflow/overflow/invalid flags */ if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) { code = FPE_FLTOVF; - spefscr |= SPEFSCR_FOVFS; } else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) { code = FPE_FLTUND; - spefscr |= SPEFSCR_FUNFS; } else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV)) code = FPE_FLTDIV; else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) { code = FPE_FLTINV; - spefscr |= SPEFSCR_FINVS; } else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES)) code = FPE_FLTRES; - current->thread.spefscr = spefscr; + err = do_spe_mathemu(regs); + if (err == 0) { + regs->nip += 4; /* skip emulated instruction */ + emulate_single_step(regs); + return; + } + + if (err == -EFAULT) { + /* got an error reading the instruction */ + _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip); + } else if (err == -EINVAL) { + /* didn't recognize the instruction */ + printk(KERN_ERR "unrecognized spe instruction " + "in %s at %lx\n", current->comm, regs->nip); + } else { + _exception(SIGFPE, regs, 
code, regs->nip); + } - _exception(SIGFPE, regs, code, regs->nip); return; } + +void SPEFloatingPointRoundException(struct pt_regs *regs) +{ + extern int speround_handler(struct pt_regs *regs); + int err; + + preempt_disable(); + if (regs->msr & MSR_SPE) + giveup_spe(current); + preempt_enable(); + + regs->nip -= 4; + err = speround_handler(regs); + if (err == 0) { + regs->nip += 4; /* skip emulated instruction */ + emulate_single_step(regs); + return; + } + + if (err == -EFAULT) { + /* got an error reading the instruction */ + _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip); + } else if (err == -EINVAL) { + /* didn't recognize the instruction */ + printk(KERN_ERR "unrecognized spe instruction " + "in %s at %lx\n", current->comm, regs->nip); + } else { + _exception(SIGFPE, regs, 0, regs->nip); + return; + } +} #endif /* diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c index 65639a43e644..ad06d5c75b15 100644 --- a/arch/powerpc/kernel/vdso.c +++ b/arch/powerpc/kernel/vdso.c @@ -184,8 +184,7 @@ static void dump_vdso_pages(struct vm_area_struct * vma) * This is called from binfmt_elf, we create the special vma for the * vDSO and insert it into the mm struct tree */ -int arch_setup_additional_pages(struct linux_binprm *bprm, - int executable_stack) +int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) { struct mm_struct *mm = current->mm; struct page **vdso_pagelist; @@ -567,6 +566,11 @@ static __init int vdso_fixup_features(struct lib32_elfinfo *v32, do_feature_fixups(cur_cpu_spec->cpu_features, start64, start64 + size64); + start64 = find_section64(v64->hdr, "__mmu_ftr_fixup", &size64); + if (start64) + do_feature_fixups(cur_cpu_spec->mmu_features, + start64, start64 + size64); + start64 = find_section64(v64->hdr, "__fw_ftr_fixup", &size64); if (start64) do_feature_fixups(powerpc_firmware_features, @@ -583,6 +587,11 @@ static __init int vdso_fixup_features(struct lib32_elfinfo *v32, do_feature_fixups(cur_cpu_spec->cpu_features, start32, start32 + size32); + start32 = find_section32(v32->hdr, "__mmu_ftr_fixup", &size32); + if (start32) + do_feature_fixups(cur_cpu_spec->mmu_features, + start32, start32 + size32); + #ifdef CONFIG_PPC64 start32 = find_section32(v32->hdr, "__fw_ftr_fixup", &size32); if (start32) diff --git a/arch/powerpc/kernel/vdso32/gettimeofday.S b/arch/powerpc/kernel/vdso32/gettimeofday.S index 72ca26df457e..ee038d4bf252 100644 --- a/arch/powerpc/kernel/vdso32/gettimeofday.S +++ b/arch/powerpc/kernel/vdso32/gettimeofday.S @@ -16,6 +16,13 @@ #include <asm/asm-offsets.h> #include <asm/unistd.h> +/* Offset for the low 32-bit part of a field of long type */ +#ifdef CONFIG_PPC64 +#define LOPART 4 +#else +#define LOPART 0 +#endif + .text /* * Exact prototype of gettimeofday @@ -90,101 +97,53 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime) mflr r12 /* r12 saves lr */ .cfi_register lr,r12 - mr r10,r3 /* r10 saves id */ mr r11,r4 /* r11 saves tp */ bl __get_datapage@local /* get data page */ mr r9,r3 /* datapage ptr in r9 */ - beq cr1,50f /* if monotonic -> jump there */ - - /* - * CLOCK_REALTIME - */ - - bl __do_get_xsec@local /* get xsec from tb & kernel */ - bne- 98f /* out of line -> do syscall */ - - /* seconds are xsec >> 20 */ - rlwinm r5,r4,12,20,31 - rlwimi r5,r3,12,0,19 - stw r5,TSPC32_TV_SEC(r11) - /* get remaining xsec and convert to nsec. 
we scale - * up remaining xsec by 12 bits and get the top 32 bits - * of the multiplication, then we multiply by 1000 - */ - rlwinm r5,r4,12,0,19 - lis r6,1000000@h - ori r6,r6,1000000@l - mulhwu r5,r5,r6 - mulli r5,r5,1000 - stw r5,TSPC32_TV_NSEC(r11) - mtlr r12 - crclr cr0*4+so - li r3,0 - blr +50: bl __do_get_tspec@local /* get sec/nsec from tb & kernel */ + bne cr1,80f /* not monotonic -> all done */ /* * CLOCK_MONOTONIC */ -50: bl __do_get_xsec@local /* get xsec from tb & kernel */ - bne- 98f /* out of line -> do syscall */ - - /* seconds are xsec >> 20 */ - rlwinm r6,r4,12,20,31 - rlwimi r6,r3,12,0,19 - - /* get remaining xsec and convert to nsec. we scale - * up remaining xsec by 12 bits and get the top 32 bits - * of the multiplication, then we multiply by 1000 - */ - rlwinm r7,r4,12,0,19 - lis r5,1000000@h - ori r5,r5,1000000@l - mulhwu r7,r7,r5 - mulli r7,r7,1000 - /* now we must fixup using wall to monotonic. We need to snapshot * that value and do the counter trick again. Fortunately, we still * have the counter value in r8 that was returned by __do_get_xsec. - * At this point, r6,r7 contain our sec/nsec values, r3,r4 and r5 - * can be used + * At this point, r3,r4 contain our sec/nsec values, r5 and r6 + * can be used, r7 contains NSEC_PER_SEC. */ - lwz r3,WTOM_CLOCK_SEC(r9) - lwz r4,WTOM_CLOCK_NSEC(r9) + lwz r5,WTOM_CLOCK_SEC(r9) + lwz r6,WTOM_CLOCK_NSEC(r9) - /* We now have our result in r3,r4. We create a fake dependency - * on that result and re-check the counter + /* We now have our offset in r5,r6. We create a fake dependency + * on that value and re-check the counter */ - or r5,r4,r3 - xor r0,r5,r5 + or r0,r6,r5 + xor r0,r0,r0 add r9,r9,r0 -#ifdef CONFIG_PPC64 - lwz r0,(CFG_TB_UPDATE_COUNT+4)(r9) -#else - lwz r0,(CFG_TB_UPDATE_COUNT)(r9) -#endif + lwz r0,(CFG_TB_UPDATE_COUNT+LOPART)(r9) cmpl cr0,r8,r0 /* check if updated */ bne- 50b - /* Calculate and store result. Note that this mimmics the C code, + /* Calculate and store result. Note that this mimics the C code, * which may cause funny results if nsec goes negative... is that * possible at all ? */ - add r3,r3,r6 - add r4,r4,r7 - lis r5,NSEC_PER_SEC@h - ori r5,r5,NSEC_PER_SEC@l - cmpl cr0,r4,r5 - cmpli cr1,r4,0 + add r3,r3,r5 + add r4,r4,r6 + cmpw cr0,r4,r7 + cmpwi cr1,r4,0 blt 1f - subf r4,r5,r4 + subf r4,r7,r4 addi r3,r3,1 -1: bge cr1,1f +1: bge cr1,80f addi r3,r3,-1 - add r4,r4,r5 -1: stw r3,TSPC32_TV_SEC(r11) + add r4,r4,r7 + +80: stw r3,TSPC32_TV_SEC(r11) stw r4,TSPC32_TV_NSEC(r11) mtlr r12 @@ -195,10 +154,6 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime) /* * syscall fallback */ -98: - mtlr r12 - mr r3,r10 - mr r4,r11 99: li r0,__NR_clock_gettime sc @@ -254,11 +209,7 @@ __do_get_xsec: /* Check for update count & load values. We use the low * order 32 bits of the update count */ -#ifdef CONFIG_PPC64 -1: lwz r8,(CFG_TB_UPDATE_COUNT+4)(r9) -#else -1: lwz r8,(CFG_TB_UPDATE_COUNT)(r9) -#endif +1: lwz r8,(CFG_TB_UPDATE_COUNT+LOPART)(r9) andi. r0,r8,1 /* pending update ? loop */ bne- 1b xor r0,r8,r8 /* create dependency */ @@ -305,11 +256,7 @@ __do_get_xsec: or r6,r4,r3 xor r0,r6,r6 add r9,r9,r0 -#ifdef CONFIG_PPC64 - lwz r0,(CFG_TB_UPDATE_COUNT+4)(r9) -#else - lwz r0,(CFG_TB_UPDATE_COUNT)(r9) -#endif + lwz r0,(CFG_TB_UPDATE_COUNT+LOPART)(r9) cmpl cr0,r8,r0 /* check if updated */ bne- 1b @@ -322,3 +269,98 @@ __do_get_xsec: */ 3: blr .cfi_endproc + +/* + * This is the core of clock_gettime(), it returns the current + * time in seconds and nanoseconds in r3 and r4. 
+ * It expects the datapage ptr in r9 and doesn't clobber it. + * It clobbers r0, r5, r6, r10 and returns NSEC_PER_SEC in r7. + * On return, r8 contains the counter value that can be reused. + * This clobbers cr0 but not any other cr field. + */ +__do_get_tspec: + .cfi_startproc + /* Check for update count & load values. We use the low + * order 32 bits of the update count + */ +1: lwz r8,(CFG_TB_UPDATE_COUNT+LOPART)(r9) + andi. r0,r8,1 /* pending update ? loop */ + bne- 1b + xor r0,r8,r8 /* create dependency */ + add r9,r9,r0 + + /* Load orig stamp (offset to TB) */ + lwz r5,CFG_TB_ORIG_STAMP(r9) + lwz r6,(CFG_TB_ORIG_STAMP+4)(r9) + + /* Get a stable TB value */ +2: mftbu r3 + mftbl r4 + mftbu r0 + cmpl cr0,r3,r0 + bne- 2b + + /* Subtract tb orig stamp and shift left 12 bits. + */ + subfc r7,r6,r4 + subfe r0,r5,r3 + slwi r0,r0,12 + rlwimi. r0,r7,12,20,31 + slwi r7,r7,12 + + /* Load scale factor & do multiplication */ + lwz r5,CFG_TB_TO_XS(r9) /* load values */ + lwz r6,(CFG_TB_TO_XS+4)(r9) + mulhwu r3,r7,r6 + mullw r10,r7,r5 + mulhwu r4,r7,r5 + addc r10,r3,r10 + li r3,0 + + beq+ 4f /* skip high part computation if 0 */ + mulhwu r3,r0,r5 + mullw r7,r0,r5 + mulhwu r5,r0,r6 + mullw r6,r0,r6 + adde r4,r4,r7 + addze r3,r3 + addc r4,r4,r5 + addze r3,r3 + addc r10,r10,r6 + +4: addze r4,r4 /* add in carry */ + lis r7,NSEC_PER_SEC@h + ori r7,r7,NSEC_PER_SEC@l + mulhwu r4,r4,r7 /* convert to nanoseconds */ + + /* At this point, we have seconds & nanoseconds since the xtime + * stamp in r3+CA and r4. Load & add the xtime stamp. + */ +#ifdef CONFIG_PPC64 + lwz r5,STAMP_XTIME+TSPC64_TV_SEC+LOPART(r9) + lwz r6,STAMP_XTIME+TSPC64_TV_NSEC+LOPART(r9) +#else + lwz r5,STAMP_XTIME+TSPC32_TV_SEC(r9) + lwz r6,STAMP_XTIME+TSPC32_TV_NSEC(r9) +#endif + add r4,r4,r6 + adde r3,r3,r5 + + /* We now have our result in r3,r4. We create a fake dependency + * on that result and re-check the counter + */ + or r6,r4,r3 + xor r0,r6,r6 + add r9,r9,r0 + lwz r0,(CFG_TB_UPDATE_COUNT+LOPART)(r9) + cmpl cr0,r8,r0 /* check if updated */ + bne- 1b + + /* check for nanosecond overflow and adjust if necessary */ + cmpw r4,r7 + bltlr /* all done if no overflow */ + subf r4,r7,r4 /* adjust if overflow */ + addi r3,r3,1 + + blr + .cfi_endproc diff --git a/arch/powerpc/kernel/vdso32/vdso32.lds.S b/arch/powerpc/kernel/vdso32/vdso32.lds.S index be3b6a41dc09..904ef1360dd7 100644 --- a/arch/powerpc/kernel/vdso32/vdso32.lds.S +++ b/arch/powerpc/kernel/vdso32/vdso32.lds.S @@ -34,6 +34,9 @@ SECTIONS __ftr_fixup : { *(__ftr_fixup) } . = ALIGN(8); + __mmu_ftr_fixup : { *(__mmu_ftr_fixup) } + + . 
= ALIGN(8); __lwsync_fixup : { *(__lwsync_fixup) } #ifdef CONFIG_PPC64 diff --git a/arch/powerpc/kernel/vdso64/gettimeofday.S b/arch/powerpc/kernel/vdso64/gettimeofday.S index c6401f9e37f1..262cd5857a56 100644 --- a/arch/powerpc/kernel/vdso64/gettimeofday.S +++ b/arch/powerpc/kernel/vdso64/gettimeofday.S @@ -75,90 +75,49 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime) mflr r12 /* r12 saves lr */ .cfi_register lr,r12 - mr r10,r3 /* r10 saves id */ mr r11,r4 /* r11 saves tp */ bl V_LOCAL_FUNC(__get_datapage) /* get data page */ - beq cr1,50f /* if monotonic -> jump there */ - - /* - * CLOCK_REALTIME - */ - - bl V_LOCAL_FUNC(__do_get_xsec) /* get xsec from tb & kernel */ - - lis r7,15 /* r7 = 1000000 = USEC_PER_SEC */ - ori r7,r7,16960 - rldicl r5,r4,44,20 /* r5 = sec = xsec / XSEC_PER_SEC */ - rldicr r6,r5,20,43 /* r6 = sec * XSEC_PER_SEC */ - std r5,TSPC64_TV_SEC(r11) /* store sec in tv */ - subf r0,r6,r4 /* r0 = xsec = (xsec - r6) */ - mulld r0,r0,r7 /* usec = (xsec * USEC_PER_SEC) / - * XSEC_PER_SEC - */ - rldicl r0,r0,44,20 - mulli r0,r0,1000 /* nsec = usec * 1000 */ - std r0,TSPC64_TV_NSEC(r11) /* store nsec in tp */ - - mtlr r12 - crclr cr0*4+so - li r3,0 - blr +50: bl V_LOCAL_FUNC(__do_get_tspec) /* get time from tb & kernel */ + bne cr1,80f /* if not monotonic, all done */ /* * CLOCK_MONOTONIC */ -50: bl V_LOCAL_FUNC(__do_get_xsec) /* get xsec from tb & kernel */ - - lis r7,15 /* r7 = 1000000 = USEC_PER_SEC */ - ori r7,r7,16960 - rldicl r5,r4,44,20 /* r5 = sec = xsec / XSEC_PER_SEC */ - rldicr r6,r5,20,43 /* r6 = sec * XSEC_PER_SEC */ - subf r0,r6,r4 /* r0 = xsec = (xsec - r6) */ - mulld r0,r0,r7 /* usec = (xsec * USEC_PER_SEC) / - * XSEC_PER_SEC - */ - rldicl r6,r0,44,20 - mulli r6,r6,1000 /* nsec = usec * 1000 */ - /* now we must fixup using wall to monotonic. We need to snapshot * that value and do the counter trick again. Fortunately, we still - * have the counter value in r8 that was returned by __do_get_xsec. - * At this point, r5,r6 contain our sec/nsec values. - * can be used + * have the counter value in r8 that was returned by __do_get_tspec. + * At this point, r4,r5 contain our sec/nsec values. */ - lwa r4,WTOM_CLOCK_SEC(r3) - lwa r7,WTOM_CLOCK_NSEC(r3) + lwa r6,WTOM_CLOCK_SEC(r3) + lwa r9,WTOM_CLOCK_NSEC(r3) - /* We now have our result in r4,r7. We create a fake dependency + /* We now have our result in r6,r9. We create a fake dependency * on that result and re-check the counter */ - or r9,r4,r7 - xor r0,r9,r9 + or r0,r6,r9 + xor r0,r0,r0 add r3,r3,r0 ld r0,CFG_TB_UPDATE_COUNT(r3) cmpld cr0,r0,r8 /* check if updated */ bne- 50b - /* Calculate and store result. Note that this mimmics the C code, - * which may cause funny results if nsec goes negative... is that - * possible at all ? + /* Add wall->monotonic offset and check for overflow or underflow. 
*/ - add r4,r4,r5 - add r7,r7,r6 - lis r9,NSEC_PER_SEC@h - ori r9,r9,NSEC_PER_SEC@l - cmpl cr0,r7,r9 - cmpli cr1,r7,0 + add r4,r4,r6 + add r5,r5,r9 + cmpd cr0,r5,r7 + cmpdi cr1,r5,0 blt 1f - subf r7,r9,r7 + subf r5,r7,r5 addi r4,r4,1 -1: bge cr1,1f +1: bge cr1,80f addi r4,r4,-1 - add r7,r7,r9 -1: std r4,TSPC64_TV_SEC(r11) - std r7,TSPC64_TV_NSEC(r11) + add r5,r5,r7 + +80: std r4,TSPC64_TV_SEC(r11) + std r5,TSPC64_TV_NSEC(r11) mtlr r12 crclr cr0*4+so @@ -168,10 +127,6 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime) /* * syscall fallback */ -98: - mtlr r12 - mr r3,r10 - mr r4,r11 99: li r0,__NR_clock_gettime sc @@ -253,3 +208,59 @@ V_FUNCTION_BEGIN(__do_get_xsec) blr .cfi_endproc V_FUNCTION_END(__do_get_xsec) + +/* + * This is the core of clock_gettime(), it returns the current + * time in seconds and nanoseconds in r4 and r5. + * It expects the datapage ptr in r3 and doesn't clobber it. + * It clobbers r0 and r6 and returns NSEC_PER_SEC in r7. + * On return, r8 contains the counter value that can be reused. + * This clobbers cr0 but not any other cr field. + */ +V_FUNCTION_BEGIN(__do_get_tspec) + .cfi_startproc + /* check for update count & load values */ +1: ld r8,CFG_TB_UPDATE_COUNT(r3) + andi. r0,r8,1 /* pending update ? loop */ + bne- 1b + xor r0,r8,r8 /* create dependency */ + add r3,r3,r0 + + /* Get TB & offset it. We use the MFTB macro which will generate + * workaround code for Cell. + */ + MFTB(r7) + ld r9,CFG_TB_ORIG_STAMP(r3) + subf r7,r9,r7 + + /* Scale result */ + ld r5,CFG_TB_TO_XS(r3) + sldi r7,r7,12 /* compute time since stamp_xtime */ + mulhdu r6,r7,r5 /* in units of 2^-32 seconds */ + + /* Add stamp since epoch */ + ld r4,STAMP_XTIME+TSPC64_TV_SEC(r3) + ld r5,STAMP_XTIME+TSPC64_TV_NSEC(r3) + or r0,r4,r5 + or r0,r0,r6 + xor r0,r0,r0 + add r3,r3,r0 + ld r0,CFG_TB_UPDATE_COUNT(r3) + cmpld r0,r8 /* check if updated */ + bne- 1b /* reload if so */ + + /* convert to seconds & nanoseconds and add to stamp */ + lis r7,NSEC_PER_SEC@h + ori r7,r7,NSEC_PER_SEC@l + mulhwu r0,r6,r7 /* compute nanoseconds and */ + srdi r6,r6,32 /* seconds since stamp_xtime */ + clrldi r0,r0,32 + add r5,r5,r0 /* add nanoseconds together */ + cmpd r5,r7 /* overflow? */ + add r4,r4,r6 + bltlr /* all done if no overflow */ + subf r5,r7,r5 /* if overflow, adjust */ + addi r4,r4,1 + blr + .cfi_endproc +V_FUNCTION_END(__do_get_tspec) diff --git a/arch/powerpc/kernel/vdso64/vdso64.lds.S b/arch/powerpc/kernel/vdso64/vdso64.lds.S index d0b2526dd38d..0e615404e247 100644 --- a/arch/powerpc/kernel/vdso64/vdso64.lds.S +++ b/arch/powerpc/kernel/vdso64/vdso64.lds.S @@ -35,6 +35,9 @@ SECTIONS __ftr_fixup : { *(__ftr_fixup) } . = ALIGN(8); + __mmu_ftr_fixup : { *(__mmu_ftr_fixup) } + + . = ALIGN(8); __lwsync_fixup : { *(__lwsync_fixup) } . 
= ALIGN(8); diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c index a11e6bc59b30..94aa7b011b27 100644 --- a/arch/powerpc/kernel/vio.c +++ b/arch/powerpc/kernel/vio.c @@ -41,9 +41,9 @@ static struct bus_type vio_bus_type; static struct vio_dev vio_bus_device = { /* fake "parent" device */ - .name = vio_bus_device.dev.bus_id, + .name = "vio", .type = "", - .dev.bus_id = "vio", + .dev.init_name = "vio", .dev.bus = &vio_bus_type, }; @@ -1216,7 +1216,7 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node) viodev->irq = irq_of_parse_and_map(of_node, 0); - snprintf(viodev->dev.bus_id, BUS_ID_SIZE, "%x", *unit_address); + dev_set_name(&viodev->dev, "%x", *unit_address); viodev->name = of_node->name; viodev->type = of_node->type; viodev->unit_address = *unit_address; @@ -1243,7 +1243,7 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node) /* register with generic device framework */ if (device_register(&viodev->dev)) { printk(KERN_ERR "%s: failed to register device %s\n", - __func__, viodev->dev.bus_id); + __func__, dev_name(&viodev->dev)); /* XXX free TCE table */ kfree(viodev); return NULL; @@ -1400,13 +1400,13 @@ static struct vio_dev *vio_find_name(const char *name) struct vio_dev *vio_find_node(struct device_node *vnode) { const uint32_t *unit_address; - char kobj_name[BUS_ID_SIZE]; + char kobj_name[20]; /* construct the kobject name from the device node */ unit_address = of_get_property(vnode, "reg", NULL); if (!unit_address) return NULL; - snprintf(kobj_name, BUS_ID_SIZE, "%x", *unit_address); + snprintf(kobj_name, sizeof(kobj_name), "%x", *unit_address); return vio_find_name(kobj_name); } diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index 2412c056baa4..47bf15cd2c9e 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -152,6 +152,12 @@ SECTIONS __stop___ftr_fixup = .; } . = ALIGN(8); + __mmu_ftr_fixup : AT(ADDR(__mmu_ftr_fixup) - LOAD_OFFSET) { + __start___mmu_ftr_fixup = .; + *(__mmu_ftr_fixup) + __stop___mmu_ftr_fixup = .; + } + . = ALIGN(8); __lwsync_fixup : AT(ADDR(__lwsync_fixup) - LOAD_OFFSET) { __start___lwsync_fixup = .; *(__lwsync_fixup) diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index fda9baada132..8bef0efcdfe1 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -28,6 +28,7 @@ #include <asm/uaccess.h> #include <asm/kvm_ppc.h> #include <asm/tlbflush.h> +#include "../mm/mmu_decl.h" gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn) @@ -330,7 +331,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) /* XXX It would be nice to differentiate between heavyweight exit and * sched_out here, since we could avoid the TLB flush for heavyweight * exits. */ - _tlbia(); + _tlbil_all(); } int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu, diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile index d69912c07ce7..8db35278a4b4 100644 --- a/arch/powerpc/lib/Makefile +++ b/arch/powerpc/lib/Makefile @@ -6,6 +6,9 @@ ifeq ($(CONFIG_PPC64),y) EXTRA_CFLAGS += -mno-minimal-toc endif +CFLAGS_REMOVE_code-patching.o = -pg +CFLAGS_REMOVE_feature-fixups.o = -pg + obj-y := string.o alloc.o \ checksum_$(CONFIG_WORD_SIZE).o obj-$(CONFIG_PPC32) += div64.o copy_32.o crtsavres.o diff --git a/arch/powerpc/lib/copyuser_64.S b/arch/powerpc/lib/copyuser_64.S index 25ec5378afa4..70693a5c12a1 100644 --- a/arch/powerpc/lib/copyuser_64.S +++ b/arch/powerpc/lib/copyuser_64.S @@ -26,11 +26,24 @@ _GLOBAL(__copy_tofrom_user) andi. 
r6,r6,7 PPC_MTOCRF 0x01,r5 blt cr1,.Lshort_copy +/* Below we want to nop out the bne if we're on a CPU that has the + * CPU_FTR_UNALIGNED_LD_STD bit set and the CPU_FTR_CP_USE_DCBTZ bit + * cleared. + * At the time of writing the only CPU that has this combination of bits + * set is Power6. + */ +BEGIN_FTR_SECTION + nop +FTR_SECTION_ELSE bne .Ldst_unaligned +ALT_FTR_SECTION_END(CPU_FTR_UNALIGNED_LD_STD | CPU_FTR_CP_USE_DCBTZ, \ + CPU_FTR_UNALIGNED_LD_STD) .Ldst_aligned: - andi. r0,r4,7 addi r3,r3,-16 +BEGIN_FTR_SECTION + andi. r0,r4,7 bne .Lsrc_unaligned +END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) srdi r7,r5,4 20: ld r9,0(r4) addi r4,r4,-8 @@ -138,7 +151,7 @@ _GLOBAL(__copy_tofrom_user) PPC_MTOCRF 0x01,r6 /* put #bytes to 8B bdry into cr7 */ subf r5,r6,r5 li r7,0 - cmpldi r1,r5,16 + cmpldi cr1,r5,16 bf cr7*4+3,1f 35: lbz r0,0(r4) 81: stb r0,0(r3) diff --git a/arch/powerpc/lib/dma-noncoherent.c b/arch/powerpc/lib/dma-noncoherent.c index 31734c0969cd..b7dc4c19f582 100644 --- a/arch/powerpc/lib/dma-noncoherent.c +++ b/arch/powerpc/lib/dma-noncoherent.c @@ -77,26 +77,26 @@ static DEFINE_SPINLOCK(consistent_lock); * the amount of RAM found at boot time.) I would imagine that get_vm_area() * would have to initialise this each time prior to calling vm_region_alloc(). */ -struct vm_region { +struct ppc_vm_region { struct list_head vm_list; unsigned long vm_start; unsigned long vm_end; }; -static struct vm_region consistent_head = { +static struct ppc_vm_region consistent_head = { .vm_list = LIST_HEAD_INIT(consistent_head.vm_list), .vm_start = CONSISTENT_BASE, .vm_end = CONSISTENT_END, }; -static struct vm_region * -vm_region_alloc(struct vm_region *head, size_t size, gfp_t gfp) +static struct ppc_vm_region * +ppc_vm_region_alloc(struct ppc_vm_region *head, size_t size, gfp_t gfp) { unsigned long addr = head->vm_start, end = head->vm_end - size; unsigned long flags; - struct vm_region *c, *new; + struct ppc_vm_region *c, *new; - new = kmalloc(sizeof(struct vm_region), gfp); + new = kmalloc(sizeof(struct ppc_vm_region), gfp); if (!new) goto out; @@ -130,9 +130,9 @@ vm_region_alloc(struct vm_region *head, size_t size, gfp_t gfp) return NULL; } -static struct vm_region *vm_region_find(struct vm_region *head, unsigned long addr) +static struct ppc_vm_region *ppc_vm_region_find(struct ppc_vm_region *head, unsigned long addr) { - struct vm_region *c; + struct ppc_vm_region *c; list_for_each_entry(c, &head->vm_list, vm_list) { if (c->vm_start == addr) @@ -151,7 +151,7 @@ void * __dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp) { struct page *page; - struct vm_region *c; + struct ppc_vm_region *c; unsigned long order; u64 mask = 0x00ffffff, limit; /* ISA default */ @@ -191,7 +191,7 @@ __dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp) /* * Allocate a virtual address in the consistent mapping region. 
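Aside from the feature-fixup changes (and the cmpldi cr1 operand fix), the dma-noncoherent.c hunks are a mechanical rename of the driver's private region tracker to ppc_vm_region, presumably to avoid clashing with the generic struct vm_region name; the allocator itself still hands out pieces of the CONSISTENT_BASE..CONSISTENT_END window first-fit from a sorted list. The snippet below is only a loose, self-contained sketch of that first-fit idea; the list handling, locking and error paths in the kernel version differ, and all names and constants here are invented.

/*
 * Illustrative sketch only: a minimal first-fit range allocator in the
 * spirit of ppc_vm_region_alloc().  head describes the whole window,
 * head->next lists busy regions sorted by start address.
 */
#include <stdio.h>
#include <stdlib.h>

struct region {
        unsigned long start, end;       /* [start, end) busy range */
        struct region *next;            /* sorted by start */
};

static unsigned long region_alloc(struct region *head, unsigned long size)
{
        unsigned long addr = head->start;
        struct region *c, *prev = head, *new;

        for (c = head->next; c; prev = c, c = c->next) {
                if (addr + size <= c->start)
                        break;                  /* hole before c is big enough */
                addr = c->end;                  /* otherwise try after c */
        }
        if (addr + size > head->end)
                return 0;                       /* window exhausted */

        new = malloc(sizeof(*new));
        if (!new)
                return 0;
        new->start = addr;
        new->end = addr + size;
        new->next = c;
        prev->next = new;
        return addr;
}

int main(void)
{
        struct region window = { 0xff100000UL, 0xffe00000UL, NULL };

        printf("%#lx\n", region_alloc(&window, 0x4000));
        printf("%#lx\n", region_alloc(&window, 0x10000));
        return 0;
}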
*/ - c = vm_region_alloc(&consistent_head, size, + c = ppc_vm_region_alloc(&consistent_head, size, gfp & ~(__GFP_DMA | __GFP_HIGHMEM)); if (c) { unsigned long vaddr = c->vm_start; @@ -239,7 +239,7 @@ EXPORT_SYMBOL(__dma_alloc_coherent); */ void __dma_free_coherent(size_t size, void *vaddr) { - struct vm_region *c; + struct ppc_vm_region *c; unsigned long flags, addr; pte_t *ptep; @@ -247,7 +247,7 @@ void __dma_free_coherent(size_t size, void *vaddr) spin_lock_irqsave(&consistent_lock, flags); - c = vm_region_find(&consistent_head, (unsigned long)vaddr); + c = ppc_vm_region_find(&consistent_head, (unsigned long)vaddr); if (!c) goto no_area; @@ -320,7 +320,6 @@ static int __init dma_alloc_init(void) ret = -ENOMEM; break; } - WARN_ON(!pmd_none(*pmd)); pte = pte_alloc_kernel(pmd, CONSISTENT_BASE); if (!pte) { diff --git a/arch/powerpc/lib/memcpy_64.S b/arch/powerpc/lib/memcpy_64.S index 3f131129d1c1..fe2d34e5332d 100644 --- a/arch/powerpc/lib/memcpy_64.S +++ b/arch/powerpc/lib/memcpy_64.S @@ -18,11 +18,23 @@ _GLOBAL(memcpy) andi. r6,r6,7 dcbt 0,r4 blt cr1,.Lshort_copy +/* Below we want to nop out the bne if we're on a CPU that has the + CPU_FTR_UNALIGNED_LD_STD bit set and the CPU_FTR_CP_USE_DCBTZ bit + cleared. + At the time of writing the only CPU that has this combination of bits + set is Power6. */ +BEGIN_FTR_SECTION + nop +FTR_SECTION_ELSE bne .Ldst_unaligned +ALT_FTR_SECTION_END(CPU_FTR_UNALIGNED_LD_STD | CPU_FTR_CP_USE_DCBTZ, \ + CPU_FTR_UNALIGNED_LD_STD) .Ldst_aligned: - andi. r0,r4,7 addi r3,r3,-16 +BEGIN_FTR_SECTION + andi. r0,r4,7 bne .Lsrc_unaligned +END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) srdi r7,r5,4 ld r9,0(r4) addi r4,r4,-8 @@ -131,7 +143,7 @@ _GLOBAL(memcpy) PPC_MTOCRF 0x01,r6 # put #bytes to 8B bdry into cr7 subf r5,r6,r5 li r7,0 - cmpldi r1,r5,16 + cmpldi cr1,r5,16 bf cr7*4+3,1f lbz r0,0(r4) stb r0,0(r3) diff --git a/arch/powerpc/math-emu/Makefile b/arch/powerpc/math-emu/Makefile index 03aa98dd9f0a..f9e506a735ae 100644 --- a/arch/powerpc/math-emu/Makefile +++ b/arch/powerpc/math-emu/Makefile @@ -11,6 +11,8 @@ obj-$(CONFIG_MATH_EMULATION) += fabs.o fadd.o fadds.o fcmpo.o fcmpu.o \ mcrfs.o mffs.o mtfsb0.o mtfsb1.o \ mtfsf.o mtfsfi.o stfiwx.o stfs.o +obj-$(CONFIG_SPE) += math_efp.o + CFLAGS_fabs.o = -fno-builtin-fabs CFLAGS_math.o = -fno-builtin-fabs diff --git a/arch/powerpc/math-emu/fadd.c b/arch/powerpc/math-emu/fadd.c index 04d3b4aa32ce..0158a16e2b82 100644 --- a/arch/powerpc/math-emu/fadd.c +++ b/arch/powerpc/math-emu/fadd.c @@ -13,7 +13,6 @@ fadd(void *frD, void *frA, void *frB) FP_DECL_D(B); FP_DECL_D(R); FP_DECL_EX; - int ret = 0; #ifdef DEBUG printk("%s: %p %p %p\n", __func__, frD, frA, frB); diff --git a/arch/powerpc/math-emu/fcmpo.c b/arch/powerpc/math-emu/fcmpo.c index b5dc4498cd71..5bce011c2aec 100644 --- a/arch/powerpc/math-emu/fcmpo.c +++ b/arch/powerpc/math-emu/fcmpo.c @@ -14,7 +14,6 @@ fcmpo(u32 *ccr, int crfD, void *frA, void *frB) FP_DECL_EX; int code[4] = { (1 << 3), (1 << 1), (1 << 2), (1 << 0) }; long cmp; - int ret = 0; #ifdef DEBUG printk("%s: %p (%08x) %d %p %p\n", __func__, ccr, *ccr, crfD, frA, frB); @@ -29,7 +28,7 @@ fcmpo(u32 *ccr, int crfD, void *frA, void *frB) #endif if (A_c == FP_CLS_NAN || B_c == FP_CLS_NAN) - ret |= EFLAG_VXVC; + FP_SET_EXCEPTION(EFLAG_VXVC); FP_CMP_D(cmp, A, B, 2); cmp = code[(cmp + 1) & 3]; @@ -44,5 +43,5 @@ fcmpo(u32 *ccr, int crfD, void *frA, void *frB) printk("CR: %08x\n", *ccr); #endif - return ret; + return FP_CUR_EXCEPTIONS; } diff --git a/arch/powerpc/math-emu/fdiv.c b/arch/powerpc/math-emu/fdiv.c index 
2db15097d98e..a29239c05e3e 100644 --- a/arch/powerpc/math-emu/fdiv.c +++ b/arch/powerpc/math-emu/fdiv.c @@ -13,7 +13,6 @@ fdiv(void *frD, void *frA, void *frB) FP_DECL_D(B); FP_DECL_D(R); FP_DECL_EX; - int ret = 0; #ifdef DEBUG printk("%s: %p %p %p\n", __func__, frD, frA, frB); @@ -28,22 +27,22 @@ fdiv(void *frD, void *frA, void *frB) #endif if (A_c == FP_CLS_ZERO && B_c == FP_CLS_ZERO) { - ret |= EFLAG_VXZDZ; + FP_SET_EXCEPTION(EFLAG_VXZDZ); #ifdef DEBUG printk("%s: FPSCR_VXZDZ raised\n", __func__); #endif } if (A_c == FP_CLS_INF && B_c == FP_CLS_INF) { - ret |= EFLAG_VXIDI; + FP_SET_EXCEPTION(EFLAG_VXIDI); #ifdef DEBUG printk("%s: FPSCR_VXIDI raised\n", __func__); #endif } if (B_c == FP_CLS_ZERO && A_c != FP_CLS_ZERO) { - ret |= EFLAG_DIVZERO; + FP_SET_EXCEPTION(EFLAG_DIVZERO); if (__FPU_TRAP_P(EFLAG_DIVZERO)) - return ret; + return FP_CUR_EXCEPTIONS; } FP_DIV_D(R, A, B); diff --git a/arch/powerpc/math-emu/fdivs.c b/arch/powerpc/math-emu/fdivs.c index 797f6a9a20b5..526bc261275f 100644 --- a/arch/powerpc/math-emu/fdivs.c +++ b/arch/powerpc/math-emu/fdivs.c @@ -14,7 +14,6 @@ fdivs(void *frD, void *frA, void *frB) FP_DECL_D(B); FP_DECL_D(R); FP_DECL_EX; - int ret = 0; #ifdef DEBUG printk("%s: %p %p %p\n", __func__, frD, frA, frB); @@ -29,22 +28,22 @@ fdivs(void *frD, void *frA, void *frB) #endif if (A_c == FP_CLS_ZERO && B_c == FP_CLS_ZERO) { - ret |= EFLAG_VXZDZ; + FP_SET_EXCEPTION(EFLAG_VXZDZ); #ifdef DEBUG printk("%s: FPSCR_VXZDZ raised\n", __func__); #endif } if (A_c == FP_CLS_INF && B_c == FP_CLS_INF) { - ret |= EFLAG_VXIDI; + FP_SET_EXCEPTION(EFLAG_VXIDI); #ifdef DEBUG printk("%s: FPSCR_VXIDI raised\n", __func__); #endif } if (B_c == FP_CLS_ZERO && A_c != FP_CLS_ZERO) { - ret |= EFLAG_DIVZERO; + FP_SET_EXCEPTION(EFLAG_DIVZERO); if (__FPU_TRAP_P(EFLAG_DIVZERO)) - return ret; + return FP_CUR_EXCEPTIONS; } FP_DIV_D(R, A, B); diff --git a/arch/powerpc/math-emu/fmadd.c b/arch/powerpc/math-emu/fmadd.c index 925313aa6f82..8c3f20aa5a95 100644 --- a/arch/powerpc/math-emu/fmadd.c +++ b/arch/powerpc/math-emu/fmadd.c @@ -15,7 +15,6 @@ fmadd(void *frD, void *frA, void *frB, void *frC) FP_DECL_D(C); FP_DECL_D(T); FP_DECL_EX; - int ret = 0; #ifdef DEBUG printk("%s: %p %p %p %p\n", __func__, frD, frA, frB, frC); @@ -33,12 +32,12 @@ fmadd(void *frD, void *frA, void *frB, void *frC) if ((A_c == FP_CLS_INF && C_c == FP_CLS_ZERO) || (A_c == FP_CLS_ZERO && C_c == FP_CLS_INF)) - ret |= EFLAG_VXIMZ; + FP_SET_EXCEPTION(EFLAG_VXIMZ); FP_MUL_D(T, A, C); if (T_s != B_s && T_c == FP_CLS_INF && B_c == FP_CLS_INF) - ret |= EFLAG_VXISI; + FP_SET_EXCEPTION(EFLAG_VXISI); FP_ADD_D(R, T, B); diff --git a/arch/powerpc/math-emu/fmadds.c b/arch/powerpc/math-emu/fmadds.c index aea80ef79399..794fb31e59d1 100644 --- a/arch/powerpc/math-emu/fmadds.c +++ b/arch/powerpc/math-emu/fmadds.c @@ -16,7 +16,6 @@ fmadds(void *frD, void *frA, void *frB, void *frC) FP_DECL_D(C); FP_DECL_D(T); FP_DECL_EX; - int ret = 0; #ifdef DEBUG printk("%s: %p %p %p %p\n", __func__, frD, frA, frB, frC); @@ -34,12 +33,12 @@ fmadds(void *frD, void *frA, void *frB, void *frC) if ((A_c == FP_CLS_INF && C_c == FP_CLS_ZERO) || (A_c == FP_CLS_ZERO && C_c == FP_CLS_INF)) - ret |= EFLAG_VXIMZ; + FP_SET_EXCEPTION(EFLAG_VXIMZ); FP_MUL_D(T, A, C); if (T_s != B_s && T_c == FP_CLS_INF && B_c == FP_CLS_INF) - ret |= EFLAG_VXISI; + FP_SET_EXCEPTION(EFLAG_VXISI); FP_ADD_D(R, T, B); diff --git a/arch/powerpc/math-emu/fmsub.c b/arch/powerpc/math-emu/fmsub.c index a644d525fca6..626f6fed84ac 100644 --- a/arch/powerpc/math-emu/fmsub.c +++ b/arch/powerpc/math-emu/fmsub.c @@ 
-15,7 +15,6 @@ fmsub(void *frD, void *frA, void *frB, void *frC) FP_DECL_D(C); FP_DECL_D(T); FP_DECL_EX; - int ret = 0; #ifdef DEBUG printk("%s: %p %p %p %p\n", __func__, frD, frA, frB, frC); @@ -33,7 +32,7 @@ fmsub(void *frD, void *frA, void *frB, void *frC) if ((A_c == FP_CLS_INF && C_c == FP_CLS_ZERO) || (A_c == FP_CLS_ZERO && C_c == FP_CLS_INF)) - ret |= EFLAG_VXIMZ; + FP_SET_EXCEPTION(EFLAG_VXIMZ); FP_MUL_D(T, A, C); @@ -41,7 +40,7 @@ fmsub(void *frD, void *frA, void *frB, void *frC) B_s ^= 1; if (T_s != B_s && T_c == FP_CLS_INF && B_c == FP_CLS_INF) - ret |= EFLAG_VXISI; + FP_SET_EXCEPTION(EFLAG_VXISI); FP_ADD_D(R, T, B); diff --git a/arch/powerpc/math-emu/fmsubs.c b/arch/powerpc/math-emu/fmsubs.c index 2fdeeb9bb569..3425bc899760 100644 --- a/arch/powerpc/math-emu/fmsubs.c +++ b/arch/powerpc/math-emu/fmsubs.c @@ -16,7 +16,6 @@ fmsubs(void *frD, void *frA, void *frB, void *frC) FP_DECL_D(C); FP_DECL_D(T); FP_DECL_EX; - int ret = 0; #ifdef DEBUG printk("%s: %p %p %p %p\n", __func__, frD, frA, frB, frC); @@ -34,7 +33,7 @@ fmsubs(void *frD, void *frA, void *frB, void *frC) if ((A_c == FP_CLS_INF && C_c == FP_CLS_ZERO) || (A_c == FP_CLS_ZERO && C_c == FP_CLS_INF)) - ret |= EFLAG_VXIMZ; + FP_SET_EXCEPTION(EFLAG_VXIMZ); FP_MUL_D(T, A, C); @@ -42,7 +41,7 @@ fmsubs(void *frD, void *frA, void *frB, void *frC) B_s ^= 1; if (T_s != B_s && T_c == FP_CLS_INF && B_c == FP_CLS_INF) - ret |= EFLAG_VXISI; + FP_SET_EXCEPTION(EFLAG_VXISI); FP_ADD_D(R, T, B); diff --git a/arch/powerpc/math-emu/fmul.c b/arch/powerpc/math-emu/fmul.c index 391fd17d3440..2c1929779892 100644 --- a/arch/powerpc/math-emu/fmul.c +++ b/arch/powerpc/math-emu/fmul.c @@ -13,7 +13,6 @@ fmul(void *frD, void *frA, void *frB) FP_DECL_D(B); FP_DECL_D(R); FP_DECL_EX; - int ret = 0; #ifdef DEBUG printk("%s: %p %p %p\n", __func__, frD, frA, frB); @@ -31,7 +30,7 @@ fmul(void *frD, void *frA, void *frB) if ((A_c == FP_CLS_INF && B_c == FP_CLS_ZERO) || (A_c == FP_CLS_ZERO && B_c == FP_CLS_INF)) - ret |= EFLAG_VXIMZ; + FP_SET_EXCEPTION(EFLAG_VXIMZ); FP_MUL_D(R, A, B); diff --git a/arch/powerpc/math-emu/fmuls.c b/arch/powerpc/math-emu/fmuls.c index 2d3ec5f7da20..f5ad5c9c77d0 100644 --- a/arch/powerpc/math-emu/fmuls.c +++ b/arch/powerpc/math-emu/fmuls.c @@ -14,7 +14,6 @@ fmuls(void *frD, void *frA, void *frB) FP_DECL_D(B); FP_DECL_D(R); FP_DECL_EX; - int ret = 0; #ifdef DEBUG printk("%s: %p %p %p\n", __func__, frD, frA, frB); @@ -32,7 +31,7 @@ fmuls(void *frD, void *frA, void *frB) if ((A_c == FP_CLS_INF && B_c == FP_CLS_ZERO) || (A_c == FP_CLS_ZERO && B_c == FP_CLS_INF)) - ret |= EFLAG_VXIMZ; + FP_SET_EXCEPTION(EFLAG_VXIMZ); FP_MUL_D(R, A, B); diff --git a/arch/powerpc/math-emu/fnmadd.c b/arch/powerpc/math-emu/fnmadd.c index 2497b86494e5..e817bc5453ef 100644 --- a/arch/powerpc/math-emu/fnmadd.c +++ b/arch/powerpc/math-emu/fnmadd.c @@ -15,7 +15,6 @@ fnmadd(void *frD, void *frA, void *frB, void *frC) FP_DECL_D(C); FP_DECL_D(T); FP_DECL_EX; - int ret = 0; #ifdef DEBUG printk("%s: %p %p %p %p\n", __func__, frD, frA, frB, frC); @@ -33,12 +32,12 @@ fnmadd(void *frD, void *frA, void *frB, void *frC) if ((A_c == FP_CLS_INF && C_c == FP_CLS_ZERO) || (A_c == FP_CLS_ZERO && C_c == FP_CLS_INF)) - ret |= EFLAG_VXIMZ; + FP_SET_EXCEPTION(EFLAG_VXIMZ); FP_MUL_D(T, A, C); if (T_s != B_s && T_c == FP_CLS_INF && B_c == FP_CLS_INF) - ret |= EFLAG_VXISI; + FP_SET_EXCEPTION(EFLAG_VXISI); FP_ADD_D(R, T, B); diff --git a/arch/powerpc/math-emu/fnmadds.c b/arch/powerpc/math-emu/fnmadds.c index ee9d71e0b376..4db4b7d9ba8d 100644 --- a/arch/powerpc/math-emu/fnmadds.c +++ 
b/arch/powerpc/math-emu/fnmadds.c @@ -16,7 +16,6 @@ fnmadds(void *frD, void *frA, void *frB, void *frC) FP_DECL_D(C); FP_DECL_D(T); FP_DECL_EX; - int ret = 0; #ifdef DEBUG printk("%s: %p %p %p %p\n", __func__, frD, frA, frB, frC); @@ -34,12 +33,12 @@ fnmadds(void *frD, void *frA, void *frB, void *frC) if ((A_c == FP_CLS_INF && C_c == FP_CLS_ZERO) || (A_c == FP_CLS_ZERO && C_c == FP_CLS_INF)) - ret |= EFLAG_VXIMZ; + FP_SET_EXCEPTION(EFLAG_VXIMZ); FP_MUL_D(T, A, C); if (T_s != B_s && T_c == FP_CLS_INF && B_c == FP_CLS_INF) - ret |= EFLAG_VXISI; + FP_SET_EXCEPTION(EFLAG_VXISI); FP_ADD_D(R, T, B); diff --git a/arch/powerpc/math-emu/fnmsub.c b/arch/powerpc/math-emu/fnmsub.c index 3885a77acc93..f65979fa770e 100644 --- a/arch/powerpc/math-emu/fnmsub.c +++ b/arch/powerpc/math-emu/fnmsub.c @@ -15,7 +15,6 @@ fnmsub(void *frD, void *frA, void *frB, void *frC) FP_DECL_D(C); FP_DECL_D(T); FP_DECL_EX; - int ret = 0; #ifdef DEBUG printk("%s: %p %p %p %p\n", __func__, frD, frA, frB, frC); @@ -33,7 +32,7 @@ fnmsub(void *frD, void *frA, void *frB, void *frC) if ((A_c == FP_CLS_INF && C_c == FP_CLS_ZERO) || (A_c == FP_CLS_ZERO && C_c == FP_CLS_INF)) - ret |= EFLAG_VXIMZ; + FP_SET_EXCEPTION(EFLAG_VXIMZ); FP_MUL_D(T, A, C); @@ -41,7 +40,7 @@ fnmsub(void *frD, void *frA, void *frB, void *frC) B_s ^= 1; if (T_s != B_s && T_c == FP_CLS_INF && B_c == FP_CLS_INF) - ret |= EFLAG_VXISI; + FP_SET_EXCEPTION(EFLAG_VXISI); FP_ADD_D(R, T, B); diff --git a/arch/powerpc/math-emu/fnmsubs.c b/arch/powerpc/math-emu/fnmsubs.c index f835dfeb0fd1..9021dacc03b8 100644 --- a/arch/powerpc/math-emu/fnmsubs.c +++ b/arch/powerpc/math-emu/fnmsubs.c @@ -16,7 +16,6 @@ fnmsubs(void *frD, void *frA, void *frB, void *frC) FP_DECL_D(C); FP_DECL_D(T); FP_DECL_EX; - int ret = 0; #ifdef DEBUG printk("%s: %p %p %p %p\n", __func__, frD, frA, frB, frC); @@ -34,7 +33,7 @@ fnmsubs(void *frD, void *frA, void *frB, void *frC) if ((A_c == FP_CLS_INF && C_c == FP_CLS_ZERO) || (A_c == FP_CLS_ZERO && C_c == FP_CLS_INF)) - ret |= EFLAG_VXIMZ; + FP_SET_EXCEPTION(EFLAG_VXIMZ); FP_MUL_D(T, A, C); @@ -42,7 +41,7 @@ fnmsubs(void *frD, void *frA, void *frB, void *frC) B_s ^= 1; if (T_s != B_s && T_c == FP_CLS_INF && B_c == FP_CLS_INF) - ret |= EFLAG_VXISI; + FP_SET_EXCEPTION(EFLAG_VXISI); FP_ADD_D(R, T, B); diff --git a/arch/powerpc/math-emu/fsqrt.c b/arch/powerpc/math-emu/fsqrt.c index 3e90072693a0..a55fc7d49983 100644 --- a/arch/powerpc/math-emu/fsqrt.c +++ b/arch/powerpc/math-emu/fsqrt.c @@ -12,7 +12,6 @@ fsqrt(void *frD, void *frB) FP_DECL_D(B); FP_DECL_D(R); FP_DECL_EX; - int ret = 0; #ifdef DEBUG printk("%s: %p %p %p %p\n", __func__, frD, frB); @@ -25,9 +24,9 @@ fsqrt(void *frD, void *frB) #endif if (B_s && B_c != FP_CLS_ZERO) - ret |= EFLAG_VXSQRT; + FP_SET_EXCEPTION(EFLAG_VXSQRT); if (B_c == FP_CLS_NAN) - ret |= EFLAG_VXSNAN; + FP_SET_EXCEPTION(EFLAG_VXSNAN); FP_SQRT_D(R, B); diff --git a/arch/powerpc/math-emu/fsqrts.c b/arch/powerpc/math-emu/fsqrts.c index 2843be986e2e..31dccbfc39ff 100644 --- a/arch/powerpc/math-emu/fsqrts.c +++ b/arch/powerpc/math-emu/fsqrts.c @@ -13,7 +13,6 @@ fsqrts(void *frD, void *frB) FP_DECL_D(B); FP_DECL_D(R); FP_DECL_EX; - int ret = 0; #ifdef DEBUG printk("%s: %p %p %p %p\n", __func__, frD, frB); @@ -26,9 +25,9 @@ fsqrts(void *frD, void *frB) #endif if (B_s && B_c != FP_CLS_ZERO) - ret |= EFLAG_VXSQRT; + FP_SET_EXCEPTION(EFLAG_VXSQRT); if (B_c == FP_CLS_NAN) - ret |= EFLAG_VXSNAN; + FP_SET_EXCEPTION(EFLAG_VXSNAN); FP_SQRT_D(R, B); diff --git a/arch/powerpc/math-emu/fsub.c b/arch/powerpc/math-emu/fsub.c index 
78b09446a0e1..02c5dff458ba 100644 --- a/arch/powerpc/math-emu/fsub.c +++ b/arch/powerpc/math-emu/fsub.c @@ -13,7 +13,6 @@ fsub(void *frD, void *frA, void *frB) FP_DECL_D(B); FP_DECL_D(R); FP_DECL_EX; - int ret = 0; #ifdef DEBUG printk("%s: %p %p %p\n", __func__, frD, frA, frB); @@ -31,7 +30,7 @@ fsub(void *frD, void *frA, void *frB) B_s ^= 1; if (A_s != B_s && A_c == FP_CLS_INF && B_c == FP_CLS_INF) - ret |= EFLAG_VXISI; + FP_SET_EXCEPTION(EFLAG_VXISI); FP_ADD_D(R, A, B); diff --git a/arch/powerpc/math-emu/fsubs.c b/arch/powerpc/math-emu/fsubs.c index d3bf90863cf2..5d9b18c35e07 100644 --- a/arch/powerpc/math-emu/fsubs.c +++ b/arch/powerpc/math-emu/fsubs.c @@ -14,7 +14,6 @@ fsubs(void *frD, void *frA, void *frB) FP_DECL_D(B); FP_DECL_D(R); FP_DECL_EX; - int ret = 0; #ifdef DEBUG printk("%s: %p %p %p\n", __func__, frD, frA, frB); @@ -32,7 +31,7 @@ fsubs(void *frD, void *frA, void *frB) B_s ^= 1; if (A_s != B_s && A_c == FP_CLS_INF && B_c == FP_CLS_INF) - ret |= EFLAG_VXISI; + FP_SET_EXCEPTION(EFLAG_VXISI); FP_ADD_D(R, A, B); diff --git a/arch/powerpc/math-emu/math_efp.c b/arch/powerpc/math-emu/math_efp.c new file mode 100644 index 000000000000..41f4ef30e480 --- /dev/null +++ b/arch/powerpc/math-emu/math_efp.c @@ -0,0 +1,720 @@ +/* + * arch/powerpc/math-emu/math_efp.c + * + * Copyright (C) 2006-2008 Freescale Semiconductor, Inc. All rights reserved. + * + * Author: Ebony Zhu, <ebony.zhu@freescale.com> + * Yu Liu, <yu.liu@freescale.com> + * + * Derived from arch/alpha/math-emu/math.c + * arch/powerpc/math-emu/math.c + * + * Description: + * This file is the exception handler to make E500 SPE instructions + * fully comply with IEEE-754 floating point standard. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
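The recurring change across these math-emu files is dropping the per-function int ret accumulator in favour of the exception word that FP_DECL_EX declares: exceptions raised by hand via FP_SET_EXCEPTION() and those raised inside the FP_ADD_D()/FP_DIV_D() macros now land in the same place, and FP_CUR_EXCEPTIONS reports the union on return. The fragment below sketches that pattern with simplified stand-in macros; the real definitions live in the soft-fp headers and carry more state.

/*
 * Sketch of the exception-accumulation pattern the math-emu conversion
 * relies on.  The stand-in macros below only show the shape; they are
 * not the kernel's definitions, and the flag values are invented.
 */
#include <stdio.h>

#define EFLAG_VXISI             0x01    /* inf - inf, value invented for the demo */
#define EFLAG_OVERFLOW          0x02

#define FP_DECL_EX              int _fex = 0
#define FP_SET_EXCEPTION(fe)    (_fex |= (fe))
#define FP_CUR_EXCEPTIONS       (_fex)

static int demo_fadd(int a_is_inf, int b_is_neg_inf)
{
        FP_DECL_EX;

        if (a_is_inf && b_is_neg_inf)
                FP_SET_EXCEPTION(EFLAG_VXISI);  /* flag it, keep going */

        /* ... the FP_ADD_D() macro would also OR its flags into _fex ... */

        return FP_CUR_EXCEPTIONS;               /* everything in one place */
}

int main(void)
{
        printf("flags = %#x\n", demo_fadd(1, 1));
        return 0;
}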
+ */ + +#include <linux/types.h> + +#include <asm/uaccess.h> +#include <asm/reg.h> + +#define FP_EX_BOOKE_E500_SPE +#include <asm/sfp-machine.h> + +#include <math-emu/soft-fp.h> +#include <math-emu/single.h> +#include <math-emu/double.h> + +#define EFAPU 0x4 + +#define VCT 0x4 +#define SPFP 0x6 +#define DPFP 0x7 + +#define EFSADD 0x2c0 +#define EFSSUB 0x2c1 +#define EFSABS 0x2c4 +#define EFSNABS 0x2c5 +#define EFSNEG 0x2c6 +#define EFSMUL 0x2c8 +#define EFSDIV 0x2c9 +#define EFSCMPGT 0x2cc +#define EFSCMPLT 0x2cd +#define EFSCMPEQ 0x2ce +#define EFSCFD 0x2cf +#define EFSCFSI 0x2d1 +#define EFSCTUI 0x2d4 +#define EFSCTSI 0x2d5 +#define EFSCTUF 0x2d6 +#define EFSCTSF 0x2d7 +#define EFSCTUIZ 0x2d8 +#define EFSCTSIZ 0x2da + +#define EVFSADD 0x280 +#define EVFSSUB 0x281 +#define EVFSABS 0x284 +#define EVFSNABS 0x285 +#define EVFSNEG 0x286 +#define EVFSMUL 0x288 +#define EVFSDIV 0x289 +#define EVFSCMPGT 0x28c +#define EVFSCMPLT 0x28d +#define EVFSCMPEQ 0x28e +#define EVFSCTUI 0x294 +#define EVFSCTSI 0x295 +#define EVFSCTUF 0x296 +#define EVFSCTSF 0x297 +#define EVFSCTUIZ 0x298 +#define EVFSCTSIZ 0x29a + +#define EFDADD 0x2e0 +#define EFDSUB 0x2e1 +#define EFDABS 0x2e4 +#define EFDNABS 0x2e5 +#define EFDNEG 0x2e6 +#define EFDMUL 0x2e8 +#define EFDDIV 0x2e9 +#define EFDCTUIDZ 0x2ea +#define EFDCTSIDZ 0x2eb +#define EFDCMPGT 0x2ec +#define EFDCMPLT 0x2ed +#define EFDCMPEQ 0x2ee +#define EFDCFS 0x2ef +#define EFDCTUI 0x2f4 +#define EFDCTSI 0x2f5 +#define EFDCTUF 0x2f6 +#define EFDCTSF 0x2f7 +#define EFDCTUIZ 0x2f8 +#define EFDCTSIZ 0x2fa + +#define AB 2 +#define XA 3 +#define XB 4 +#define XCR 5 +#define NOTYPE 0 + +#define SIGN_BIT_S (1UL << 31) +#define SIGN_BIT_D (1ULL << 63) +#define FP_EX_MASK (FP_EX_INEXACT | FP_EX_INVALID | FP_EX_DIVZERO | \ + FP_EX_UNDERFLOW | FP_EX_OVERFLOW) + +union dw_union { + u64 dp[1]; + u32 wp[2]; +}; + +static unsigned long insn_type(unsigned long speinsn) +{ + unsigned long ret = NOTYPE; + + switch (speinsn & 0x7ff) { + case EFSABS: ret = XA; break; + case EFSADD: ret = AB; break; + case EFSCFD: ret = XB; break; + case EFSCMPEQ: ret = XCR; break; + case EFSCMPGT: ret = XCR; break; + case EFSCMPLT: ret = XCR; break; + case EFSCTSF: ret = XB; break; + case EFSCTSI: ret = XB; break; + case EFSCTSIZ: ret = XB; break; + case EFSCTUF: ret = XB; break; + case EFSCTUI: ret = XB; break; + case EFSCTUIZ: ret = XB; break; + case EFSDIV: ret = AB; break; + case EFSMUL: ret = AB; break; + case EFSNABS: ret = XA; break; + case EFSNEG: ret = XA; break; + case EFSSUB: ret = AB; break; + case EFSCFSI: ret = XB; break; + + case EVFSABS: ret = XA; break; + case EVFSADD: ret = AB; break; + case EVFSCMPEQ: ret = XCR; break; + case EVFSCMPGT: ret = XCR; break; + case EVFSCMPLT: ret = XCR; break; + case EVFSCTSF: ret = XB; break; + case EVFSCTSI: ret = XB; break; + case EVFSCTSIZ: ret = XB; break; + case EVFSCTUF: ret = XB; break; + case EVFSCTUI: ret = XB; break; + case EVFSCTUIZ: ret = XB; break; + case EVFSDIV: ret = AB; break; + case EVFSMUL: ret = AB; break; + case EVFSNABS: ret = XA; break; + case EVFSNEG: ret = XA; break; + case EVFSSUB: ret = AB; break; + + case EFDABS: ret = XA; break; + case EFDADD: ret = AB; break; + case EFDCFS: ret = XB; break; + case EFDCMPEQ: ret = XCR; break; + case EFDCMPGT: ret = XCR; break; + case EFDCMPLT: ret = XCR; break; + case EFDCTSF: ret = XB; break; + case EFDCTSI: ret = XB; break; + case EFDCTSIDZ: ret = XB; break; + case EFDCTSIZ: ret = XB; break; + case EFDCTUF: ret = XB; break; + case EFDCTUI: ret = XB; break; + case EFDCTUIDZ: ret = XB; 
break; + case EFDCTUIZ: ret = XB; break; + case EFDDIV: ret = AB; break; + case EFDMUL: ret = AB; break; + case EFDNABS: ret = XA; break; + case EFDNEG: ret = XA; break; + case EFDSUB: ret = AB; break; + + default: + printk(KERN_ERR "\nOoops! SPE instruction no type found."); + printk(KERN_ERR "\ninst code: %08lx\n", speinsn); + } + + return ret; +} + +int do_spe_mathemu(struct pt_regs *regs) +{ + FP_DECL_EX; + int IR, cmp; + + unsigned long type, func, fc, fa, fb, src, speinsn; + union dw_union vc, va, vb; + + if (get_user(speinsn, (unsigned int __user *) regs->nip)) + return -EFAULT; + if ((speinsn >> 26) != EFAPU) + return -EINVAL; /* not an spe instruction */ + + type = insn_type(speinsn); + if (type == NOTYPE) + return -ENOSYS; + + func = speinsn & 0x7ff; + fc = (speinsn >> 21) & 0x1f; + fa = (speinsn >> 16) & 0x1f; + fb = (speinsn >> 11) & 0x1f; + src = (speinsn >> 5) & 0x7; + + vc.wp[0] = current->thread.evr[fc]; + vc.wp[1] = regs->gpr[fc]; + va.wp[0] = current->thread.evr[fa]; + va.wp[1] = regs->gpr[fa]; + vb.wp[0] = current->thread.evr[fb]; + vb.wp[1] = regs->gpr[fb]; + + __FPU_FPSCR = mfspr(SPRN_SPEFSCR); + +#ifdef DEBUG + printk("speinsn:%08lx spefscr:%08lx\n", speinsn, __FPU_FPSCR); + printk("vc: %08x %08x\n", vc.wp[0], vc.wp[1]); + printk("va: %08x %08x\n", va.wp[0], va.wp[1]); + printk("vb: %08x %08x\n", vb.wp[0], vb.wp[1]); +#endif + + switch (src) { + case SPFP: { + FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR); + + switch (type) { + case AB: + case XCR: + FP_UNPACK_SP(SA, va.wp + 1); + case XB: + FP_UNPACK_SP(SB, vb.wp + 1); + break; + case XA: + FP_UNPACK_SP(SA, va.wp + 1); + break; + } + +#ifdef DEBUG + printk("SA: %ld %08lx %ld (%ld)\n", SA_s, SA_f, SA_e, SA_c); + printk("SB: %ld %08lx %ld (%ld)\n", SB_s, SB_f, SB_e, SB_c); +#endif + + switch (func) { + case EFSABS: + vc.wp[1] = va.wp[1] & ~SIGN_BIT_S; + goto update_regs; + + case EFSNABS: + vc.wp[1] = va.wp[1] | SIGN_BIT_S; + goto update_regs; + + case EFSNEG: + vc.wp[1] = va.wp[1] ^ SIGN_BIT_S; + goto update_regs; + + case EFSADD: + FP_ADD_S(SR, SA, SB); + goto pack_s; + + case EFSSUB: + FP_SUB_S(SR, SA, SB); + goto pack_s; + + case EFSMUL: + FP_MUL_S(SR, SA, SB); + goto pack_s; + + case EFSDIV: + FP_DIV_S(SR, SA, SB); + goto pack_s; + + case EFSCMPEQ: + cmp = 0; + goto cmp_s; + + case EFSCMPGT: + cmp = 1; + goto cmp_s; + + case EFSCMPLT: + cmp = -1; + goto cmp_s; + + case EFSCTSF: + case EFSCTUF: + if (!((vb.wp[1] >> 23) == 0xff && ((vb.wp[1] & 0x7fffff) > 0))) { + /* NaN */ + if (((vb.wp[1] >> 23) & 0xff) == 0) { + /* denorm */ + vc.wp[1] = 0x0; + } else if ((vb.wp[1] >> 31) == 0) { + /* positive normal */ + vc.wp[1] = (func == EFSCTSF) ? + 0x7fffffff : 0xffffffff; + } else { /* negative normal */ + vc.wp[1] = (func == EFSCTSF) ? 
+ 0x80000000 : 0x0; + } + } else { /* rB is NaN */ + vc.wp[1] = 0x0; + } + goto update_regs; + + case EFSCFD: { + FP_DECL_D(DB); + FP_CLEAR_EXCEPTIONS; + FP_UNPACK_DP(DB, vb.dp); +#ifdef DEBUG + printk("DB: %ld %08lx %08lx %ld (%ld)\n", + DB_s, DB_f1, DB_f0, DB_e, DB_c); +#endif + FP_CONV(S, D, 1, 2, SR, DB); + goto pack_s; + } + + case EFSCTSI: + case EFSCTSIZ: + case EFSCTUI: + case EFSCTUIZ: + if (func & 0x4) { + _FP_ROUND(1, SB); + } else { + _FP_ROUND_ZERO(1, SB); + } + FP_TO_INT_S(vc.wp[1], SB, 32, ((func & 0x3) != 0)); + goto update_regs; + + default: + goto illegal; + } + break; + +pack_s: +#ifdef DEBUG + printk("SR: %ld %08lx %ld (%ld)\n", SR_s, SR_f, SR_e, SR_c); +#endif + FP_PACK_SP(vc.wp + 1, SR); + goto update_regs; + +cmp_s: + FP_CMP_S(IR, SA, SB, 3); + if (IR == 3 && (FP_ISSIGNAN_S(SA) || FP_ISSIGNAN_S(SB))) + FP_SET_EXCEPTION(FP_EX_INVALID); + if (IR == cmp) { + IR = 0x4; + } else { + IR = 0; + } + goto update_ccr; + } + + case DPFP: { + FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR); + + switch (type) { + case AB: + case XCR: + FP_UNPACK_DP(DA, va.dp); + case XB: + FP_UNPACK_DP(DB, vb.dp); + break; + case XA: + FP_UNPACK_DP(DA, va.dp); + break; + } + +#ifdef DEBUG + printk("DA: %ld %08lx %08lx %ld (%ld)\n", + DA_s, DA_f1, DA_f0, DA_e, DA_c); + printk("DB: %ld %08lx %08lx %ld (%ld)\n", + DB_s, DB_f1, DB_f0, DB_e, DB_c); +#endif + + switch (func) { + case EFDABS: + vc.dp[0] = va.dp[0] & ~SIGN_BIT_D; + goto update_regs; + + case EFDNABS: + vc.dp[0] = va.dp[0] | SIGN_BIT_D; + goto update_regs; + + case EFDNEG: + vc.dp[0] = va.dp[0] ^ SIGN_BIT_D; + goto update_regs; + + case EFDADD: + FP_ADD_D(DR, DA, DB); + goto pack_d; + + case EFDSUB: + FP_SUB_D(DR, DA, DB); + goto pack_d; + + case EFDMUL: + FP_MUL_D(DR, DA, DB); + goto pack_d; + + case EFDDIV: + FP_DIV_D(DR, DA, DB); + goto pack_d; + + case EFDCMPEQ: + cmp = 0; + goto cmp_d; + + case EFDCMPGT: + cmp = 1; + goto cmp_d; + + case EFDCMPLT: + cmp = -1; + goto cmp_d; + + case EFDCTSF: + case EFDCTUF: + if (!((vb.wp[0] >> 20) == 0x7ff && + ((vb.wp[0] & 0xfffff) > 0 || (vb.wp[1] > 0)))) { + /* not a NaN */ + if (((vb.wp[0] >> 20) & 0x7ff) == 0) { + /* denorm */ + vc.wp[1] = 0x0; + } else if ((vb.wp[0] >> 31) == 0) { + /* positive normal */ + vc.wp[1] = (func == EFDCTSF) ? + 0x7fffffff : 0xffffffff; + } else { /* negative normal */ + vc.wp[1] = (func == EFDCTSF) ? 
+ 0x80000000 : 0x0; + } + } else { /* NaN */ + vc.wp[1] = 0x0; + } + goto update_regs; + + case EFDCFS: { + FP_DECL_S(SB); + FP_CLEAR_EXCEPTIONS; + FP_UNPACK_SP(SB, vb.wp + 1); +#ifdef DEBUG + printk("SB: %ld %08lx %ld (%ld)\n", + SB_s, SB_f, SB_e, SB_c); +#endif + FP_CONV(D, S, 2, 1, DR, SB); + goto pack_d; + } + + case EFDCTUIDZ: + case EFDCTSIDZ: + _FP_ROUND_ZERO(2, DB); + FP_TO_INT_D(vc.dp[0], DB, 64, ((func & 0x1) == 0)); + goto update_regs; + + case EFDCTUI: + case EFDCTSI: + case EFDCTUIZ: + case EFDCTSIZ: + if (func & 0x4) { + _FP_ROUND(2, DB); + } else { + _FP_ROUND_ZERO(2, DB); + } + FP_TO_INT_D(vc.wp[1], DB, 32, ((func & 0x3) != 0)); + goto update_regs; + + default: + goto illegal; + } + break; + +pack_d: +#ifdef DEBUG + printk("DR: %ld %08lx %08lx %ld (%ld)\n", + DR_s, DR_f1, DR_f0, DR_e, DR_c); +#endif + FP_PACK_DP(vc.dp, DR); + goto update_regs; + +cmp_d: + FP_CMP_D(IR, DA, DB, 3); + if (IR == 3 && (FP_ISSIGNAN_D(DA) || FP_ISSIGNAN_D(DB))) + FP_SET_EXCEPTION(FP_EX_INVALID); + if (IR == cmp) { + IR = 0x4; + } else { + IR = 0; + } + goto update_ccr; + + } + + case VCT: { + FP_DECL_S(SA0); FP_DECL_S(SB0); FP_DECL_S(SR0); + FP_DECL_S(SA1); FP_DECL_S(SB1); FP_DECL_S(SR1); + int IR0, IR1; + + switch (type) { + case AB: + case XCR: + FP_UNPACK_SP(SA0, va.wp); + FP_UNPACK_SP(SA1, va.wp + 1); + case XB: + FP_UNPACK_SP(SB0, vb.wp); + FP_UNPACK_SP(SB1, vb.wp + 1); + break; + case XA: + FP_UNPACK_SP(SA0, va.wp); + FP_UNPACK_SP(SA1, va.wp + 1); + break; + } + +#ifdef DEBUG + printk("SA0: %ld %08lx %ld (%ld)\n", SA0_s, SA0_f, SA0_e, SA0_c); + printk("SA1: %ld %08lx %ld (%ld)\n", SA1_s, SA1_f, SA1_e, SA1_c); + printk("SB0: %ld %08lx %ld (%ld)\n", SB0_s, SB0_f, SB0_e, SB0_c); + printk("SB1: %ld %08lx %ld (%ld)\n", SB1_s, SB1_f, SB1_e, SB1_c); +#endif + + switch (func) { + case EVFSABS: + vc.wp[0] = va.wp[0] & ~SIGN_BIT_S; + vc.wp[1] = va.wp[1] & ~SIGN_BIT_S; + goto update_regs; + + case EVFSNABS: + vc.wp[0] = va.wp[0] | SIGN_BIT_S; + vc.wp[1] = va.wp[1] | SIGN_BIT_S; + goto update_regs; + + case EVFSNEG: + vc.wp[0] = va.wp[0] ^ SIGN_BIT_S; + vc.wp[1] = va.wp[1] ^ SIGN_BIT_S; + goto update_regs; + + case EVFSADD: + FP_ADD_S(SR0, SA0, SB0); + FP_ADD_S(SR1, SA1, SB1); + goto pack_vs; + + case EVFSSUB: + FP_SUB_S(SR0, SA0, SB0); + FP_SUB_S(SR1, SA1, SB1); + goto pack_vs; + + case EVFSMUL: + FP_MUL_S(SR0, SA0, SB0); + FP_MUL_S(SR1, SA1, SB1); + goto pack_vs; + + case EVFSDIV: + FP_DIV_S(SR0, SA0, SB0); + FP_DIV_S(SR1, SA1, SB1); + goto pack_vs; + + case EVFSCMPEQ: + cmp = 0; + goto cmp_vs; + + case EVFSCMPGT: + cmp = 1; + goto cmp_vs; + + case EVFSCMPLT: + cmp = -1; + goto cmp_vs; + + case EVFSCTSF: + __asm__ __volatile__ ("mtspr 512, %4\n" + "efsctsf %0, %2\n" + "efsctsf %1, %3\n" + : "=r" (vc.wp[0]), "=r" (vc.wp[1]) + : "r" (vb.wp[0]), "r" (vb.wp[1]), "r" (0)); + goto update_regs; + + case EVFSCTUF: + __asm__ __volatile__ ("mtspr 512, %4\n" + "efsctuf %0, %2\n" + "efsctuf %1, %3\n" + : "=r" (vc.wp[0]), "=r" (vc.wp[1]) + : "r" (vb.wp[0]), "r" (vb.wp[1]), "r" (0)); + goto update_regs; + + case EVFSCTUI: + case EVFSCTSI: + case EVFSCTUIZ: + case EVFSCTSIZ: + if (func & 0x4) { + _FP_ROUND(1, SB0); + _FP_ROUND(1, SB1); + } else { + _FP_ROUND_ZERO(1, SB0); + _FP_ROUND_ZERO(1, SB1); + } + FP_TO_INT_S(vc.wp[0], SB0, 32, ((func & 0x3) != 0)); + FP_TO_INT_S(vc.wp[1], SB1, 32, ((func & 0x3) != 0)); + goto update_regs; + + default: + goto illegal; + } + break; + +pack_vs: +#ifdef DEBUG + printk("SR0: %ld %08lx %ld (%ld)\n", SR0_s, SR0_f, SR0_e, SR0_c); + printk("SR1: %ld %08lx %ld (%ld)\n", SR1_s, SR1_f, 
SR1_e, SR1_c); +#endif + FP_PACK_SP(vc.wp, SR0); + FP_PACK_SP(vc.wp + 1, SR1); + goto update_regs; + +cmp_vs: + { + int ch, cl; + + FP_CMP_S(IR0, SA0, SB0, 3); + FP_CMP_S(IR1, SA1, SB1, 3); + if (IR0 == 3 && (FP_ISSIGNAN_S(SA0) || FP_ISSIGNAN_S(SB0))) + FP_SET_EXCEPTION(FP_EX_INVALID); + if (IR1 == 3 && (FP_ISSIGNAN_S(SA1) || FP_ISSIGNAN_S(SB1))) + FP_SET_EXCEPTION(FP_EX_INVALID); + ch = (IR0 == cmp) ? 1 : 0; + cl = (IR1 == cmp) ? 1 : 0; + IR = (ch << 3) | (cl << 2) | ((ch | cl) << 1) | + ((ch & cl) << 0); + goto update_ccr; + } + } + default: + return -EINVAL; + } + +update_ccr: + regs->ccr &= ~(15 << ((7 - ((speinsn >> 23) & 0x7)) << 2)); + regs->ccr |= (IR << ((7 - ((speinsn >> 23) & 0x7)) << 2)); + +update_regs: + __FPU_FPSCR &= ~FP_EX_MASK; + __FPU_FPSCR |= (FP_CUR_EXCEPTIONS & FP_EX_MASK); + mtspr(SPRN_SPEFSCR, __FPU_FPSCR); + + current->thread.evr[fc] = vc.wp[0]; + regs->gpr[fc] = vc.wp[1]; + +#ifdef DEBUG + printk("ccr = %08lx\n", regs->ccr); + printk("cur exceptions = %08x spefscr = %08lx\n", + FP_CUR_EXCEPTIONS, __FPU_FPSCR); + printk("vc: %08x %08x\n", vc.wp[0], vc.wp[1]); + printk("va: %08x %08x\n", va.wp[0], va.wp[1]); + printk("vb: %08x %08x\n", vb.wp[0], vb.wp[1]); +#endif + + return 0; + +illegal: + printk(KERN_ERR "\nOoops! IEEE-754 compliance handler encountered un-supported instruction.\ninst code: %08lx\n", speinsn); + return -ENOSYS; +} + +int speround_handler(struct pt_regs *regs) +{ + union dw_union fgpr; + int s_lo, s_hi; + unsigned long speinsn, type, fc; + + if (get_user(speinsn, (unsigned int __user *) regs->nip)) + return -EFAULT; + if ((speinsn >> 26) != 4) + return -EINVAL; /* not an spe instruction */ + + type = insn_type(speinsn & 0x7ff); + if (type == XCR) return -ENOSYS; + + fc = (speinsn >> 21) & 0x1f; + s_lo = regs->gpr[fc] & SIGN_BIT_S; + s_hi = current->thread.evr[fc] & SIGN_BIT_S; + fgpr.wp[0] = current->thread.evr[fc]; + fgpr.wp[1] = regs->gpr[fc]; + + __FPU_FPSCR = mfspr(SPRN_SPEFSCR); + + switch ((speinsn >> 5) & 0x7) { + /* Since SPE instructions on E500 core can handle round to nearest + * and round toward zero with IEEE-754 complied, we just need + * to handle round toward +Inf and round toward -Inf by software. 
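For the compare cases, do_spe_mathemu() folds its 4-bit result IR into the condition register at update_ccr: the crfD field picked out of the instruction selects one of eight CR nibbles, with CR0 in the most significant position, hence the (7 - crfD) * 4 shift. A small worked example of just that arithmetic (ordinary userspace C, not kernel code):

/*
 * Worked example of the condition-register update at the end of
 * do_spe_mathemu(): clear the selected 4-bit field, then OR in the
 * new value.  CR field n sits at bit (7 - n) * 4 of the 32-bit word.
 */
#include <stdio.h>

static unsigned long set_cr_field(unsigned long ccr, unsigned int crfd,
                                  unsigned int val4)
{
        unsigned int shift = (7 - (crfd & 0x7)) << 2;

        ccr &= ~(15UL << shift);                /* clear the old nibble */
        ccr |= (unsigned long)(val4 & 0xf) << shift;
        return ccr;
}

int main(void)
{
        /* e.g. a compare that matched: IR = 0x4 written into CR field 1 */
        unsigned long ccr = set_cr_field(0, 1, 0x4);

        printf("ccr = %08lx\n", ccr);           /* prints 04000000 */
        return 0;
}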
+ */ + case SPFP: + if ((FP_ROUNDMODE) == FP_RND_PINF) { + if (!s_lo) fgpr.wp[1]++; /* Z > 0, choose Z1 */ + } else { /* round to -Inf */ + if (s_lo) fgpr.wp[1]++; /* Z < 0, choose Z2 */ + } + break; + + case DPFP: + if (FP_ROUNDMODE == FP_RND_PINF) { + if (!s_hi) fgpr.dp[0]++; /* Z > 0, choose Z1 */ + } else { /* round to -Inf */ + if (s_hi) fgpr.dp[0]++; /* Z < 0, choose Z2 */ + } + break; + + case VCT: + if (FP_ROUNDMODE == FP_RND_PINF) { + if (!s_lo) fgpr.wp[1]++; /* Z_low > 0, choose Z1 */ + if (!s_hi) fgpr.wp[0]++; /* Z_high word > 0, choose Z1 */ + } else { /* round to -Inf */ + if (s_lo) fgpr.wp[1]++; /* Z_low < 0, choose Z2 */ + if (s_hi) fgpr.wp[0]++; /* Z_high < 0, choose Z2 */ + } + break; + + default: + return -EINVAL; + } + + current->thread.evr[fc] = fgpr.wp[0]; + regs->gpr[fc] = fgpr.wp[1]; + + return 0; +} diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile index e7392b45a5ef..953cc4a1cde5 100644 --- a/arch/powerpc/mm/Makefile +++ b/arch/powerpc/mm/Makefile @@ -6,17 +6,19 @@ ifeq ($(CONFIG_PPC64),y) EXTRA_CFLAGS += -mno-minimal-toc endif -obj-y := fault.o mem.o \ +obj-y := fault.o mem.o pgtable.o \ init_$(CONFIG_WORD_SIZE).o \ - pgtable_$(CONFIG_WORD_SIZE).o \ - mmu_context_$(CONFIG_WORD_SIZE).o + pgtable_$(CONFIG_WORD_SIZE).o +obj-$(CONFIG_PPC_MMU_NOHASH) += mmu_context_nohash.o tlb_nohash.o \ + tlb_nohash_low.o hash-$(CONFIG_PPC_NATIVE) := hash_native_64.o obj-$(CONFIG_PPC64) += hash_utils_64.o \ slb_low.o slb.o stab.o \ gup.o mmap.o $(hash-y) obj-$(CONFIG_PPC_STD_MMU_32) += ppc_mmu_32.o obj-$(CONFIG_PPC_STD_MMU) += hash_low_$(CONFIG_WORD_SIZE).o \ - tlb_$(CONFIG_WORD_SIZE).o + tlb_hash$(CONFIG_WORD_SIZE).o \ + mmu_context_hash$(CONFIG_WORD_SIZE).o obj-$(CONFIG_40x) += 40x_mmu.o obj-$(CONFIG_44x) += 44x_mmu.o obj-$(CONFIG_FSL_BOOKE) += fsl_booke_mmu.o diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 565b7a237c84..91c7b8636b8a 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -30,6 +30,7 @@ #include <linux/kprobes.h> #include <linux/kdebug.h> +#include <asm/firmware.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/mmu.h> @@ -283,7 +284,7 @@ good_area: } pte_update(ptep, 0, _PAGE_HWEXEC | _PAGE_ACCESSED); - _tlbie(address, mm->context.id); + local_flush_tlb_page(vma, address); pte_unmap_unlock(ptep, ptl); up_read(&mm->mmap_sem); return 0; @@ -318,9 +319,16 @@ good_area: goto do_sigbus; BUG(); } - if (ret & VM_FAULT_MAJOR) + if (ret & VM_FAULT_MAJOR) { current->maj_flt++; - else +#ifdef CONFIG_PPC_SMLPAR + if (firmware_has_feature(FW_FEATURE_CMO)) { + preempt_disable(); + get_lppaca()->page_ins += (1 << PAGE_FACTOR); + preempt_enable(); + } +#endif + } else current->min_flt++; up_read(&mm->mmap_sem); return 0; @@ -339,7 +347,7 @@ bad_area_nosemaphore: && printk_ratelimit()) printk(KERN_CRIT "kernel tried to execute NX-protected" " page (%lx) - exploit attempt? 
(uid: %d)\n", - address, current->uid); + address, current_uid()); return SIGSEGV; diff --git a/arch/powerpc/mm/hash_low_32.S b/arch/powerpc/mm/hash_low_32.S index 7bffb70b9fe2..67850ec9feb3 100644 --- a/arch/powerpc/mm/hash_low_32.S +++ b/arch/powerpc/mm/hash_low_32.S @@ -36,36 +36,6 @@ mmu_hash_lock: #endif /* CONFIG_SMP */ /* - * Sync CPUs with hash_page taking & releasing the hash - * table lock - */ -#ifdef CONFIG_SMP - .text -_GLOBAL(hash_page_sync) - mfmsr r10 - rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */ - mtmsr r0 - lis r8,mmu_hash_lock@h - ori r8,r8,mmu_hash_lock@l - lis r0,0x0fff - b 10f -11: lwz r6,0(r8) - cmpwi 0,r6,0 - bne 11b -10: lwarx r6,0,r8 - cmpwi 0,r6,0 - bne- 11b - stwcx. r0,0,r8 - bne- 10b - isync - eieio - li r0,0 - stw r0,0(r8) - mtmsr r10 - blr -#endif /* CONFIG_SMP */ - -/* * Load a PTE into the hash table, if possible. * The address is in r4, and r3 contains an access flag: * _PAGE_RW (0x400) if a write. @@ -353,8 +323,8 @@ _GLOBAL(create_hpte) ori r8,r8,0xe14 /* clear out reserved bits and M */ andc r8,r5,r8 /* PP = user? (rw&dirty? 2: 3): 0 */ BEGIN_FTR_SECTION - ori r8,r8,_PAGE_COHERENT /* set M (coherence required) */ -END_FTR_SECTION_IFSET(CPU_FTR_NEED_COHERENT) + rlwinm r8,r8,0,~_PAGE_COHERENT /* clear M (coherence not required) */ +END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT) #ifdef CONFIG_PTE_64BIT /* Put the XPN bits into the PTE */ rlwimi r8,r10,8,20,22 @@ -663,3 +633,80 @@ _GLOBAL(flush_hash_patch_B) SYNC_601 isync blr + +/* + * Flush an entry from the TLB + */ +_GLOBAL(_tlbie) +#ifdef CONFIG_SMP + rlwinm r8,r1,0,0,(31-THREAD_SHIFT) + lwz r8,TI_CPU(r8) + oris r8,r8,11 + mfmsr r10 + SYNC + rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */ + rlwinm r0,r0,0,28,26 /* clear DR */ + mtmsr r0 + SYNC_601 + isync + lis r9,mmu_hash_lock@h + ori r9,r9,mmu_hash_lock@l + tophys(r9,r9) +10: lwarx r7,0,r9 + cmpwi 0,r7,0 + bne- 10b + stwcx. r8,0,r9 + bne- 10b + eieio + tlbie r3 + sync + TLBSYNC + li r0,0 + stw r0,0(r9) /* clear mmu_hash_lock */ + mtmsr r10 + SYNC_601 + isync +#else /* CONFIG_SMP */ + tlbie r3 + sync +#endif /* CONFIG_SMP */ + blr + +/* + * Flush the entire TLB. 603/603e only + */ +_GLOBAL(_tlbia) +#if defined(CONFIG_SMP) + rlwinm r8,r1,0,0,(31-THREAD_SHIFT) + lwz r8,TI_CPU(r8) + oris r8,r8,10 + mfmsr r10 + SYNC + rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */ + rlwinm r0,r0,0,28,26 /* clear DR */ + mtmsr r0 + SYNC_601 + isync + lis r9,mmu_hash_lock@h + ori r9,r9,mmu_hash_lock@l + tophys(r9,r9) +10: lwarx r7,0,r9 + cmpwi 0,r7,0 + bne- 10b + stwcx. 
r8,0,r9 + bne- 10b + sync + tlbia + sync + TLBSYNC + li r0,0 + stw r0,0(r9) /* clear mmu_hash_lock */ + mtmsr r10 + SYNC_601 + isync +#else /* CONFIG_SMP */ + sync + tlbia + sync +#endif /* CONFIG_SMP */ + blr diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index f0c3b88d50fa..201c7a5486cb 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -53,8 +53,7 @@ unsigned int mmu_huge_psizes[MMU_PAGE_COUNT] = { }; /* initialize all to 0 */ /* Subtract one from array size because we don't need a cache for 4K since * is not a huge page size */ -#define huge_pgtable_cache(psize) (pgtable_cache[HUGEPTE_CACHE_NUM \ - + psize-1]) +#define HUGE_PGTABLE_INDEX(psize) (HUGEPTE_CACHE_NUM + psize - 1) #define HUGEPTE_CACHE_NAME(psize) (huge_pgtable_cache_name[psize]) static const char *huge_pgtable_cache_name[MMU_PAGE_COUNT] = { @@ -113,7 +112,7 @@ static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr, static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, unsigned long address, unsigned int psize) { - pte_t *new = kmem_cache_zalloc(huge_pgtable_cache(psize), + pte_t *new = kmem_cache_zalloc(pgtable_cache[HUGE_PGTABLE_INDEX(psize)], GFP_KERNEL|__GFP_REPEAT); if (! new) @@ -121,7 +120,7 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, spin_lock(&mm->page_table_lock); if (!hugepd_none(*hpdp)) - kmem_cache_free(huge_pgtable_cache(psize), new); + kmem_cache_free(pgtable_cache[HUGE_PGTABLE_INDEX(psize)], new); else hpdp->pd = (unsigned long)new | HUGEPD_OK; spin_unlock(&mm->page_table_lock); @@ -763,13 +762,14 @@ static int __init hugetlbpage_init(void) for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { if (mmu_huge_psizes[psize]) { - huge_pgtable_cache(psize) = kmem_cache_create( - HUGEPTE_CACHE_NAME(psize), - HUGEPTE_TABLE_SIZE(psize), - HUGEPTE_TABLE_SIZE(psize), - 0, - NULL); - if (!huge_pgtable_cache(psize)) + pgtable_cache[HUGE_PGTABLE_INDEX(psize)] = + kmem_cache_create( + HUGEPTE_CACHE_NAME(psize), + HUGEPTE_TABLE_SIZE(psize), + HUGEPTE_TABLE_SIZE(psize), + 0, + NULL); + if (!pgtable_cache[HUGE_PGTABLE_INDEX(psize)]) panic("hugetlbpage_init(): could not create %s"\ "\n", HUGEPTE_CACHE_NAME(psize)); } diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c index 388ceda632f3..666a5e8a5be1 100644 --- a/arch/powerpc/mm/init_32.c +++ b/arch/powerpc/mm/init_32.c @@ -35,7 +35,6 @@ #include <asm/pgalloc.h> #include <asm/prom.h> #include <asm/io.h> -#include <asm/mmu_context.h> #include <asm/pgtable.h> #include <asm/mmu.h> #include <asm/smp.h> @@ -49,7 +48,7 @@ #if defined(CONFIG_KERNEL_START_BOOL) || defined(CONFIG_LOWMEM_SIZE_BOOL) /* The ammount of lowmem must be within 0xF0000000 - KERNELBASE. 
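The _tlbie/_tlbia implementations moved into hash_low_32.S guard the hash table with the same lwarx/stwcx. sequence the rest of the file uses on mmu_hash_lock: wait for the word to read zero, then try to store a non-zero owner tag, retrying if the reservation is lost. The C11 sketch below is only an analogue of that acquire/release shape, not the kernel's lock; the function names and the tag value are made up.

/*
 * Plain C11 analogue of the lwarx/stwcx. lock-acquire loop.  A weak
 * compare-exchange stands in for the reservation: it may fail spuriously,
 * in which case we simply go around again, just as a failed stwcx. does.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint mmu_hash_lock_word;

static void lock_hash(unsigned int owner_tag)   /* tag must be non-zero */
{
        unsigned int expected;

        for (;;) {
                expected = 0;                   /* "free" is a zero word */
                if (atomic_compare_exchange_weak_explicit(&mmu_hash_lock_word,
                                &expected, owner_tag,
                                memory_order_acquire, memory_order_relaxed))
                        return;
                /* someone else holds it, or the store-conditional failed */
        }
}

static void unlock_hash(void)
{
        atomic_store_explicit(&mmu_hash_lock_word, 0, memory_order_release);
}

int main(void)
{
        lock_hash(0x000b0001);  /* non-zero tag; the asm ORs a marker over the CPU number */
        /* ... tlbie/tlbia plus the sync/TLBSYNC dance would go here ... */
        unlock_hash();
        printf("lock cycled\n");
        return 0;
}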
*/ -#if (CONFIG_LOWMEM_SIZE > (0xF0000000 - KERNELBASE)) +#if (CONFIG_LOWMEM_SIZE > (0xF0000000 - PAGE_OFFSET)) #error "You must adjust CONFIG_LOWMEM_SIZE or CONFIG_START_KERNEL" #endif #endif @@ -180,9 +179,6 @@ void __init MMU_init(void) if (ppc_md.progress) ppc_md.progress("MMU:setio", 0x302); - /* Initialize the context management stuff */ - mmu_context_init(); - if (ppc_md.progress) ppc_md.progress("MMU:exit", 0x211); diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index b9e1a1da6e52..53b06ebb3f2f 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -102,8 +102,8 @@ pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot); if (!page_is_ram(pfn)) - vma_prot = __pgprot(pgprot_val(vma_prot) - | _PAGE_GUARDED | _PAGE_NO_CACHE); + vma_prot = pgprot_noncached(vma_prot); + return vma_prot; } EXPORT_SYMBOL(phys_mem_access_prot); @@ -488,7 +488,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, * we invalidate the TLB here, thus avoiding dcbst * misbehaviour. */ - _tlbie(address, 0 /* 8xx doesn't care about PID */); + _tlbil_va(address, 0 /* 8xx doesn't care about PID */); #endif /* The _PAGE_USER test should really be _PAGE_EXEC, but * older glibc versions execute some code from no-exec diff --git a/arch/powerpc/mm/mmu_context_32.c b/arch/powerpc/mm/mmu_context_32.c deleted file mode 100644 index cc32ba41d900..000000000000 --- a/arch/powerpc/mm/mmu_context_32.c +++ /dev/null @@ -1,84 +0,0 @@ -/* - * This file contains the routines for handling the MMU on those - * PowerPC implementations where the MMU substantially follows the - * architecture specification. This includes the 6xx, 7xx, 7xxx, - * 8260, and POWER3 implementations but excludes the 8xx and 4xx. - * -- paulus - * - * Derived from arch/ppc/mm/init.c: - * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) - * - * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au) - * and Cort Dougan (PReP) (cort@cs.nmt.edu) - * Copyright (C) 1996 Paul Mackerras - * - * Derived from "arch/i386/mm/init.c" - * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - * - */ - -#include <linux/mm.h> -#include <linux/init.h> - -#include <asm/mmu_context.h> -#include <asm/tlbflush.h> - -unsigned long next_mmu_context; -unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1]; -#ifdef FEW_CONTEXTS -atomic_t nr_free_contexts; -struct mm_struct *context_mm[LAST_CONTEXT+1]; -void steal_context(void); -#endif /* FEW_CONTEXTS */ - -/* - * Initialize the context management stuff. - */ -void __init -mmu_context_init(void) -{ - /* - * Some processors have too few contexts to reserve one for - * init_mm, and require using context 0 for a normal task. - * Other processors reserve the use of context zero for the kernel. - * This code assumes FIRST_CONTEXT < 32. - */ - context_map[0] = (1 << FIRST_CONTEXT) - 1; - next_mmu_context = FIRST_CONTEXT; -#ifdef FEW_CONTEXTS - atomic_set(&nr_free_contexts, LAST_CONTEXT - FIRST_CONTEXT + 1); -#endif /* FEW_CONTEXTS */ -} - -#ifdef FEW_CONTEXTS -/* - * Steal a context from a task that has one at the moment. - * This is only used on 8xx and 4xx and we presently assume that - * they don't do SMP. 
If they do then this will have to check - * whether the MM we steal is in use. - * We also assume that this is only used on systems that don't - * use an MMU hash table - this is true for 8xx and 4xx. - * This isn't an LRU system, it just frees up each context in - * turn (sort-of pseudo-random replacement :). This would be the - * place to implement an LRU scheme if anyone was motivated to do it. - * -- paulus - */ -void -steal_context(void) -{ - struct mm_struct *mm; - - /* free up context `next_mmu_context' */ - /* if we shouldn't free context 0, don't... */ - if (next_mmu_context < FIRST_CONTEXT) - next_mmu_context = FIRST_CONTEXT; - mm = context_mm[next_mmu_context]; - flush_tlb_mm(mm); - destroy_context(mm); -} -#endif /* FEW_CONTEXTS */ diff --git a/arch/powerpc/mm/mmu_context_hash32.c b/arch/powerpc/mm/mmu_context_hash32.c new file mode 100644 index 000000000000..0dfba2bf7f31 --- /dev/null +++ b/arch/powerpc/mm/mmu_context_hash32.c @@ -0,0 +1,103 @@ +/* + * This file contains the routines for handling the MMU on those + * PowerPC implementations where the MMU substantially follows the + * architecture specification. This includes the 6xx, 7xx, 7xxx, + * 8260, and POWER3 implementations but excludes the 8xx and 4xx. + * -- paulus + * + * Derived from arch/ppc/mm/init.c: + * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) + * + * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au) + * and Cort Dougan (PReP) (cort@cs.nmt.edu) + * Copyright (C) 1996 Paul Mackerras + * + * Derived from "arch/i386/mm/init.c" + * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + */ + +#include <linux/mm.h> +#include <linux/init.h> + +#include <asm/mmu_context.h> +#include <asm/tlbflush.h> + +/* + * On 32-bit PowerPC 6xx/7xx/7xxx CPUs, we use a set of 16 VSIDs + * (virtual segment identifiers) for each context. Although the + * hardware supports 24-bit VSIDs, and thus >1 million contexts, + * we only use 32,768 of them. That is ample, since there can be + * at most around 30,000 tasks in the system anyway, and it means + * that we can use a bitmap to indicate which contexts are in use. + * Using a bitmap means that we entirely avoid all of the problems + * that we used to have when the context number overflowed, + * particularly on SMP systems. + * -- paulus. + */ +#define NO_CONTEXT ((unsigned long) -1) +#define LAST_CONTEXT 32767 +#define FIRST_CONTEXT 1 + +/* + * This function defines the mapping from contexts to VSIDs (virtual + * segment IDs). We use a skew on both the context and the high 4 bits + * of the 32-bit virtual address (the "effective segment ID") in order + * to spread out the entries in the MMU hash table. Note, if this + * function is changed then arch/ppc/mm/hashtable.S will have to be + * changed to correspond. + * + * + * CTX_TO_VSID(ctx, va) (((ctx) * (897 * 16) + ((va) >> 28) * 0x111) \ + * & 0xffffff) + */ + +static unsigned long next_mmu_context; +static unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1]; + + +/* + * Set up the context for a new address space. 
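The CTX_TO_VSID() mapping quoted in the new hash32 file's comment is worth a quick numeric check: the 897*16 multiplier on the context and the 0x111 multiplier on the effective segment number spread neighbouring contexts and segments across the 24-bit VSID space instead of clustering them in the hash table. Illustration only, plain C, reusing the formula exactly as given in the comment:

/*
 * Evaluate the CTX_TO_VSID() skew from the comment above for a couple of
 * contexts and effective segments, to show the spacing it produces.
 */
#include <stdio.h>

#define CTX_TO_VSID(ctx, va) \
        ((((ctx) * (897 * 16)) + (((va) >> 28) * 0x111)) & 0xffffff)

int main(void)
{
        unsigned long ctx, va;

        for (ctx = 1; ctx <= 2; ctx++)
                for (va = 0; va < 0x30000000UL; va += 0x10000000UL)
                        printf("ctx %lu, ESID %lu -> VSID %06lx\n",
                               ctx, va >> 28, CTX_TO_VSID(ctx, va));
        return 0;
}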
+ */ +int init_new_context(struct task_struct *t, struct mm_struct *mm) +{ + unsigned long ctx = next_mmu_context; + + while (test_and_set_bit(ctx, context_map)) { + ctx = find_next_zero_bit(context_map, LAST_CONTEXT+1, ctx); + if (ctx > LAST_CONTEXT) + ctx = 0; + } + next_mmu_context = (ctx + 1) & LAST_CONTEXT; + mm->context.id = ctx; + + return 0; +} + +/* + * We're finished using the context for an address space. + */ +void destroy_context(struct mm_struct *mm) +{ + preempt_disable(); + if (mm->context.id != NO_CONTEXT) { + clear_bit(mm->context.id, context_map); + mm->context.id = NO_CONTEXT; + } + preempt_enable(); +} + +/* + * Initialize the context management stuff. + */ +void __init mmu_context_init(void) +{ + /* Reserve context 0 for kernel use */ + context_map[0] = (1 << FIRST_CONTEXT) - 1; + next_mmu_context = FIRST_CONTEXT; +} diff --git a/arch/powerpc/mm/mmu_context_64.c b/arch/powerpc/mm/mmu_context_hash64.c index 1db38ba1f544..dbeb86ac90cd 100644 --- a/arch/powerpc/mm/mmu_context_64.c +++ b/arch/powerpc/mm/mmu_context_hash64.c @@ -24,6 +24,14 @@ static DEFINE_SPINLOCK(mmu_context_lock); static DEFINE_IDR(mmu_context_idr); +/* + * The proto-VSID space has 2^35 - 1 segments available for user mappings. + * Each segment contains 2^28 bytes. Each context maps 2^44 bytes, + * so we can support 2^19-1 contexts (19 == 35 + 28 - 44). + */ +#define NO_CONTEXT 0 +#define MAX_CONTEXT ((1UL << 19) - 1) + int init_new_context(struct task_struct *tsk, struct mm_struct *mm) { int index; diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c new file mode 100644 index 000000000000..52a0cfc38b64 --- /dev/null +++ b/arch/powerpc/mm/mmu_context_nohash.c @@ -0,0 +1,397 @@ +/* + * This file contains the routines for handling the MMU on those + * PowerPC implementations where the MMU is not using the hash + * table, such as 8xx, 4xx, BookE's etc... + * + * Copyright 2008 Ben Herrenschmidt <benh@kernel.crashing.org> + * IBM Corp. + * + * Derived from previous arch/powerpc/mm/mmu_context.c + * and arch/powerpc/include/asm/mmu_context.h + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ * + * TODO: + * + * - The global context lock will not scale very well + * - The maps should be dynamically allocated to allow for processors + * that support more PID bits at runtime + * - Implement flush_tlb_mm() by making the context stale and picking + * a new one + * - More aggressively clear stale map bits and maybe find some way to + * also clear mm->cpu_vm_mask bits when processes are migrated + */ + +#undef DEBUG +#define DEBUG_STEAL_ONLY +#undef DEBUG_MAP_CONSISTENCY +/*#define DEBUG_CLAMP_LAST_CONTEXT 15 */ + +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/init.h> +#include <linux/spinlock.h> +#include <linux/bootmem.h> +#include <linux/notifier.h> +#include <linux/cpu.h> + +#include <asm/mmu_context.h> +#include <asm/tlbflush.h> + +static unsigned int first_context, last_context; +static unsigned int next_context, nr_free_contexts; +static unsigned long *context_map; +static unsigned long *stale_map[NR_CPUS]; +static struct mm_struct **context_mm; +static spinlock_t context_lock = SPIN_LOCK_UNLOCKED; + +#define CTX_MAP_SIZE \ + (sizeof(unsigned long) * (last_context / BITS_PER_LONG + 1)) + + +/* Steal a context from a task that has one at the moment. + * + * This is used when we are running out of available PID numbers + * on the processors. + * + * This isn't an LRU system, it just frees up each context in + * turn (sort-of pseudo-random replacement :). This would be the + * place to implement an LRU scheme if anyone was motivated to do it. + * -- paulus + * + * For context stealing, we use a slightly different approach for + * SMP and UP. Basically, the UP one is simpler and doesn't use + * the stale map as we can just flush the local CPU + * -- benh + */ +#ifdef CONFIG_SMP +static unsigned int steal_context_smp(unsigned int id) +{ + struct mm_struct *mm; + unsigned int cpu, max; + + again: + max = last_context - first_context; + + /* Attempt to free next_context first and then loop until we manage */ + while (max--) { + /* Pick up the victim mm */ + mm = context_mm[id]; + + /* We have a candidate victim, check if it's active, on SMP + * we cannot steal active contexts + */ + if (mm->context.active) { + id++; + if (id > last_context) + id = first_context; + continue; + } + pr_debug("[%d] steal context %d from mm @%p\n", + smp_processor_id(), id, mm); + + /* Mark this mm has having no context anymore */ + mm->context.id = MMU_NO_CONTEXT; + + /* Mark it stale on all CPUs that used this mm */ + for_each_cpu_mask_nr(cpu, mm->cpu_vm_mask) + __set_bit(id, stale_map[cpu]); + return id; + } + + /* This will happen if you have more CPUs than available contexts, + * all we can do here is wait a bit and try again + */ + spin_unlock(&context_lock); + cpu_relax(); + spin_lock(&context_lock); + goto again; +} +#endif /* CONFIG_SMP */ + +/* Note that this will also be called on SMP if all other CPUs are + * offlined, which means that it may be called for cpu != 0. 
For + * this to work, we somewhat assume that CPUs that are onlined + * come up with a fully clean TLB (or are cleaned when offlined) + */ +static unsigned int steal_context_up(unsigned int id) +{ + struct mm_struct *mm; + int cpu = smp_processor_id(); + + /* Pick up the victim mm */ + mm = context_mm[id]; + + pr_debug("[%d] steal context %d from mm @%p\n", cpu, id, mm); + + /* Mark this mm has having no context anymore */ + mm->context.id = MMU_NO_CONTEXT; + + /* Flush the TLB for that context */ + local_flush_tlb_mm(mm); + + /* XXX This clear should ultimately be part of local_flush_tlb_mm */ + __clear_bit(id, stale_map[cpu]); + + return id; +} + +#ifdef DEBUG_MAP_CONSISTENCY +static void context_check_map(void) +{ + unsigned int id, nrf, nact; + + nrf = nact = 0; + for (id = first_context; id <= last_context; id++) { + int used = test_bit(id, context_map); + if (!used) + nrf++; + if (used != (context_mm[id] != NULL)) + pr_err("MMU: Context %d is %s and MM is %p !\n", + id, used ? "used" : "free", context_mm[id]); + if (context_mm[id] != NULL) + nact += context_mm[id]->context.active; + } + if (nrf != nr_free_contexts) { + pr_err("MMU: Free context count out of sync ! (%d vs %d)\n", + nr_free_contexts, nrf); + nr_free_contexts = nrf; + } + if (nact > num_online_cpus()) + pr_err("MMU: More active contexts than CPUs ! (%d vs %d)\n", + nact, num_online_cpus()); + if (first_context > 0 && !test_bit(0, context_map)) + pr_err("MMU: Context 0 has been freed !!!\n"); +} +#else +static void context_check_map(void) { } +#endif + +void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next) +{ + unsigned int id, cpu = smp_processor_id(); + unsigned long *map; + + /* No lockless fast path .. yet */ + spin_lock(&context_lock); + +#ifndef DEBUG_STEAL_ONLY + pr_debug("[%d] activating context for mm @%p, active=%d, id=%d\n", + cpu, next, next->context.active, next->context.id); +#endif + +#ifdef CONFIG_SMP + /* Mark us active and the previous one not anymore */ + next->context.active++; + if (prev) { +#ifndef DEBUG_STEAL_ONLY + pr_debug(" old context %p active was: %d\n", + prev, prev->context.active); +#endif + WARN_ON(prev->context.active < 1); + prev->context.active--; + } +#endif /* CONFIG_SMP */ + + /* If we already have a valid assigned context, skip all that */ + id = next->context.id; + if (likely(id != MMU_NO_CONTEXT)) + goto ctxt_ok; + + /* We really don't have a context, let's try to acquire one */ + id = next_context; + if (id > last_context) + id = first_context; + map = context_map; + + /* No more free contexts, let's try to steal one */ + if (nr_free_contexts == 0) { +#ifdef CONFIG_SMP + if (num_online_cpus() > 1) { + id = steal_context_smp(id); + goto stolen; + } +#endif /* CONFIG_SMP */ + id = steal_context_up(id); + goto stolen; + } + nr_free_contexts--; + + /* We know there's at least one free context, try to find it */ + while (__test_and_set_bit(id, map)) { + id = find_next_zero_bit(map, last_context+1, id); + if (id > last_context) + id = first_context; + } + stolen: + next_context = id + 1; + context_mm[id] = next; + next->context.id = id; + +#ifndef DEBUG_STEAL_ONLY + pr_debug("[%d] picked up new id %d, nrf is now %d\n", + cpu, id, nr_free_contexts); +#endif + + context_check_map(); + ctxt_ok: + + /* If that context got marked stale on this CPU, then flush the + * local TLB for it and unmark it before we use it + */ + if (test_bit(id, stale_map[cpu])) { + pr_debug("[%d] flushing stale context %d for mm @%p !\n", + cpu, id, next); + local_flush_tlb_mm(next); + + /* XXX 
This clear should ultimately be part of local_flush_tlb_mm */ + __clear_bit(id, stale_map[cpu]); + } + + /* Flick the MMU and release lock */ + set_context(id, next->pgd); + spin_unlock(&context_lock); +} + +/* + * Set up the context for a new address space. + */ +int init_new_context(struct task_struct *t, struct mm_struct *mm) +{ + mm->context.id = MMU_NO_CONTEXT; + mm->context.active = 0; + + return 0; +} + +/* + * We're finished using the context for an address space. + */ +void destroy_context(struct mm_struct *mm) +{ + unsigned int id; + + if (mm->context.id == MMU_NO_CONTEXT) + return; + + WARN_ON(mm->context.active != 0); + + spin_lock(&context_lock); + id = mm->context.id; + if (id != MMU_NO_CONTEXT) { + __clear_bit(id, context_map); + mm->context.id = MMU_NO_CONTEXT; +#ifdef DEBUG_MAP_CONSISTENCY + mm->context.active = 0; + context_mm[id] = NULL; +#endif + nr_free_contexts++; + } + spin_unlock(&context_lock); +} + +#ifdef CONFIG_SMP + +static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self, + unsigned long action, void *hcpu) +{ + unsigned int cpu = (unsigned int)(long)hcpu; + + /* We don't touch CPU 0 map, it's allocated at aboot and kept + * around forever + */ + if (cpu == 0) + return NOTIFY_OK; + + switch (action) { + case CPU_ONLINE: + case CPU_ONLINE_FROZEN: + pr_debug("MMU: Allocating stale context map for CPU %d\n", cpu); + stale_map[cpu] = kzalloc(CTX_MAP_SIZE, GFP_KERNEL); + break; +#ifdef CONFIG_HOTPLUG_CPU + case CPU_DEAD: + case CPU_DEAD_FROZEN: + pr_debug("MMU: Freeing stale context map for CPU %d\n", cpu); + kfree(stale_map[cpu]); + stale_map[cpu] = NULL; + break; +#endif + } + return NOTIFY_OK; +} + +static struct notifier_block __cpuinitdata mmu_context_cpu_nb = { + .notifier_call = mmu_context_cpu_notify, +}; + +#endif /* CONFIG_SMP */ + +/* + * Initialize the context management stuff. + */ +void __init mmu_context_init(void) +{ + /* Mark init_mm as being active on all possible CPUs since + * we'll get called with prev == init_mm the first time + * we schedule on a given CPU + */ + init_mm.context.active = NR_CPUS; + + /* + * The MPC8xx has only 16 contexts. We rotate through them on each + * task switch. A better way would be to keep track of tasks that + * own contexts, and implement an LRU usage. That way very active + * tasks don't always have to pay the TLB reload overhead. The + * kernel pages are mapped shared, so the kernel can run on behalf + * of any task that makes a kernel entry. Shared does not mean they + * are not protected, just that the ASID comparison is not performed. + * -- Dan + * + * The IBM4xx has 256 contexts, so we can just rotate through these + * as a way of "switching" contexts. If the TID of the TLB is zero, + * the PID/TID comparison is disabled, so we can use a TID of zero + * to represent all kernel pages as shared among all contexts. 
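/*
 * Illustrative sketch only, not part of the patch: a stripped-down,
 * single-threaded model of the allocation policy switch_mmu_context()
 * implements above -- hand out ids round-robin between first_context
 * and last_context, and once none are free, steal the next id in
 * sequence from its current owner (who will fault in a new one later).
 */
#include <stdbool.h>

#define FIRST_CONTEXT	1
#define LAST_CONTEXT	255

static bool ctx_used[LAST_CONTEXT + 1];
static void *ctx_owner[LAST_CONTEXT + 1];
static unsigned int next_ctx = FIRST_CONTEXT;
static unsigned int nr_free = LAST_CONTEXT - FIRST_CONTEXT + 1;

static unsigned int alloc_context(void *mm)
{
	unsigned int id = (next_ctx > LAST_CONTEXT) ? FIRST_CONTEXT : next_ctx;

	if (nr_free == 0) {
		/* No free id left: steal this one from its current owner */
		ctx_owner[id] = mm;
		next_ctx = id + 1;
		return id;
	}
	nr_free--;
	while (ctx_used[id]) {		/* find the next free slot, wrapping */
		if (++id > LAST_CONTEXT)
			id = FIRST_CONTEXT;
	}
	ctx_used[id] = true;
	ctx_owner[id] = mm;
	next_ctx = id + 1;
	return id;
}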
+	 *	-- Dan
+	 */
+	if (mmu_has_feature(MMU_FTR_TYPE_8xx)) {
+		first_context = 0;
+		last_context = 15;
+	} else {
+		first_context = 1;
+		last_context = 255;
+	}
+
+#ifdef DEBUG_CLAMP_LAST_CONTEXT
+	last_context = DEBUG_CLAMP_LAST_CONTEXT;
+#endif
+	/*
+	 * Allocate the maps used by context management
+	 */
+	context_map = alloc_bootmem(CTX_MAP_SIZE);
+	context_mm = alloc_bootmem(sizeof(void *) * (last_context + 1));
+	stale_map[0] = alloc_bootmem(CTX_MAP_SIZE);
+
+#ifdef CONFIG_SMP
+	register_cpu_notifier(&mmu_context_cpu_nb);
+#endif
+
+	printk(KERN_INFO
+	       "MMU: Allocated %d bytes of context maps for %d contexts\n",
+	       2 * CTX_MAP_SIZE + (sizeof(void *) * (last_context + 1)),
+	       last_context - first_context + 1);
+
+	/*
+	 * Some processors have too few contexts to reserve one for
+	 * init_mm, and require using context 0 for a normal task.
+	 * Other processors reserve the use of context zero for the kernel.
+	 * This code assumes first_context < 32.
+	 */
+	context_map[0] = (1 << first_context) - 1;
+	next_context = first_context;
+	nr_free_contexts = last_context - first_context + 1;
+}
+
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index fab3cfad4099..4314b39b6faf 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -22,10 +22,58 @@
 #include <asm/tlbflush.h>
 #include <asm/mmu.h>
+#ifdef CONFIG_PPC_MMU_NOHASH
+
+/*
+ * On 40x and 8xx, we directly inline tlbia and tlbivax
+ */
+#if defined(CONFIG_40x) || defined(CONFIG_8xx)
+static inline void _tlbil_all(void)
+{
+	asm volatile ("sync; tlbia; isync" : : : "memory");
+}
+static inline void _tlbil_pid(unsigned int pid)
+{
+	asm volatile ("sync; tlbia; isync" : : : "memory");
+}
+#else /* CONFIG_40x || CONFIG_8xx */
+extern void _tlbil_all(void);
+extern void _tlbil_pid(unsigned int pid);
+#endif /* !(CONFIG_40x || CONFIG_8xx) */
+
+/*
+ * On 8xx, we directly inline tlbie, on others, it's extern
+ */
+#ifdef CONFIG_8xx
+static inline void _tlbil_va(unsigned long address, unsigned int pid)
+{
+	asm volatile ("tlbie %0; sync" : : "r" (address) : "memory");
+}
+#else /* CONFIG_8xx */
+extern void _tlbil_va(unsigned long address, unsigned int pid);
+#endif /* CONFIG_8xx */
+
+/*
+ * As of today, we don't support tlbivax broadcast on any
+ * implementation. Once we do, this will become an extern.
+ */
+static inline void _tlbivax_bcast(unsigned long address, unsigned int pid)
+{
+	BUG();
+}
+
+#else /* CONFIG_PPC_MMU_NOHASH */
+
 extern void hash_preload(struct mm_struct *mm, unsigned long ea,
 			 unsigned long access, unsigned long trap);
+extern void _tlbie(unsigned long address);
+extern void _tlbia(void);
+
+#endif /* CONFIG_PPC_MMU_NOHASH */
+
 #ifdef CONFIG_PPC32
 extern void mapin_ram(void);
 extern int map_page(unsigned long va, phys_addr_t pa, int flags);
@@ -58,17 +106,14 @@ extern phys_addr_t lowmem_end_addr;
 * architectures.
-- Dan */ #if defined(CONFIG_8xx) -#define flush_HPTE(X, va, pg) _tlbie(va, 0 /* 8xx doesn't care about PID */) #define MMU_init_hw() do { } while(0) #define mmu_mapin_ram() (0UL) #elif defined(CONFIG_4xx) -#define flush_HPTE(pid, va, pg) _tlbie(va, pid) extern void MMU_init_hw(void); extern unsigned long mmu_mapin_ram(void); #elif defined(CONFIG_FSL_BOOKE) -#define flush_HPTE(pid, va, pg) _tlbie(va, pid) extern void MMU_init_hw(void); extern unsigned long mmu_mapin_ram(void); extern void adjust_total_lowmem(void); @@ -77,18 +122,4 @@ extern void adjust_total_lowmem(void); /* anything 32-bit except 4xx or 8xx */ extern void MMU_init_hw(void); extern unsigned long mmu_mapin_ram(void); - -/* Be careful....this needs to be updated if we ever encounter 603 SMPs, - * which includes all new 82xx processors. We need tlbie/tlbsync here - * in that case (I think). -- Dan. - */ -static inline void flush_HPTE(unsigned context, unsigned long va, - unsigned long pdval) -{ - if ((Hash != 0) && - cpu_has_feature(CPU_FTR_HPTE_TABLE)) - flush_hash_pages(0, va, pdval, 1); - else - _tlbie(va); -} #endif diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c new file mode 100644 index 000000000000..6d94116fdea1 --- /dev/null +++ b/arch/powerpc/mm/pgtable.c @@ -0,0 +1,117 @@ +/* + * This file contains common routines for dealing with free of page tables + * + * Derived from arch/powerpc/mm/tlb_64.c: + * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) + * + * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au) + * and Cort Dougan (PReP) (cort@cs.nmt.edu) + * Copyright (C) 1996 Paul Mackerras + * + * Derived from "arch/i386/mm/init.c" + * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds + * + * Dave Engebretsen <engebret@us.ibm.com> + * Rework for PPC64 port. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/init.h> +#include <linux/percpu.h> +#include <linux/hardirq.h> +#include <asm/pgalloc.h> +#include <asm/tlbflush.h> +#include <asm/tlb.h> + +static DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur); +static unsigned long pte_freelist_forced_free; + +struct pte_freelist_batch +{ + struct rcu_head rcu; + unsigned int index; + pgtable_free_t tables[0]; +}; + +#define PTE_FREELIST_SIZE \ + ((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \ + / sizeof(pgtable_free_t)) + +static void pte_free_smp_sync(void *arg) +{ + /* Do nothing, just ensure we sync with all CPUs */ +} + +/* This is only called when we are critically out of memory + * (and fail to get a page in pte_free_tlb). 
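/*
 * Aside, not part of the patch: in that out-of-memory case the code below
 * cannot defer the free through RCU, so pgtable_free_now() synchronizes
 * with every other CPU via an empty smp_call_function() -- ensuring no
 * CPU is still in an interrupts-off lockless walk of the table -- and
 * then frees the page table immediately.
 */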
+ */ +static void pgtable_free_now(pgtable_free_t pgf) +{ + pte_freelist_forced_free++; + + smp_call_function(pte_free_smp_sync, NULL, 1); + + pgtable_free(pgf); +} + +static void pte_free_rcu_callback(struct rcu_head *head) +{ + struct pte_freelist_batch *batch = + container_of(head, struct pte_freelist_batch, rcu); + unsigned int i; + + for (i = 0; i < batch->index; i++) + pgtable_free(batch->tables[i]); + + free_page((unsigned long)batch); +} + +static void pte_free_submit(struct pte_freelist_batch *batch) +{ + INIT_RCU_HEAD(&batch->rcu); + call_rcu(&batch->rcu, pte_free_rcu_callback); +} + +void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf) +{ + /* This is safe since tlb_gather_mmu has disabled preemption */ + cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id()); + struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur); + + if (atomic_read(&tlb->mm->mm_users) < 2 || + cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) { + pgtable_free(pgf); + return; + } + + if (*batchp == NULL) { + *batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC); + if (*batchp == NULL) { + pgtable_free_now(pgf); + return; + } + (*batchp)->index = 0; + } + (*batchp)->tables[(*batchp)->index++] = pgf; + if ((*batchp)->index == PTE_FREELIST_SIZE) { + pte_free_submit(*batchp); + *batchp = NULL; + } +} + +void pte_free_finish(void) +{ + /* This is safe since tlb_gather_mmu has disabled preemption */ + struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur); + + if (*batchp == NULL) + return; + pte_free_submit(*batchp); + *batchp = NULL; +} diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c index c31d6d26f0b5..38ff35f2142a 100644 --- a/arch/powerpc/mm/pgtable_32.c +++ b/arch/powerpc/mm/pgtable_32.c @@ -48,10 +48,6 @@ EXPORT_SYMBOL(ioremap_bot); /* aka VMALLOC_END */ extern char etext[], _stext[]; -#ifdef CONFIG_SMP -extern void hash_page_sync(void); -#endif - #ifdef HAVE_BATS extern phys_addr_t v_mapped_by_bats(unsigned long va); extern unsigned long p_mapped_by_bats(phys_addr_t pa); @@ -72,24 +68,29 @@ extern unsigned long p_mapped_by_tlbcam(unsigned long pa); #define p_mapped_by_tlbcam(x) (0UL) #endif /* HAVE_TLBCAM */ -#ifdef CONFIG_PTE_64BIT -/* Some processors use an 8kB pgdir because they have 8-byte Linux PTEs. 
*/ -#define PGDIR_ORDER 1 -#else -#define PGDIR_ORDER 0 -#endif +#define PGDIR_ORDER (32 + PGD_T_LOG2 - PGDIR_SHIFT) pgd_t *pgd_alloc(struct mm_struct *mm) { pgd_t *ret; - ret = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, PGDIR_ORDER); + /* pgdir take page or two with 4K pages and a page fraction otherwise */ +#ifndef CONFIG_PPC_4K_PAGES + ret = (pgd_t *)kzalloc(1 << PGDIR_ORDER, GFP_KERNEL); +#else + ret = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, + PGDIR_ORDER - PAGE_SHIFT); +#endif return ret; } void pgd_free(struct mm_struct *mm, pgd_t *pgd) { - free_pages((unsigned long)pgd, PGDIR_ORDER); +#ifndef CONFIG_PPC_4K_PAGES + kfree((void *)pgd); +#else + free_pages((unsigned long)pgd, PGDIR_ORDER - PAGE_SHIFT); +#endif } __init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) @@ -125,23 +126,6 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address) return ptepage; } -void pte_free_kernel(struct mm_struct *mm, pte_t *pte) -{ -#ifdef CONFIG_SMP - hash_page_sync(); -#endif - free_page((unsigned long)pte); -} - -void pte_free(struct mm_struct *mm, pgtable_t ptepage) -{ -#ifdef CONFIG_SMP - hash_page_sync(); -#endif - pgtable_page_dtor(ptepage); - __free_page(ptepage); -} - void __iomem * ioremap(phys_addr_t addr, unsigned long size) { @@ -194,6 +178,7 @@ __ioremap(phys_addr_t addr, unsigned long size, unsigned long flags) if (p < 16*1024*1024) p += _ISA_MEM_BASE; +#ifndef CONFIG_CRASH_DUMP /* * Don't allow anybody to remap normal RAM that we're using. * mem_init() sets high_memory so only do the check after that. @@ -203,6 +188,7 @@ __ioremap(phys_addr_t addr, unsigned long size, unsigned long flags) (unsigned long long)p, __builtin_return_address(0)); return NULL; } +#endif if (size == 0) return NULL; @@ -288,7 +274,7 @@ int map_page(unsigned long va, phys_addr_t pa, int flags) } /* - * Map in a big chunk of physical memory starting at KERNELBASE. + * Map in a big chunk of physical memory starting at PAGE_OFFSET. 
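/*
 * Worked example of the PGDIR_ORDER formula above, under the usual 32-bit
 * assumptions (4-byte pgd_t, so PGD_T_LOG2 = 2); not part of the patch:
 *
 *   4K pages, 4-byte PTEs: PGDIR_SHIFT = 22, so 32 + 2 - 22 = 12,
 *     i.e. a 4KB pgdir and an order-0 (PGDIR_ORDER - PAGE_SHIFT = 0)
 *     page allocation.
 *   4K pages, 8-byte PTEs: PGDIR_SHIFT = 21, so 32 + 2 - 21 = 13,
 *     i.e. the 8KB (order-1) pgdir the removed comment referred to.
 *   16K/64K pages: the pgdir is only a fraction of a page, which is why
 *     the !CONFIG_PPC_4K_PAGES path above uses kzalloc()/kfree() rather
 *     than whole pages.
 */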
*/ void __init mapin_ram(void) { @@ -297,7 +283,7 @@ void __init mapin_ram(void) int ktext; s = mmu_mapin_ram(); - v = KERNELBASE + s; + v = PAGE_OFFSET + s; p = memstart_addr + s; for (; s < total_lowmem; s += PAGE_SIZE) { ktext = ((char *) v >= _stext && (char *) v < etext); @@ -363,7 +349,11 @@ static int __change_page_attr(struct page *page, pgprot_t prot) return -EINVAL; set_pte_at(&init_mm, address, kpte, mk_pte(page, prot)); wmb(); - flush_HPTE(0, address, pmd_val(*kpmd)); +#ifdef CONFIG_PPC_STD_MMU + flush_hash_pages(0, address, pmd_val(*kpmd), 1); +#else + flush_tlb_page(NULL, address); +#endif pte_unmap(kpte); return 0; @@ -400,7 +390,7 @@ void kernel_map_pages(struct page *page, int numpages, int enable) #endif /* CONFIG_DEBUG_PAGEALLOC */ static int fixmaps; -unsigned long FIXADDR_TOP = 0xfffff000; +unsigned long FIXADDR_TOP = (-PAGE_SIZE); EXPORT_SYMBOL(FIXADDR_TOP); void __set_fixmap (enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags) diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c index 6aa120813775..45d925360b89 100644 --- a/arch/powerpc/mm/ppc_mmu_32.c +++ b/arch/powerpc/mm/ppc_mmu_32.c @@ -95,16 +95,16 @@ unsigned long __init mmu_mapin_ram(void) break; } - setbat(2, KERNELBASE, 0, bl, _PAGE_RAM); - done = (unsigned long)bat_addrs[2].limit - KERNELBASE + 1; + setbat(2, PAGE_OFFSET, 0, bl, _PAGE_RAM); + done = (unsigned long)bat_addrs[2].limit - PAGE_OFFSET + 1; if ((done < tot) && !bat_addrs[3].limit) { /* use BAT3 to cover a bit more */ tot -= done; for (bl = 128<<10; bl < max_size; bl <<= 1) if (bl * 2 > tot) break; - setbat(3, KERNELBASE+done, done, bl, _PAGE_RAM); - done = (unsigned long)bat_addrs[3].limit - KERNELBASE + 1; + setbat(3, PAGE_OFFSET+done, done, bl, _PAGE_RAM); + done = (unsigned long)bat_addrs[3].limit - PAGE_OFFSET + 1; } return done; @@ -192,7 +192,7 @@ void __init MMU_init_hw(void) extern unsigned int hash_page[]; extern unsigned int flush_hash_patch_A[], flush_hash_patch_B[]; - if (!cpu_has_feature(CPU_FTR_HPTE_TABLE)) { + if (!mmu_has_feature(MMU_FTR_HPTE_TABLE)) { /* * Put a blr (procedure return) instruction at the * start of hash_page, since we can still get DSI diff --git a/arch/powerpc/mm/tlb_32.c b/arch/powerpc/mm/tlb_hash32.c index f9a47fee3927..65190587a365 100644 --- a/arch/powerpc/mm/tlb_32.c +++ b/arch/powerpc/mm/tlb_hash32.c @@ -137,6 +137,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end) flush_range(&init_mm, start, end); FINISH_FLUSH; } +EXPORT_SYMBOL(flush_tlb_kernel_range); /* * Flush all the (user) entries for the address space described by mm. 
@@ -160,6 +161,7 @@ void flush_tlb_mm(struct mm_struct *mm) flush_range(mp->vm_mm, mp->vm_start, mp->vm_end); FINISH_FLUSH; } +EXPORT_SYMBOL(flush_tlb_mm); void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr) { @@ -176,6 +178,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr) flush_hash_pages(mm->context.id, vmaddr, pmd_val(*pmd), 1); FINISH_FLUSH; } +EXPORT_SYMBOL(flush_tlb_page); /* * For each address in the range, find the pte for the address @@ -188,3 +191,4 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, flush_range(vma->vm_mm, start, end); FINISH_FLUSH; } +EXPORT_SYMBOL(flush_tlb_range); diff --git a/arch/powerpc/mm/tlb_64.c b/arch/powerpc/mm/tlb_hash64.c index be7dd422c0fa..c931bc7d1079 100644 --- a/arch/powerpc/mm/tlb_64.c +++ b/arch/powerpc/mm/tlb_hash64.c @@ -37,81 +37,6 @@ DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch); * arch/powerpc/include/asm/tlb.h file -- tgall */ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); -static DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur); -static unsigned long pte_freelist_forced_free; - -struct pte_freelist_batch -{ - struct rcu_head rcu; - unsigned int index; - pgtable_free_t tables[0]; -}; - -#define PTE_FREELIST_SIZE \ - ((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \ - / sizeof(pgtable_free_t)) - -static void pte_free_smp_sync(void *arg) -{ - /* Do nothing, just ensure we sync with all CPUs */ -} - -/* This is only called when we are critically out of memory - * (and fail to get a page in pte_free_tlb). - */ -static void pgtable_free_now(pgtable_free_t pgf) -{ - pte_freelist_forced_free++; - - smp_call_function(pte_free_smp_sync, NULL, 1); - - pgtable_free(pgf); -} - -static void pte_free_rcu_callback(struct rcu_head *head) -{ - struct pte_freelist_batch *batch = - container_of(head, struct pte_freelist_batch, rcu); - unsigned int i; - - for (i = 0; i < batch->index; i++) - pgtable_free(batch->tables[i]); - - free_page((unsigned long)batch); -} - -static void pte_free_submit(struct pte_freelist_batch *batch) -{ - INIT_RCU_HEAD(&batch->rcu); - call_rcu(&batch->rcu, pte_free_rcu_callback); -} - -void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf) -{ - /* This is safe since tlb_gather_mmu has disabled preemption */ - cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id()); - struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur); - - if (atomic_read(&tlb->mm->mm_users) < 2 || - cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) { - pgtable_free(pgf); - return; - } - - if (*batchp == NULL) { - *batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC); - if (*batchp == NULL) { - pgtable_free_now(pgf); - return; - } - (*batchp)->index = 0; - } - (*batchp)->tables[(*batchp)->index++] = pgf; - if ((*batchp)->index == PTE_FREELIST_SIZE) { - pte_free_submit(*batchp); - *batchp = NULL; - } -} /* * A linux PTE was changed and the corresponding hash table entry @@ -229,17 +154,6 @@ void __flush_tlb_pending(struct ppc64_tlb_batch *batch) batch->index = 0; } -void pte_free_finish(void) -{ - /* This is safe since tlb_gather_mmu has disabled preemption */ - struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur); - - if (*batchp == NULL) - return; - pte_free_submit(*batchp); - *batchp = NULL; -} - /** * __flush_hash_table_range - Flush all HPTEs for a given address range * from the hash table (and the TLB). 
But keeps diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c new file mode 100644 index 000000000000..803a64c02b06 --- /dev/null +++ b/arch/powerpc/mm/tlb_nohash.c @@ -0,0 +1,209 @@ +/* + * This file contains the routines for TLB flushing. + * On machines where the MMU does not use a hash table to store virtual to + * physical translations (ie, SW loaded TLBs or Book3E compilant processors, + * this does -not- include 603 however which shares the implementation with + * hash based processors) + * + * -- BenH + * + * Copyright 2008 Ben Herrenschmidt <benh@kernel.crashing.org> + * IBM Corp. + * + * Derived from arch/ppc/mm/init.c: + * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) + * + * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au) + * and Cort Dougan (PReP) (cort@cs.nmt.edu) + * Copyright (C) 1996 Paul Mackerras + * + * Derived from "arch/i386/mm/init.c" + * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + */ + +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/init.h> +#include <linux/highmem.h> +#include <linux/pagemap.h> +#include <linux/preempt.h> +#include <linux/spinlock.h> + +#include <asm/tlbflush.h> +#include <asm/tlb.h> + +#include "mmu_decl.h" + +/* + * Base TLB flushing operations: + * + * - flush_tlb_mm(mm) flushes the specified mm context TLB's + * - flush_tlb_page(vma, vmaddr) flushes one page + * - flush_tlb_range(vma, start, end) flushes a range of pages + * - flush_tlb_kernel_range(start, end) flushes kernel pages + * + * - local_* variants of page and mm only apply to the current + * processor + */ + +/* + * These are the base non-SMP variants of page and mm flushing + */ +void local_flush_tlb_mm(struct mm_struct *mm) +{ + unsigned int pid; + + preempt_disable(); + pid = mm->context.id; + if (pid != MMU_NO_CONTEXT) + _tlbil_pid(pid); + preempt_enable(); +} +EXPORT_SYMBOL(local_flush_tlb_mm); + +void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr) +{ + unsigned int pid; + + preempt_disable(); + pid = vma ? vma->vm_mm->context.id : 0; + if (pid != MMU_NO_CONTEXT) + _tlbil_va(vmaddr, pid); + preempt_enable(); +} +EXPORT_SYMBOL(local_flush_tlb_page); + + +/* + * And here are the SMP non-local implementations + */ +#ifdef CONFIG_SMP + +static DEFINE_SPINLOCK(tlbivax_lock); + +struct tlb_flush_param { + unsigned long addr; + unsigned int pid; +}; + +static void do_flush_tlb_mm_ipi(void *param) +{ + struct tlb_flush_param *p = param; + + _tlbil_pid(p ? p->pid : 0); +} + +static void do_flush_tlb_page_ipi(void *param) +{ + struct tlb_flush_param *p = param; + + _tlbil_va(p->addr, p->pid); +} + + +/* Note on invalidations and PID: + * + * We snapshot the PID with preempt disabled. At this point, it can still + * change either because: + * - our context is being stolen (PID -> NO_CONTEXT) on another CPU + * - we are invaliating some target that isn't currently running here + * and is concurrently acquiring a new PID on another CPU + * - some other CPU is re-acquiring a lost PID for this mm + * etc... + * + * However, this shouldn't be a problem as we only guarantee + * invalidation of TLB entries present prior to this call, so we + * don't care about the PID changing, and invalidating a stale PID + * is generally harmless. 
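/*
 * Illustration of the note above, not part of the patch: say this CPU
 * snapshots pid 5 for an mm and, before its IPI is processed, another
 * CPU steals context 5 for a different mm. Every entry we were asked to
 * remove was created under pid 5 before the steal, and the steal path
 * itself flushes (or marks stale) that pid before it is reused, so one
 * more invalidation of pid 5 is merely redundant, never unsafe.
 */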
+ */
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+	cpumask_t cpu_mask;
+	unsigned int pid;
+
+	preempt_disable();
+	pid = mm->context.id;
+	if (unlikely(pid == MMU_NO_CONTEXT))
+		goto no_context;
+	cpu_mask = mm->cpu_vm_mask;
+	cpu_clear(smp_processor_id(), cpu_mask);
+	if (!cpus_empty(cpu_mask)) {
+		struct tlb_flush_param p = { .pid = pid };
+		smp_call_function_mask(cpu_mask, do_flush_tlb_mm_ipi, &p, 1);
+	}
+	_tlbil_pid(pid);
+ no_context:
+	preempt_enable();
+}
+EXPORT_SYMBOL(flush_tlb_mm);
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
+{
+	cpumask_t cpu_mask;
+	unsigned int pid;
+
+	preempt_disable();
+	pid = vma ? vma->vm_mm->context.id : 0;
+	if (unlikely(pid == MMU_NO_CONTEXT))
+		goto bail;
+	cpu_mask = vma->vm_mm->cpu_vm_mask;
+	cpu_clear(smp_processor_id(), cpu_mask);
+	if (!cpus_empty(cpu_mask)) {
+		/* If broadcast tlbivax is supported, use it */
+		if (mmu_has_feature(MMU_FTR_USE_TLBIVAX_BCAST)) {
+			int lock = mmu_has_feature(MMU_FTR_LOCK_BCAST_INVAL);
+			if (lock)
+				spin_lock(&tlbivax_lock);
+			_tlbivax_bcast(vmaddr, pid);
+			if (lock)
+				spin_unlock(&tlbivax_lock);
+			goto bail;
+		} else {
+			struct tlb_flush_param p = { .pid = pid, .addr = vmaddr };
+			smp_call_function_mask(cpu_mask,
+					       do_flush_tlb_page_ipi, &p, 1);
+		}
+	}
+	_tlbil_va(vmaddr, pid);
+ bail:
+	preempt_enable();
+}
+EXPORT_SYMBOL(flush_tlb_page);
+
+#endif /* CONFIG_SMP */
+
+/*
+ * Flush kernel TLB entries in the given range
+ */
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+#ifdef CONFIG_SMP
+	preempt_disable();
+	smp_call_function(do_flush_tlb_mm_ipi, NULL, 1);
+	_tlbil_pid(0);
+	preempt_enable();
+#else
+	_tlbil_pid(0);
+#endif
+}
+EXPORT_SYMBOL(flush_tlb_kernel_range);
+
+/*
+ * Currently, for range flushing, we just do a full mm flush. This should
+ * be optimized based on a threshold on the size of the range, since
+ * some implementations can stack multiple tlbivax before a tlbsync but
+ * for now, we keep it that way.
+ */
+void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+		     unsigned long end)
+{
+	flush_tlb_mm(vma->vm_mm);
+}
+EXPORT_SYMBOL(flush_tlb_range);
diff --git a/arch/powerpc/mm/tlb_nohash_low.S b/arch/powerpc/mm/tlb_nohash_low.S
new file mode 100644
index 000000000000..f900a39e6ec4
--- /dev/null
+++ b/arch/powerpc/mm/tlb_nohash_low.S
@@ -0,0 +1,166 @@
+/*
+ * This file contains low-level functions for performing various
+ * types of TLB invalidations on various processors with no hash
+ * table.
+ *
+ * This file implements the following functions for all no-hash
+ * processors. Some aren't implemented for some variants. Some
+ * are inline in tlbflush.h
+ *
+ *	- tlbil_va
+ *	- tlbil_pid
+ *	- tlbil_all
+ *	- tlbivax_bcast (not yet)
+ *
+ * Code mostly moved over from misc_32.S
+ *
+ * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ * Partially rewritten by Cort Dougan (cort@cs.nmt.edu)
+ * Paul Mackerras, Kumar Gala and Benjamin Herrenschmidt.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ * + */ + +#include <asm/reg.h> +#include <asm/page.h> +#include <asm/cputable.h> +#include <asm/mmu.h> +#include <asm/ppc_asm.h> +#include <asm/asm-offsets.h> +#include <asm/processor.h> + +#if defined(CONFIG_40x) + +/* + * 40x implementation needs only tlbil_va + */ +_GLOBAL(_tlbil_va) + /* We run the search with interrupts disabled because we have to change + * the PID and I don't want to preempt when that happens. + */ + mfmsr r5 + mfspr r6,SPRN_PID + wrteei 0 + mtspr SPRN_PID,r4 + tlbsx. r3, 0, r3 + mtspr SPRN_PID,r6 + wrtee r5 + bne 1f + sync + /* There are only 64 TLB entries, so r3 < 64, which means bit 25 is + * clear. Since 25 is the V bit in the TLB_TAG, loading this value + * will invalidate the TLB entry. */ + tlbwe r3, r3, TLB_TAG + isync +1: blr + +#elif defined(CONFIG_8xx) + +/* + * Nothing to do for 8xx, everything is inline + */ + +#elif defined(CONFIG_44x) + +/* + * 440 implementation uses tlbsx/we for tlbil_va and a full sweep + * of the TLB for everything else. + */ +_GLOBAL(_tlbil_va) + mfspr r5,SPRN_MMUCR + rlwimi r5,r4,0,24,31 /* Set TID */ + + /* We have to run the search with interrupts disabled, otherwise + * an interrupt which causes a TLB miss can clobber the MMUCR + * between the mtspr and the tlbsx. + * + * Critical and Machine Check interrupts take care of saving + * and restoring MMUCR, so only normal interrupts have to be + * taken care of. + */ + mfmsr r4 + wrteei 0 + mtspr SPRN_MMUCR,r5 + tlbsx. r3, 0, r3 + wrtee r4 + bne 1f + sync + /* There are only 64 TLB entries, so r3 < 64, + * which means bit 22, is clear. Since 22 is + * the V bit in the TLB_PAGEID, loading this + * value will invalidate the TLB entry. + */ + tlbwe r3, r3, PPC44x_TLB_PAGEID + isync +1: blr + +_GLOBAL(_tlbil_all) +_GLOBAL(_tlbil_pid) + li r3,0 + sync + + /* Load high watermark */ + lis r4,tlb_44x_hwater@ha + lwz r5,tlb_44x_hwater@l(r4) + +1: tlbwe r3,r3,PPC44x_TLB_PAGEID + addi r3,r3,1 + cmpw 0,r3,r5 + ble 1b + + isync + blr + +#elif defined(CONFIG_FSL_BOOKE) +/* + * FSL BookE implementations. Currently _pid and _all are the + * same. This will change when tlbilx is actually supported and + * performs invalidate-by-PID. This change will be driven by + * mmu_features conditional + */ + +/* + * Flush MMU TLB on the local processor + */ +_GLOBAL(_tlbil_pid) +_GLOBAL(_tlbil_all) +#define MMUCSR0_TLBFI (MMUCSR0_TLB0FI | MMUCSR0_TLB1FI | \ + MMUCSR0_TLB2FI | MMUCSR0_TLB3FI) + li r3,(MMUCSR0_TLBFI)@l + mtspr SPRN_MMUCSR0, r3 +1: + mfspr r3,SPRN_MMUCSR0 + andi. r3,r3,MMUCSR0_TLBFI@l + bne 1b + msync + isync + blr + +/* + * Flush MMU TLB for a particular address, but only on the local processor + * (no broadcast) + */ +_GLOBAL(_tlbil_va) + mfmsr r10 + wrteei 0 + slwi r4,r4,16 + mtspr SPRN_MAS6,r4 /* assume AS=0 for now */ + tlbsx 0,r3 + mfspr r4,SPRN_MAS1 /* check valid */ + andis. r3,r4,MAS1_VALID@h + beq 1f + rlwinm r4,r4,0,1,31 + mtspr SPRN_MAS1,r4 + tlbwe + msync + isync +1: wrtee r10 + blr +#elif +#error Unsupported processor type ! 
+#endif diff --git a/arch/powerpc/platforms/40x/ep405.c b/arch/powerpc/platforms/40x/ep405.c index ae2e7f67c18e..4058fd1e7fc7 100644 --- a/arch/powerpc/platforms/40x/ep405.c +++ b/arch/powerpc/platforms/40x/ep405.c @@ -100,7 +100,7 @@ static void __init ep405_setup_arch(void) /* Find & init the BCSR CPLD */ ep405_init_bcsr(); - ppc_pci_flags = PPC_PCI_REASSIGN_ALL_RSRC; + ppc_pci_set_flags(PPC_PCI_REASSIGN_ALL_RSRC); } static int __init ep405_probe(void) diff --git a/arch/powerpc/platforms/40x/kilauea.c b/arch/powerpc/platforms/40x/kilauea.c index 1dd24ffc0dc1..fd7d934dac8b 100644 --- a/arch/powerpc/platforms/40x/kilauea.c +++ b/arch/powerpc/platforms/40x/kilauea.c @@ -44,7 +44,7 @@ static int __init kilauea_probe(void) if (!of_flat_dt_is_compatible(root, "amcc,kilauea")) return 0; - ppc_pci_flags = PPC_PCI_REASSIGN_ALL_RSRC; + ppc_pci_set_flags(PPC_PCI_REASSIGN_ALL_RSRC); return 1; } diff --git a/arch/powerpc/platforms/40x/ppc40x_simple.c b/arch/powerpc/platforms/40x/ppc40x_simple.c index 4498a86b46c3..f40ac9b8f99f 100644 --- a/arch/powerpc/platforms/40x/ppc40x_simple.c +++ b/arch/powerpc/platforms/40x/ppc40x_simple.c @@ -61,7 +61,7 @@ static int __init ppc40x_probe(void) for (i = 0; i < ARRAY_SIZE(board); i++) { if (of_flat_dt_is_compatible(root, board[i])) { - ppc_pci_flags = PPC_PCI_REASSIGN_ALL_RSRC; + ppc_pci_set_flags(PPC_PCI_REASSIGN_ALL_RSRC); return 1; } } diff --git a/arch/powerpc/platforms/44x/ebony.c b/arch/powerpc/platforms/44x/ebony.c index a0e8fe4662f6..88b9117fa691 100644 --- a/arch/powerpc/platforms/44x/ebony.c +++ b/arch/powerpc/platforms/44x/ebony.c @@ -54,7 +54,7 @@ static int __init ebony_probe(void) if (!of_flat_dt_is_compatible(root, "ibm,ebony")) return 0; - ppc_pci_flags = PPC_PCI_REASSIGN_ALL_RSRC; + ppc_pci_set_flags(PPC_PCI_REASSIGN_ALL_RSRC); return 1; } diff --git a/arch/powerpc/platforms/44x/ppc44x_simple.c b/arch/powerpc/platforms/44x/ppc44x_simple.c index 29671262801f..76fdc51dac8b 100644 --- a/arch/powerpc/platforms/44x/ppc44x_simple.c +++ b/arch/powerpc/platforms/44x/ppc44x_simple.c @@ -69,7 +69,7 @@ static int __init ppc44x_probe(void) for (i = 0; i < ARRAY_SIZE(board); i++) { if (of_flat_dt_is_compatible(root, board[i])) { - ppc_pci_flags = PPC_PCI_REASSIGN_ALL_RSRC; + ppc_pci_set_flags(PPC_PCI_REASSIGN_ALL_RSRC); return 1; } } diff --git a/arch/powerpc/platforms/44x/sam440ep.c b/arch/powerpc/platforms/44x/sam440ep.c index 47f10e647735..a78e8eb6da41 100644 --- a/arch/powerpc/platforms/44x/sam440ep.c +++ b/arch/powerpc/platforms/44x/sam440ep.c @@ -51,7 +51,7 @@ static int __init sam440ep_probe(void) if (!of_flat_dt_is_compatible(root, "acube,sam440ep")) return 0; - ppc_pci_flags = PPC_PCI_REASSIGN_ALL_RSRC; + ppc_pci_set_flags(PPC_PCI_REASSIGN_ALL_RSRC); return 1; } diff --git a/arch/powerpc/platforms/52xx/lite5200_pm.c b/arch/powerpc/platforms/52xx/lite5200_pm.c index fe92e65103ed..b5c753db125e 100644 --- a/arch/powerpc/platforms/52xx/lite5200_pm.c +++ b/arch/powerpc/platforms/52xx/lite5200_pm.c @@ -3,7 +3,6 @@ #include <asm/io.h> #include <asm/time.h> #include <asm/mpc52xx.h> -#include "mpc52xx_pic.h" /* defined in lite5200_sleep.S and only used here */ extern void lite5200_low_power(void __iomem *sram, void __iomem *mbar); diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pci.c b/arch/powerpc/platforms/52xx/mpc52xx_pci.c index b49a18527661..c3f2c21024e3 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_pci.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_pci.c @@ -375,7 +375,7 @@ mpc52xx_add_bridge(struct device_node *node) pr_debug("Adding MPC52xx PCI host 
bridge %s\n", node->full_name);
-	ppc_pci_flags |= PPC_PCI_REASSIGN_ALL_BUS;
+	ppc_pci_add_flags(PPC_PCI_REASSIGN_ALL_BUS);
 	if (of_address_to_resource(node, 0, &rsrc) != 0) {
 		printk(KERN_ERR "Can't get %s resources\n", node->full_name);
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.c b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
index 8479394e9ab4..72865e8e4b51 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_pic.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
@@ -2,20 +2,100 @@
 *
 * Programmable Interrupt Controller functions for the Freescale MPC52xx.
 *
+ * Copyright (C) 2008 Secret Lab Technologies Ltd.
 * Copyright (C) 2006 bplan GmbH
+ * Copyright (C) 2004 Sylvain Munaut <tnt@246tNt.com>
+ * Copyright (C) 2003 Montavista Software, Inc
 *
 * Based on the code from the 2.4 kernel by
 * Dale Farnsworth <dfarnsworth@mvista.com> and Kent Borg.
 *
- * Copyright (C) 2004 Sylvain Munaut <tnt@246tNt.com>
- * Copyright (C) 2003 Montavista Software, Inc
- *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 *
 */
+/*
+ * This is the device driver for the MPC5200 interrupt controller.
+ *
+ * hardware overview
+ * -----------------
+ * The MPC5200 interrupt controller groups all interrupt sources into
+ * three groups called 'critical', 'main', and 'peripheral'. The critical
+ * group has 3 irqs: External IRQ0, slice timer 0 irq, and wake from deep
+ * sleep. The main group includes the other 3 external IRQs, slice timer 1,
+ * RTC, gpios, and the general purpose timers. The peripheral group contains
+ * the remaining irq sources from all of the on-chip peripherals (PSCs,
+ * Ethernet, USB, DMA, etc.).
+ *
+ * virqs
+ * -----
+ * The Linux IRQ subsystem requires that each irq source be assigned a
+ * system wide unique IRQ number starting at 1 (0 means no irq). Since
+ * systems can have multiple interrupt controllers, the virtual IRQ (virq)
+ * infrastructure lets each interrupt controller define a local set
+ * of IRQ numbers and the virq infrastructure maps those numbers into
+ * a unique range of the global IRQ# space.
+ *
+ * To define a range of virq numbers for this controller, this driver first
+ * assigns a number to each of the irq groups (called the level 1 or L1
+ * value). Within each group, individual irq sources are also assigned a
+ * number, as defined by the MPC5200 user guide; this is the level 2 or
+ * L2 value. The virq number is determined by shifting up the
+ * L1 value by MPC52xx_IRQ_L1_OFFSET and ORing it with the L2 value.
+ *
+ * For example, the TMR0 interrupt is irq 9 in the main group. The
+ * virq for TMR0 is calculated by ((1 << MPC52xx_IRQ_L1_OFFSET) | 9).
+ *
+ * The observant reader will also notice that this driver defines a 4th
+ * interrupt group called 'bestcomm'. The bestcomm group isn't physically
+ * part of the MPC5200 interrupt controller, but it is used here to assign
+ * a separate virq number for each bestcomm task (since any of the 16
+ * bestcomm tasks can cause the bestcomm interrupt to be raised). When a
+ * bestcomm interrupt occurs (peripheral group, irq 0), this driver determines
+ * which task needs servicing and returns the irq number for that task. This
+ * allows drivers which use bestcomm to define their own interrupt handlers.
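/*
 * Worked example of the L1/L2 encoding described above (illustrative
 * only, not part of the patch; the constants mirror the MPC52xx_IRQ_*
 * defines added by this patch):
 */
#include <stdio.h>

#define L1_OFFSET	6		/* MPC52xx_IRQ_L1_OFFSET */
#define L1_MASK		0x00c0		/* MPC52xx_IRQ_L1_MASK   */
#define L2_MASK		0x003f		/* MPC52xx_IRQ_L2_MASK   */

int main(void)
{
	unsigned int hwirq = (1 << L1_OFFSET) | 9;	/* main group, TMR0 */

	/* Prints "hwirq=0x49 l1=1 l2=9": group 1 (main), source 9 (TMR0) */
	printf("hwirq=0x%x l1=%u l2=%u\n", hwirq,
	       (hwirq & L1_MASK) >> L1_OFFSET, hwirq & L2_MASK);
	return 0;
}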
+ * + * irq_chip structures + * ------------------- + * For actually manipulating IRQs (masking, enabling, clearing, etc) this + * driver defines four separate 'irq_chip' structures, one for the main + * group, one for the peripherals group, one for the bestcomm group and one + * for external interrupts. The irq_chip structures provide the hooks needed + * to manipulate each IRQ source, and since each group is has a separate set + * of registers for controlling the irq, it makes sense to divide up the + * hooks along those lines. + * + * You'll notice that there is not an irq_chip for the critical group and + * you'll also notice that there is an irq_chip defined for external + * interrupts even though there is no external interrupt group. The reason + * for this is that the four external interrupts are all managed with the same + * register even though one of the external IRQs is in the critical group and + * the other three are in the main group. For this reason it makes sense for + * the 4 external irqs to be managed using a separate set of hooks. The + * reason there is no crit irq_chip is that of the 3 irqs in the critical + * group, only external interrupt is actually support at this time by this + * driver and since external interrupt is the only one used, it can just + * be directed to make use of the external irq irq_chip. + * + * device tree bindings + * -------------------- + * The device tree bindings for this controller reflect the two level + * organization of irqs in the device. #interrupt-cells = <3> where the + * first cell is the group number [0..3], the second cell is the irq + * number in the group, and the third cell is the sense type (level/edge). + * For reference, the following is a list of the interrupt property values + * associated with external interrupt sources on the MPC5200 (just because + * it is non-obvious to determine what the interrupts property should be + * when reading the mpc5200 manual and it is a frequently asked question). + * + * External interrupts: + * <0 0 n> external irq0, n is sense (n=0: level high, + * <1 1 n> external irq1, n is sense n=1: edge rising, + * <1 2 n> external irq2, n is sense n=2: edge falling, + * <1 3 n> external irq3, n is sense n=3: level low) + */ #undef DEBUG #include <linux/interrupt.h> @@ -24,11 +104,19 @@ #include <asm/io.h> #include <asm/prom.h> #include <asm/mpc52xx.h> -#include "mpc52xx_pic.h" -/* - * -*/ +/* HW IRQ mapping */ +#define MPC52xx_IRQ_L1_CRIT (0) +#define MPC52xx_IRQ_L1_MAIN (1) +#define MPC52xx_IRQ_L1_PERP (2) +#define MPC52xx_IRQ_L1_SDMA (3) + +#define MPC52xx_IRQ_L1_OFFSET (6) +#define MPC52xx_IRQ_L1_MASK (0x00c0) +#define MPC52xx_IRQ_L2_MASK (0x003f) + +#define MPC52xx_IRQ_HIGHTESTHWIRQ (0xd0) + /* MPC5200 device tree match tables */ static struct of_device_id mpc52xx_pic_ids[] __initdata = { @@ -53,10 +141,7 @@ static unsigned char mpc52xx_map_senses[4] = { IRQ_TYPE_LEVEL_LOW, }; -/* - * -*/ - +/* Utility functions */ static inline void io_be_setbit(u32 __iomem *addr, int bitno) { out_be32(addr, in_be32(addr) | (1 << bitno)); @@ -69,15 +154,14 @@ static inline void io_be_clrbit(u32 __iomem *addr, int bitno) /* * IRQ[0-3] interrupt irq_chip -*/ - + */ static void mpc52xx_extirq_mask(unsigned int virq) { int irq; int l2irq; irq = irq_map[virq].hwirq; - l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET; + l2irq = irq & MPC52xx_IRQ_L2_MASK; pr_debug("%s: irq=%x. 
l2=%d\n", __func__, irq, l2irq); @@ -90,7 +174,7 @@ static void mpc52xx_extirq_unmask(unsigned int virq) int l2irq; irq = irq_map[virq].hwirq; - l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET; + l2irq = irq & MPC52xx_IRQ_L2_MASK; pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq); @@ -103,7 +187,7 @@ static void mpc52xx_extirq_ack(unsigned int virq) int l2irq; irq = irq_map[virq].hwirq; - l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET; + l2irq = irq & MPC52xx_IRQ_L2_MASK; pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq); @@ -117,7 +201,7 @@ static int mpc52xx_extirq_set_type(unsigned int virq, unsigned int flow_type) int l2irq; irq = irq_map[virq].hwirq; - l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET; + l2irq = irq & MPC52xx_IRQ_L2_MASK; pr_debug("%s: irq=%x. l2=%d flow_type=%d\n", __func__, irq, l2irq, flow_type); @@ -156,15 +240,14 @@ static struct irq_chip mpc52xx_extirq_irqchip = { /* * Main interrupt irq_chip -*/ - + */ static void mpc52xx_main_mask(unsigned int virq) { int irq; int l2irq; irq = irq_map[virq].hwirq; - l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET; + l2irq = irq & MPC52xx_IRQ_L2_MASK; pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq); @@ -177,7 +260,7 @@ static void mpc52xx_main_unmask(unsigned int virq) int l2irq; irq = irq_map[virq].hwirq; - l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET; + l2irq = irq & MPC52xx_IRQ_L2_MASK; pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq); @@ -193,15 +276,14 @@ static struct irq_chip mpc52xx_main_irqchip = { /* * Peripherals interrupt irq_chip -*/ - + */ static void mpc52xx_periph_mask(unsigned int virq) { int irq; int l2irq; irq = irq_map[virq].hwirq; - l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET; + l2irq = irq & MPC52xx_IRQ_L2_MASK; pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq); @@ -214,7 +296,7 @@ static void mpc52xx_periph_unmask(unsigned int virq) int l2irq; irq = irq_map[virq].hwirq; - l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET; + l2irq = irq & MPC52xx_IRQ_L2_MASK; pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq); @@ -230,15 +312,14 @@ static struct irq_chip mpc52xx_periph_irqchip = { /* * SDMA interrupt irq_chip -*/ - + */ static void mpc52xx_sdma_mask(unsigned int virq) { int irq; int l2irq; irq = irq_map[virq].hwirq; - l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET; + l2irq = irq & MPC52xx_IRQ_L2_MASK; pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq); @@ -251,7 +332,7 @@ static void mpc52xx_sdma_unmask(unsigned int virq) int l2irq; irq = irq_map[virq].hwirq; - l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET; + l2irq = irq & MPC52xx_IRQ_L2_MASK; pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq); @@ -264,7 +345,7 @@ static void mpc52xx_sdma_ack(unsigned int virq) int l2irq; irq = irq_map[virq].hwirq; - l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET; + l2irq = irq & MPC52xx_IRQ_L2_MASK; pr_debug("%s: irq=%x. 
l2=%d\n", __func__, irq, l2irq); @@ -278,13 +359,12 @@ static struct irq_chip mpc52xx_sdma_irqchip = { .ack = mpc52xx_sdma_ack, }; -/* - * irq_host -*/ - +/** + * mpc52xx_irqhost_xlate - translate virq# from device tree interrupts property + */ static int mpc52xx_irqhost_xlate(struct irq_host *h, struct device_node *ct, - u32 * intspec, unsigned int intsize, - irq_hw_number_t * out_hwirq, + u32 *intspec, unsigned int intsize, + irq_hw_number_t *out_hwirq, unsigned int *out_flags) { int intrvect_l1; @@ -299,10 +379,9 @@ static int mpc52xx_irqhost_xlate(struct irq_host *h, struct device_node *ct, intrvect_l2 = (int)intspec[1]; intrvect_type = (int)intspec[2]; - intrvect_linux = - (intrvect_l1 << MPC52xx_IRQ_L1_OFFSET) & MPC52xx_IRQ_L1_MASK; - intrvect_linux |= - (intrvect_l2 << MPC52xx_IRQ_L2_OFFSET) & MPC52xx_IRQ_L2_MASK; + intrvect_linux = (intrvect_l1 << MPC52xx_IRQ_L1_OFFSET) & + MPC52xx_IRQ_L1_MASK; + intrvect_linux |= intrvect_l2 & MPC52xx_IRQ_L2_MASK; pr_debug("return %x, l1=%d, l2=%d\n", intrvect_linux, intrvect_l1, intrvect_l2); @@ -313,11 +392,11 @@ static int mpc52xx_irqhost_xlate(struct irq_host *h, struct device_node *ct, return 0; } -/* - * this function retrieves the correct IRQ type out - * of the MPC regs - * Only externals IRQs needs this -*/ +/** + * mpc52xx_irqx_gettype - determine the IRQ sense type (level/edge) + * + * Only external IRQs need this. + */ static int mpc52xx_irqx_gettype(int irq) { int type; @@ -329,6 +408,9 @@ static int mpc52xx_irqx_gettype(int irq) return mpc52xx_map_senses[type]; } +/** + * mpc52xx_irqhost_map - Hook to map from virq to an irq_chip structure + */ static int mpc52xx_irqhost_map(struct irq_host *h, unsigned int virq, irq_hw_number_t irq) { @@ -339,7 +421,7 @@ static int mpc52xx_irqhost_map(struct irq_host *h, unsigned int virq, int type; l1irq = (irq & MPC52xx_IRQ_L1_MASK) >> MPC52xx_IRQ_L1_OFFSET; - l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET; + l2irq = irq & MPC52xx_IRQ_L2_MASK; /* * Most of ours IRQs will be level low @@ -379,8 +461,7 @@ static int mpc52xx_irqhost_map(struct irq_host *h, unsigned int virq, break; default: - pr_debug("%s: Error, unknown L1 IRQ (0x%x)\n", __func__, l1irq); - printk(KERN_ERR "Unknow IRQ!\n"); + pr_err("%s: invalid virq requested (0x%x)\n", __func__, virq); return -EINVAL; } @@ -406,10 +487,15 @@ static struct irq_host_ops mpc52xx_irqhost_ops = { .map = mpc52xx_irqhost_map, }; -/* - * init (public) -*/ - +/** + * mpc52xx_init_irq - Initialize and register with the virq subsystem + * + * Hook for setting up IRQs on an mpc5200 system. A pointer to this function + * is to be put into the machine definition structure. + * + * This function searches the device tree for an MPC5200 interrupt controller, + * initializes it, and registers it with the virq subsystem. 
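/*
 * Sketch of how a board port is expected to hook these entry points
 * (illustrative only, not part of the patch; the machine name, probe
 * function and compatible string are placeholders, and the other
 * machdep callbacks a real board needs are omitted):
 */
#include <asm/machdep.h>
#include <asm/mpc52xx.h>
#include <asm/prom.h>

static int __init my5200_probe(void)
{
	unsigned long root = of_get_flat_dt_root();

	return of_flat_dt_is_compatible(root, "my,board-5200");
}

define_machine(my5200) {
	.name		= "hypothetical mpc5200 board",
	.probe		= my5200_probe,
	.init_IRQ	= mpc52xx_init_irq,
	.get_irq	= mpc52xx_get_irq,
};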
+ */ void __init mpc52xx_init_irq(void) { u32 intr_ctrl; @@ -454,7 +540,6 @@ void __init mpc52xx_init_irq(void) * As last step, add an irq host to translate the real * hw irq information provided by the ofw to linux virq */ - mpc52xx_irqhost = irq_alloc_host(picnode, IRQ_HOST_MAP_LINEAR, MPC52xx_IRQ_HIGHTESTHWIRQ, &mpc52xx_irqhost_ops, -1); @@ -462,12 +547,38 @@ void __init mpc52xx_init_irq(void) if (!mpc52xx_irqhost) panic(__FILE__ ": Cannot allocate the IRQ host\n"); - printk(KERN_INFO "MPC52xx PIC is up and running!\n"); + irq_set_default_host(mpc52xx_irqhost); + + pr_info("MPC52xx PIC is up and running!\n"); } -/* - * get_irq (public) -*/ +/** + * mpc52xx_get_irq - Get pending interrupt number hook function + * + * Called by the interupt handler to determine what IRQ handler needs to be + * executed. + * + * Status of pending interrupts is determined by reading the encoded status + * register. The encoded status register has three fields; one for each of the + * types of interrupts defined by the controller - 'critical', 'main' and + * 'peripheral'. This function reads the status register and returns the IRQ + * number associated with the highest priority pending interrupt. 'Critical' + * interrupts have the highest priority, followed by 'main' interrupts, and + * then 'peripheral'. + * + * The mpc5200 interrupt controller can be configured to boost the priority + * of individual 'peripheral' interrupts. If this is the case then a special + * value will appear in either the crit or main fields indicating a high + * or medium priority peripheral irq has occurred. + * + * This function checks each of the 3 irq request fields and returns the + * first pending interrupt that it finds. + * + * This function also identifies a 4th type of interrupt; 'bestcomm'. Each + * bestcomm DMA task can raise the bestcomm peripheral interrupt. When this + * occurs at task-specific IRQ# is decoded so that each task can have its + * own IRQ handler. + */ unsigned int mpc52xx_get_irq(void) { u32 status; @@ -478,25 +589,21 @@ unsigned int mpc52xx_get_irq(void) irq = (status >> 8) & 0x3; if (irq == 2) /* high priority peripheral */ goto peripheral; - irq |= (MPC52xx_IRQ_L1_CRIT << MPC52xx_IRQ_L1_OFFSET) & - MPC52xx_IRQ_L1_MASK; + irq |= (MPC52xx_IRQ_L1_CRIT << MPC52xx_IRQ_L1_OFFSET); } else if (status & 0x00200000) { /* main */ irq = (status >> 16) & 0x1f; if (irq == 4) /* low priority peripheral */ goto peripheral; - irq |= (MPC52xx_IRQ_L1_MAIN << MPC52xx_IRQ_L1_OFFSET) & - MPC52xx_IRQ_L1_MASK; + irq |= (MPC52xx_IRQ_L1_MAIN << MPC52xx_IRQ_L1_OFFSET); } else if (status & 0x20000000) { /* peripheral */ peripheral: irq = (status >> 24) & 0x1f; if (irq == 0) { /* bestcomm */ status = in_be32(&sdma->IntPend); irq = ffs(status) - 1; - irq |= (MPC52xx_IRQ_L1_SDMA << MPC52xx_IRQ_L1_OFFSET) & - MPC52xx_IRQ_L1_MASK; + irq |= (MPC52xx_IRQ_L1_SDMA << MPC52xx_IRQ_L1_OFFSET); } else { - irq |= (MPC52xx_IRQ_L1_PERP << MPC52xx_IRQ_L1_OFFSET) & - MPC52xx_IRQ_L1_MASK; + irq |= (MPC52xx_IRQ_L1_PERP << MPC52xx_IRQ_L1_OFFSET); } } diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.h b/arch/powerpc/platforms/52xx/mpc52xx_pic.h deleted file mode 100644 index 1a26bcdb3049..000000000000 --- a/arch/powerpc/platforms/52xx/mpc52xx_pic.h +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Header file for Freescale MPC52xx Interrupt controller - * - * Copyright (C) 2004-2005 Sylvain Munaut <tnt@246tNt.com> - * Copyright (C) 2003 MontaVista, Software, Inc. - * - * This file is licensed under the terms of the GNU General Public License - * version 2. 
This program is licensed "as is" without any warranty of any - * kind, whether express or implied. - */ - -#ifndef __POWERPC_SYSDEV_MPC52xx_PIC_H__ -#define __POWERPC_SYSDEV_MPC52xx_PIC_H__ - -#include <asm/types.h> - - -/* HW IRQ mapping */ -#define MPC52xx_IRQ_L1_CRIT (0) -#define MPC52xx_IRQ_L1_MAIN (1) -#define MPC52xx_IRQ_L1_PERP (2) -#define MPC52xx_IRQ_L1_SDMA (3) - -#define MPC52xx_IRQ_L1_OFFSET (6) -#define MPC52xx_IRQ_L1_MASK (0x00c0) - -#define MPC52xx_IRQ_L2_OFFSET (0) -#define MPC52xx_IRQ_L2_MASK (0x003f) - -#define MPC52xx_IRQ_HIGHTESTHWIRQ (0xd0) - - -/* Interrupt controller Register set */ -struct mpc52xx_intr { - u32 per_mask; /* INTR + 0x00 */ - u32 per_pri1; /* INTR + 0x04 */ - u32 per_pri2; /* INTR + 0x08 */ - u32 per_pri3; /* INTR + 0x0c */ - u32 ctrl; /* INTR + 0x10 */ - u32 main_mask; /* INTR + 0x14 */ - u32 main_pri1; /* INTR + 0x18 */ - u32 main_pri2; /* INTR + 0x1c */ - u32 reserved1; /* INTR + 0x20 */ - u32 enc_status; /* INTR + 0x24 */ - u32 crit_status; /* INTR + 0x28 */ - u32 main_status; /* INTR + 0x2c */ - u32 per_status; /* INTR + 0x30 */ - u32 reserved2; /* INTR + 0x34 */ - u32 per_error; /* INTR + 0x38 */ -}; - -#endif /* __POWERPC_SYSDEV_MPC52xx_PIC_H__ */ - diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pm.c b/arch/powerpc/platforms/52xx/mpc52xx_pm.c index c72d3304387f..a55b0b6813ed 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_pm.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_pm.c @@ -5,9 +5,6 @@ #include <asm/cacheflush.h> #include <asm/mpc52xx.h> -#include "mpc52xx_pic.h" - - /* these are defined in mpc52xx_sleep.S, and only used here */ extern void mpc52xx_deep_sleep(void __iomem *sram, void __iomem *sdram_regs, struct mpc52xx_cdm __iomem *, struct mpc52xx_intr __iomem*); diff --git a/arch/powerpc/platforms/82xx/pq2.c b/arch/powerpc/platforms/82xx/pq2.c index 1b75902fad64..9761a59f175f 100644 --- a/arch/powerpc/platforms/82xx/pq2.c +++ b/arch/powerpc/platforms/82xx/pq2.c @@ -53,7 +53,7 @@ static void __init pq2_pci_add_bridge(struct device_node *np) if (of_address_to_resource(np, 0, &r) || r.end - r.start < 0x10b) goto err; - ppc_pci_flags |= PPC_PCI_REASSIGN_ALL_BUS; + ppc_pci_add_flags(PPC_PCI_REASSIGN_ALL_BUS); hose = pcibios_alloc_controller(np); if (!hose) diff --git a/arch/powerpc/platforms/85xx/Makefile b/arch/powerpc/platforms/85xx/Makefile index cb3054e1001d..f0798c09980f 100644 --- a/arch/powerpc/platforms/85xx/Makefile +++ b/arch/powerpc/platforms/85xx/Makefile @@ -1,6 +1,8 @@ # # Makefile for the PowerPC 85xx linux kernel. 
# +obj-$(CONFIG_SMP) += smp.o + obj-$(CONFIG_MPC8540_ADS) += mpc85xx_ads.o obj-$(CONFIG_MPC8560_ADS) += mpc85xx_ads.o obj-$(CONFIG_MPC85xx_CDS) += mpc85xx_cds.o diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ds.c b/arch/powerpc/platforms/85xx/mpc85xx_ds.c index 613bf8c2e30d..a8301c8ad537 100644 --- a/arch/powerpc/platforms/85xx/mpc85xx_ds.c +++ b/arch/powerpc/platforms/85xx/mpc85xx_ds.c @@ -63,6 +63,7 @@ void __init mpc85xx_ds_pic_init(void) struct device_node *cascade_node = NULL; int cascade_irq; #endif + unsigned long root = of_get_flat_dt_root(); np = of_find_node_by_type(NULL, "open-pic"); if (np == NULL) { @@ -76,11 +77,19 @@ void __init mpc85xx_ds_pic_init(void) return; } - mpic = mpic_alloc(np, r.start, + if (of_flat_dt_is_compatible(root, "fsl,MPC8572DS-CAMP")) { + mpic = mpic_alloc(np, r.start, + MPIC_PRIMARY | + MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS, + 0, 256, " OpenPIC "); + } else { + mpic = mpic_alloc(np, r.start, MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS | MPIC_SINGLE_DEST_CPU, 0, 256, " OpenPIC "); + } + BUG_ON(mpic == NULL); of_node_put(np); diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c index 2494c5155919..658a36fab3ab 100644 --- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c +++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c @@ -231,7 +231,7 @@ static void __init mpc85xx_mds_setup_arch(void) static int __init board_fixups(void) { - char phy_id[BUS_ID_SIZE]; + char phy_id[20]; char *compstrs[2] = {"fsl,gianfar-mdio", "fsl,ucc-mdio"}; struct device_node *mdio; struct resource res; @@ -241,13 +241,15 @@ static int __init board_fixups(void) mdio = of_find_compatible_node(NULL, NULL, compstrs[i]); of_address_to_resource(mdio, 0, &res); - snprintf(phy_id, BUS_ID_SIZE, "%x:%02x", res.start, 1); + snprintf(phy_id, sizeof(phy_id), "%llx:%02x", + (unsigned long long)res.start, 1); phy_register_fixup_for_id(phy_id, mpc8568_fixup_125_clock); phy_register_fixup_for_id(phy_id, mpc8568_mds_phy_fixups); /* Register a workaround for errata */ - snprintf(phy_id, BUS_ID_SIZE, "%x:%02x", res.start, 7); + snprintf(phy_id, sizeof(phy_id), "%llx:%02x", + (unsigned long long)res.start, 7); phy_register_fixup_for_id(phy_id, mpc8568_mds_phy_fixups); of_node_put(mdio); diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c new file mode 100644 index 000000000000..d652c713f496 --- /dev/null +++ b/arch/powerpc/platforms/85xx/smp.c @@ -0,0 +1,104 @@ +/* + * Author: Andy Fleming <afleming@freescale.com> + * Kumar Gala <galak@kernel.crashing.org> + * + * Copyright 2006-2008 Freescale Semiconductor Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */
+
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+
+#include <asm/machdep.h>
+#include <asm/pgtable.h>
+#include <asm/page.h>
+#include <asm/mpic.h>
+#include <asm/cacheflush.h>
+
+#include <sysdev/fsl_soc.h>
+
+extern volatile unsigned long __secondary_hold_acknowledge;
+extern void __early_start(void);
+
+#define BOOT_ENTRY_ADDR_UPPER	0
+#define BOOT_ENTRY_ADDR_LOWER	1
+#define BOOT_ENTRY_R3_UPPER	2
+#define BOOT_ENTRY_R3_LOWER	3
+#define BOOT_ENTRY_RESV		4
+#define BOOT_ENTRY_PIR		5
+#define BOOT_ENTRY_R6_UPPER	6
+#define BOOT_ENTRY_R6_LOWER	7
+#define NUM_BOOT_ENTRY		8
+#define SIZE_BOOT_ENTRY		(NUM_BOOT_ENTRY * sizeof(u32))
+
+static void __init
+smp_85xx_kick_cpu(int nr)
+{
+	unsigned long flags;
+	const u64 *cpu_rel_addr;
+	u32 __iomem *bptr_vaddr;
+	struct device_node *np;
+	int n = 0;
+
+	WARN_ON(nr < 0 || nr >= NR_CPUS);
+
+	pr_debug("smp_85xx_kick_cpu: kick CPU #%d\n", nr);
+
+	local_irq_save(flags);
+
+	np = of_get_cpu_node(nr, NULL);
+	cpu_rel_addr = of_get_property(np, "cpu-release-addr", NULL);
+
+	if (cpu_rel_addr == NULL) {
+		printk(KERN_ERR "No cpu-release-addr for cpu %d\n", nr);
+		local_irq_restore(flags);
+		return;
+	}
+
+	/* Map the spin table */
+	bptr_vaddr = ioremap(*cpu_rel_addr, SIZE_BOOT_ENTRY);
+
+	out_be32(bptr_vaddr + BOOT_ENTRY_PIR, nr);
+	out_be32(bptr_vaddr + BOOT_ENTRY_ADDR_LOWER, __pa(__early_start));
+
+	/* Wait a bit for the CPU to ack. */
+	while ((__secondary_hold_acknowledge != nr) && (++n < 1000))
+		mdelay(1);
+
+	iounmap(bptr_vaddr);
+
+	local_irq_restore(flags);
+
+	pr_debug("waited %d msecs for CPU #%d.\n", n, nr);
+}
+
+static void __init
+smp_85xx_setup_cpu(int cpu_nr)
+{
+	mpic_setup_this_cpu();
+
+	/* Clear any pending timer interrupts */
+	mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);
+
+	/* Enable decrementer interrupt */
+	mtspr(SPRN_TCR, TCR_DIE);
+}
+
+struct smp_ops_t smp_85xx_ops = {
+	.message_pass = smp_mpic_message_pass,
+	.probe = smp_mpic_probe,
+	.kick_cpu = smp_85xx_kick_cpu,
+	.setup_cpu = smp_85xx_setup_cpu,
+};
+
+void __init
+mpc85xx_smp_init(void)
+{
+	smp_ops = &smp_85xx_ops;
+}
diff --git a/arch/powerpc/platforms/86xx/Kconfig b/arch/powerpc/platforms/86xx/Kconfig
index 77dd797a2580..8e5693935975 100644
--- a/arch/powerpc/platforms/86xx/Kconfig
+++ b/arch/powerpc/platforms/86xx/Kconfig
@@ -34,6 +34,8 @@ config MPC8610_HPCD
 config GEF_SBC610
 	bool "GE Fanuc SBC610"
 	select DEFAULT_UIMAGE
+	select GENERIC_GPIO
+	select ARCH_REQUIRE_GPIOLIB
 	select HAS_RAPIDIO
 	help
 	  This option enables support for GE Fanuc's SBC610.
diff --git a/arch/powerpc/platforms/86xx/Makefile b/arch/powerpc/platforms/86xx/Makefile
index 4a56ff619afd..31e540c2ebbc 100644
--- a/arch/powerpc/platforms/86xx/Makefile
+++ b/arch/powerpc/platforms/86xx/Makefile
@@ -7,4 +7,5 @@ obj-$(CONFIG_SMP)		+= mpc86xx_smp.o
 obj-$(CONFIG_MPC8641_HPCN)	+= mpc86xx_hpcn.o
 obj-$(CONFIG_SBC8641D)		+= sbc8641d.o
 obj-$(CONFIG_MPC8610_HPCD)	+= mpc8610_hpcd.o
-obj-$(CONFIG_GEF_SBC610)	+= gef_sbc610.o gef_pic.o
+gef-gpio-$(CONFIG_GPIOLIB)	+= gef_gpio.o
+obj-$(CONFIG_GEF_SBC610)	+= gef_sbc610.o gef_pic.o $(gef-gpio-y)
diff --git a/arch/powerpc/platforms/86xx/gef_gpio.c b/arch/powerpc/platforms/86xx/gef_gpio.c
new file mode 100644
index 000000000000..85b2800f4cb7
--- /dev/null
+++ b/arch/powerpc/platforms/86xx/gef_gpio.c
@@ -0,0 +1,143 @@
+/*
+ * Driver for GE Fanuc's FPGA based GPIO pins
+ *
+ * Author: Martyn Welch <martyn.welch@gefanuc.com>
+ *
+ * 2008 (c) GE Fanuc Intelligent Platforms Embedded Systems, Inc.
+ * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + */ + +/* TODO + * + * Configuration of output modes (totem-pole/open-drain) + * Interrupt configuration - interrupts are always generated the FPGA relies on + * the I/O interrupt controllers mask to stop them propergating + */ + +#include <linux/kernel.h> +#include <linux/compiler.h> +#include <linux/init.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/of_platform.h> +#include <linux/of_gpio.h> +#include <linux/gpio.h> + +#define GEF_GPIO_DIRECT 0x00 +#define GEF_GPIO_IN 0x04 +#define GEF_GPIO_OUT 0x08 +#define GEF_GPIO_TRIG 0x0C +#define GEF_GPIO_POLAR_A 0x10 +#define GEF_GPIO_POLAR_B 0x14 +#define GEF_GPIO_INT_STAT 0x18 +#define GEF_GPIO_OVERRUN 0x1C +#define GEF_GPIO_MODE 0x20 + +#define NUM_GPIO 19 + +static void _gef_gpio_set(void __iomem *reg, unsigned int offset, int value) +{ + unsigned int data; + + data = ioread32be(reg); + /* value: 0=low; 1=high */ + if (value & 0x1) + data = data | (0x1 << offset); + else + data = data & ~(0x1 << offset); + + iowrite32be(data, reg); +} + + +static int gef_gpio_dir_in(struct gpio_chip *chip, unsigned offset) +{ + unsigned int data; + struct of_mm_gpio_chip *mmchip = to_of_mm_gpio_chip(chip); + + data = ioread32be(mmchip->regs + GEF_GPIO_DIRECT); + data = data | (0x1 << offset); + iowrite32be(data, mmchip->regs + GEF_GPIO_DIRECT); + + return 0; +} + +static int gef_gpio_dir_out(struct gpio_chip *chip, unsigned offset, int value) +{ + unsigned int data; + struct of_mm_gpio_chip *mmchip = to_of_mm_gpio_chip(chip); + + /* Set direction before switching to input */ + _gef_gpio_set(mmchip->regs + GEF_GPIO_OUT, offset, value); + + data = ioread32be(mmchip->regs + GEF_GPIO_DIRECT); + data = data & ~(0x1 << offset); + iowrite32be(data, mmchip->regs + GEF_GPIO_DIRECT); + + return 0; +} + +static int gef_gpio_get(struct gpio_chip *chip, unsigned offset) +{ + unsigned int data; + int state = 0; + struct of_mm_gpio_chip *mmchip = to_of_mm_gpio_chip(chip); + + data = ioread32be(mmchip->regs + GEF_GPIO_IN); + state = (int)((data >> offset) & 0x1); + + return state; +} + +static void gef_gpio_set(struct gpio_chip *chip, unsigned offset, int value) +{ + struct of_mm_gpio_chip *mmchip = to_of_mm_gpio_chip(chip); + + _gef_gpio_set(mmchip->regs + GEF_GPIO_OUT, offset, value); +} + +static int __init gef_gpio_init(void) +{ + struct device_node *np; + + for_each_compatible_node(np, NULL, "gef,sbc610-gpio") { + int retval; + struct of_mm_gpio_chip *gef_gpio_chip; + + pr_debug("%s: Initialising GEF GPIO\n", np->full_name); + + /* Allocate chip structure */ + gef_gpio_chip = kzalloc(sizeof(*gef_gpio_chip), GFP_KERNEL); + if (!gef_gpio_chip) { + pr_err("%s: Unable to allocate structure\n", + np->full_name); + continue; + } + + /* Setup pointers to chip functions */ + gef_gpio_chip->of_gc.gpio_cells = 2; + gef_gpio_chip->of_gc.gc.ngpio = NUM_GPIO; + gef_gpio_chip->of_gc.gc.direction_input = gef_gpio_dir_in; + gef_gpio_chip->of_gc.gc.direction_output = gef_gpio_dir_out; + gef_gpio_chip->of_gc.gc.get = gef_gpio_get; + gef_gpio_chip->of_gc.gc.set = gef_gpio_set; + + /* This function adds a memory mapped GPIO chip */ + retval = of_mm_gpiochip_add(np, gef_gpio_chip); + if (retval) { + kfree(gef_gpio_chip); + pr_err("%s: Unable to add GPIO\n", np->full_name); + } + } + + return 0; +}; +arch_initcall(gef_gpio_init); + 
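An illustrative consumer sketch, not part of the patch above: once gef_gpio_init() has registered the chip through of_mm_gpiochip_add(), the 19 FPGA lines are driven with the ordinary legacy gpiolib calls. The driver does not fix a GPIO base, so GEF_GPIO_BASE below stands in for whatever number gpiolib assigns at runtime; the line offset and the "wdt-strobe" label are likewise assumptions made only for this example.

#include <linux/gpio.h>

#define GEF_GPIO_BASE	0	/* assumption: base actually chosen by gpiolib */

static int example_use_gef_gpio(void)
{
	unsigned int gpio = GEF_GPIO_BASE + 2;	/* hypothetical FPGA line */
	int ret;

	ret = gpio_request(gpio, "wdt-strobe");
	if (ret)
		return ret;

	/* Ends up in gef_gpio_dir_out(), which writes GEF_GPIO_OUT and then
	 * clears the line's bit in GEF_GPIO_DIRECT */
	ret = gpio_direction_output(gpio, 1);
	if (ret) {
		gpio_free(gpio);
		return ret;
	}

	gpio_set_value(gpio, 0);	/* gef_gpio_set() toggles GEF_GPIO_OUT */
	gpio_free(gpio);
	return 0;
}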
+MODULE_DESCRIPTION("GE Fanuc I/O FPGA GPIO driver"); +MODULE_AUTHOR("Martyn Welch <martyn.welch@gefanuc.com"); +MODULE_LICENSE("GPL"); diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index 548efa55c8fe..3d0c776f888d 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -195,16 +195,24 @@ config SPE config PPC_STD_MMU bool - depends on 6xx || POWER3 || POWER4 || PPC64 + depends on 6xx || PPC64 default y config PPC_STD_MMU_32 def_bool y depends on PPC_STD_MMU && PPC32 +config PPC_STD_MMU_64 + def_bool y + depends on PPC_STD_MMU && PPC64 + +config PPC_MMU_NOHASH + def_bool y + depends on !PPC_STD_MMU + config PPC_MM_SLICES bool - default y if HUGETLB_PAGE || PPC_64K_PAGES + default y if HUGETLB_PAGE || (PPC_STD_MMU_64 && PPC_64K_PAGES) default n config VIRT_CPU_ACCOUNTING diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig index c14d7d8d96c8..5cc3279559a4 100644 --- a/arch/powerpc/platforms/cell/Kconfig +++ b/arch/powerpc/platforms/cell/Kconfig @@ -2,13 +2,18 @@ config PPC_CELL bool default n -config PPC_CELL_NATIVE +config PPC_CELL_COMMON bool select PPC_CELL select PPC_DCR_MMIO - select PPC_OF_PLATFORM_PCI select PPC_INDIRECT_IO select PPC_NATIVE + select PPC_RTAS + +config PPC_CELL_NATIVE + bool + select PPC_CELL_COMMON + select PPC_OF_PLATFORM_PCI select MPIC select IBM_NEW_EMAC_EMAC4 select IBM_NEW_EMAC_RGMII @@ -20,7 +25,6 @@ config PPC_IBM_CELL_BLADE bool "IBM Cell Blade" depends on PPC_MULTIPLATFORM && PPC64 select PPC_CELL_NATIVE - select PPC_RTAS select MMIO_NVRAM select PPC_UDBG_16550 select UDBG_RTAS_CONSOLE @@ -28,16 +32,17 @@ config PPC_IBM_CELL_BLADE config PPC_CELLEB bool "Toshiba's Cell Reference Set 'Celleb' Architecture" depends on PPC_MULTIPLATFORM && PPC64 - select PPC_CELL select PPC_CELL_NATIVE - select PPC_RTAS - select PPC_INDIRECT_IO - select PPC_OF_PLATFORM_PCI select HAS_TXX9_SERIAL select PPC_UDBG_BEAT select USB_OHCI_BIG_ENDIAN_MMIO select USB_EHCI_BIG_ENDIAN_MMIO +config PPC_CELL_QPACE + bool "IBM Cell - QPACE" + depends on PPC_MULTIPLATFORM && PPC64 + select PPC_CELL_COMMON + menu "Cell Broadband Engine options" depends on PPC_CELL @@ -102,7 +107,7 @@ config PPC_IBM_CELL_POWERBUTTON config CBE_THERM tristate "CBE thermal support" default m - depends on CBE_RAS + depends on CBE_RAS && SPU_BASE config CBE_CPUFREQ tristate "CBE frequency scaling" @@ -136,5 +141,5 @@ endmenu config OPROFILE_CELL def_bool y - depends on PPC_CELL_NATIVE && (OPROFILE = m || OPROFILE = y) + depends on PPC_CELL_NATIVE && (OPROFILE = m || OPROFILE = y) && SPU_BASE diff --git a/arch/powerpc/platforms/cell/Makefile b/arch/powerpc/platforms/cell/Makefile index 7fd830872c43..43eccb270301 100644 --- a/arch/powerpc/platforms/cell/Makefile +++ b/arch/powerpc/platforms/cell/Makefile @@ -1,7 +1,7 @@ -obj-$(CONFIG_PPC_CELL_NATIVE) += interrupt.o iommu.o setup.o \ - cbe_regs.o spider-pic.o \ - pervasive.o pmu.o io-workarounds.o \ - spider-pci.o +obj-$(CONFIG_PPC_CELL_COMMON) += cbe_regs.o interrupt.o pervasive.o + +obj-$(CONFIG_PPC_CELL_NATIVE) += iommu.o setup.o spider-pic.o \ + pmu.o io-workarounds.o spider-pci.o obj-$(CONFIG_CBE_RAS) += ras.o obj-$(CONFIG_CBE_THERM) += cbe_thermal.o @@ -14,13 +14,12 @@ obj-$(CONFIG_PPC_IBM_CELL_POWERBUTTON) += cbe_powerbutton.o ifeq ($(CONFIG_SMP),y) obj-$(CONFIG_PPC_CELL_NATIVE) += smp.o +obj-$(CONFIG_PPC_CELL_QPACE) += smp.o endif # needed only when building loadable spufs.ko -spu-priv1-$(CONFIG_PPC_CELL_NATIVE) += spu_priv1_mmio.o - 
-spu-manage-$(CONFIG_PPC_CELLEB) += spu_manage.o -spu-manage-$(CONFIG_PPC_CELL_NATIVE) += spu_manage.o +spu-priv1-$(CONFIG_PPC_CELL_COMMON) += spu_priv1_mmio.o +spu-manage-$(CONFIG_PPC_CELL_COMMON) += spu_manage.o obj-$(CONFIG_SPU_BASE) += spu_callbacks.o spu_base.o \ spu_notify.o \ @@ -31,6 +30,8 @@ obj-$(CONFIG_SPU_BASE) += spu_callbacks.o spu_base.o \ obj-$(CONFIG_PCI_MSI) += axon_msi.o +# qpace setup +obj-$(CONFIG_PPC_CELL_QPACE) += qpace_setup.o # celleb stuff ifeq ($(CONFIG_PPC_CELLEB),y) diff --git a/arch/powerpc/platforms/cell/celleb_setup.c b/arch/powerpc/platforms/cell/celleb_setup.c index b11cb30decb2..07c234f6b2b6 100644 --- a/arch/powerpc/platforms/cell/celleb_setup.c +++ b/arch/powerpc/platforms/cell/celleb_setup.c @@ -45,7 +45,6 @@ #include <asm/mmu.h> #include <asm/processor.h> #include <asm/io.h> -#include <asm/kexec.h> #include <asm/prom.h> #include <asm/machdep.h> #include <asm/cputable.h> @@ -226,9 +225,6 @@ define_machine(celleb_beat) { .pci_setup_phb = celleb_setup_phb, #ifdef CONFIG_KEXEC .kexec_cpu_down = beat_kexec_cpu_down, - .machine_kexec = default_machine_kexec, - .machine_kexec_prepare = default_machine_kexec_prepare, - .machine_crash_shutdown = default_machine_crash_shutdown, #endif }; @@ -248,9 +244,4 @@ define_machine(celleb_native) { .pci_probe_mode = celleb_pci_probe_mode, .pci_setup_phb = celleb_setup_phb, .init_IRQ = celleb_init_IRQ_native, -#ifdef CONFIG_KEXEC - .machine_kexec = default_machine_kexec, - .machine_kexec_prepare = default_machine_kexec_prepare, - .machine_crash_shutdown = default_machine_crash_shutdown, -#endif }; diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c index 3168272ab0d7..86db4dd170a0 100644 --- a/arch/powerpc/platforms/cell/iommu.c +++ b/arch/powerpc/platforms/cell/iommu.c @@ -1053,10 +1053,7 @@ static int __init cell_iommu_fixed_mapping_init(void) } /* We must have dma-ranges properties for fixed mapping to work */ - for (np = NULL; (np = of_find_all_nodes(np));) { - if (of_find_property(np, "dma-ranges", NULL)) - break; - } + np = of_find_node_with_property(NULL, "dma-ranges"); of_node_put(np); if (!np) { diff --git a/arch/powerpc/platforms/cell/qpace_setup.c b/arch/powerpc/platforms/cell/qpace_setup.c new file mode 100644 index 000000000000..be84e6a16b30 --- /dev/null +++ b/arch/powerpc/platforms/cell/qpace_setup.c @@ -0,0 +1,152 @@ +/* + * linux/arch/powerpc/platforms/cell/qpace_setup.c + * + * Copyright (C) 1995 Linus Torvalds + * Adapted from 'alpha' version by Gary Thomas + * Modified by Cort Dougan (cort@cs.nmt.edu) + * Modified by PPC64 Team, IBM Corp + * Modified by Cell Team, IBM Deutschland Entwicklung GmbH + * Modified by Benjamin Krill <ben@codiert.org>, IBM Corp. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#include <linux/sched.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/delay.h> +#include <linux/irq.h> +#include <linux/console.h> +#include <linux/of_platform.h> + +#include <asm/mmu.h> +#include <asm/processor.h> +#include <asm/io.h> +#include <asm/kexec.h> +#include <asm/pgtable.h> +#include <asm/prom.h> +#include <asm/rtas.h> +#include <asm/dma.h> +#include <asm/machdep.h> +#include <asm/time.h> +#include <asm/cputable.h> +#include <asm/irq.h> +#include <asm/spu.h> +#include <asm/spu_priv1.h> +#include <asm/udbg.h> +#include <asm/cell-regs.h> + +#include "interrupt.h" +#include "pervasive.h" +#include "ras.h" +#include "io-workarounds.h" + +static void qpace_show_cpuinfo(struct seq_file *m) +{ + struct device_node *root; + const char *model = ""; + + root = of_find_node_by_path("/"); + if (root) + model = of_get_property(root, "model", NULL); + seq_printf(m, "machine\t\t: CHRP %s\n", model); + of_node_put(root); +} + +static void qpace_progress(char *s, unsigned short hex) +{ + printk("*** %04x : %s\n", hex, s ? s : ""); +} + +static int __init qpace_publish_devices(void) +{ + int node; + + /* Publish OF platform devices for southbridge IOs */ + of_platform_bus_probe(NULL, NULL, NULL); + + /* There is no device for the MIC memory controller, thus we create + * a platform device for it to attach the EDAC driver to. + */ + for_each_online_node(node) { + if (cbe_get_cpu_mic_tm_regs(cbe_node_to_cpu(node)) == NULL) + continue; + platform_device_register_simple("cbe-mic", node, NULL, 0); + } + + return 0; +} +machine_subsys_initcall(qpace, qpace_publish_devices); + +extern int qpace_notify(struct device *dev) +{ + /* set dma_ops for of_platform bus */ + if (dev->bus && dev->bus->name + && !strcmp(dev->bus->name, "of_platform")) + set_dma_ops(dev, &dma_direct_ops); + + return 0; +} + +static void __init qpace_setup_arch(void) +{ +#ifdef CONFIG_SPU_BASE + spu_priv1_ops = &spu_priv1_mmio_ops; + spu_management_ops = &spu_management_of_ops; +#endif + + cbe_regs_init(); + +#ifdef CONFIG_CBE_RAS + cbe_ras_init(); +#endif + +#ifdef CONFIG_SMP + smp_init_cell(); +#endif + + /* init to some ~sane value until calibrate_delay() runs */ + loops_per_jiffy = 50000000; + + cbe_pervasive_init(); +#ifdef CONFIG_DUMMY_CONSOLE + conswitchp = &dummy_con; +#endif + + /* set notifier function */ + platform_notify = &qpace_notify; +} + +static int __init qpace_probe(void) +{ + unsigned long root = of_get_flat_dt_root(); + + if (!of_flat_dt_is_compatible(root, "IBM,QPACE")) + return 0; + + hpte_init_native(); + + return 1; +} + +define_machine(qpace) { + .name = "QPACE", + .probe = qpace_probe, + .setup_arch = qpace_setup_arch, + .show_cpuinfo = qpace_show_cpuinfo, + .restart = rtas_restart, + .power_off = rtas_power_off, + .halt = rtas_halt, + .get_boot_time = rtas_get_boot_time, + .calibrate_decr = generic_calibrate_decr, + .progress = qpace_progress, + .init_IRQ = iic_init_IRQ, +#ifdef CONFIG_KEXEC + .machine_kexec = default_machine_kexec, + .machine_kexec_prepare = default_machine_kexec_prepare, + .machine_crash_shutdown = default_machine_crash_shutdown, +#endif +}; diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c index ab721b50fbba..59305369f6b2 100644 --- a/arch/powerpc/platforms/cell/setup.c +++ b/arch/powerpc/platforms/cell/setup.c @@ -35,7 +35,6 @@ #include <asm/mmu.h> #include <asm/processor.h> #include <asm/io.h> -#include <asm/kexec.h> #include <asm/pgtable.h> #include <asm/prom.h> #include <asm/rtas.h> @@ -289,9 +288,4 @@ 
define_machine(cell) { .progress = cell_progress, .init_IRQ = cell_init_irq, .pci_setup_phb = cell_setup_phb, -#ifdef CONFIG_KEXEC - .machine_kexec = default_machine_kexec, - .machine_kexec_prepare = default_machine_kexec_prepare, - .machine_crash_shutdown = default_machine_crash_shutdown, -#endif }; diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c index 1b26071a86ca..7106b63d401b 100644 --- a/arch/powerpc/platforms/cell/spufs/file.c +++ b/arch/powerpc/platforms/cell/spufs/file.c @@ -273,12 +273,10 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) return VM_FAULT_NOPAGE; if (ctx->state == SPU_STATE_SAVED) { - vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) - & ~_PAGE_NO_CACHE); + vma->vm_page_prot = pgprot_cached(vma->vm_page_prot); pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset); } else { - vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) - | _PAGE_NO_CACHE); + vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot); pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT; } vm_insert_pfn(vma, address, pfn); @@ -338,8 +336,7 @@ static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma) return -EINVAL; vma->vm_flags |= VM_IO | VM_PFNMAP; - vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) - | _PAGE_NO_CACHE); + vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot); vma->vm_ops = &spufs_mem_mmap_vmops; return 0; @@ -452,8 +449,7 @@ static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma) return -EINVAL; vma->vm_flags |= VM_IO | VM_PFNMAP; - vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) - | _PAGE_NO_CACHE | _PAGE_GUARDED); + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); vma->vm_ops = &spufs_cntl_mmap_vmops; return 0; @@ -1155,8 +1151,7 @@ static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma) return -EINVAL; vma->vm_flags |= VM_IO | VM_PFNMAP; - vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) - | _PAGE_NO_CACHE | _PAGE_GUARDED); + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); vma->vm_ops = &spufs_signal1_mmap_vmops; return 0; @@ -1292,8 +1287,7 @@ static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma) return -EINVAL; vma->vm_flags |= VM_IO | VM_PFNMAP; - vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) - | _PAGE_NO_CACHE | _PAGE_GUARDED); + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); vma->vm_ops = &spufs_signal2_mmap_vmops; return 0; @@ -1414,8 +1408,7 @@ static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma) return -EINVAL; vma->vm_flags |= VM_IO | VM_PFNMAP; - vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) - | _PAGE_NO_CACHE | _PAGE_GUARDED); + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); vma->vm_ops = &spufs_mss_mmap_vmops; return 0; @@ -1476,8 +1469,7 @@ static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma) return -EINVAL; vma->vm_flags |= VM_IO | VM_PFNMAP; - vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) - | _PAGE_NO_CACHE | _PAGE_GUARDED); + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); vma->vm_ops = &spufs_psmap_mmap_vmops; return 0; @@ -1536,8 +1528,7 @@ static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma) return -EINVAL; vma->vm_flags |= VM_IO | VM_PFNMAP; - vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) - | _PAGE_NO_CACHE | _PAGE_GUARDED); + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); vma->vm_ops = 
&spufs_mfc_mmap_vmops; return 0; diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c index cb85d237e492..6296bfd9cb0b 100644 --- a/arch/powerpc/platforms/cell/spufs/inode.c +++ b/arch/powerpc/platforms/cell/spufs/inode.c @@ -95,8 +95,8 @@ spufs_new_inode(struct super_block *sb, int mode) goto out; inode->i_mode = mode; - inode->i_uid = current->fsuid; - inode->i_gid = current->fsgid; + inode->i_uid = current_fsuid(); + inode->i_gid = current_fsgid(); inode->i_blocks = 0; inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; out: @@ -323,7 +323,7 @@ static int spufs_context_open(struct dentry *dentry, struct vfsmount *mnt) goto out; } - filp = dentry_open(dentry, mnt, O_RDONLY); + filp = dentry_open(dentry, mnt, O_RDONLY, current_cred()); if (IS_ERR(filp)) { put_unused_fd(ret); ret = PTR_ERR(filp); @@ -562,7 +562,7 @@ static int spufs_gang_open(struct dentry *dentry, struct vfsmount *mnt) goto out; } - filp = dentry_open(dentry, mnt, O_RDONLY); + filp = dentry_open(dentry, mnt, O_RDONLY, current_cred()); if (IS_ERR(filp)) { put_unused_fd(ret); ret = PTR_ERR(filp); diff --git a/arch/powerpc/platforms/chrp/pci.c b/arch/powerpc/platforms/chrp/pci.c index d3cde6b9d2df..f6b0c519d5a2 100644 --- a/arch/powerpc/platforms/chrp/pci.c +++ b/arch/powerpc/platforms/chrp/pci.c @@ -141,6 +141,7 @@ hydra_init(void) of_node_put(np); return 0; } + of_node_put(np); Hydra = ioremap(r.start, r.end-r.start); printk("Hydra Mac I/O at %llx\n", (unsigned long long)r.start); printk("Hydra Feature_Control was %x", @@ -198,7 +199,7 @@ static void __init setup_peg2(struct pci_controller *hose, struct device_node *d printk ("RTAS supporting Pegasos OF not found, please upgrade" " your firmware\n"); } - ppc_pci_flags |= PPC_PCI_REASSIGN_ALL_BUS; + ppc_pci_add_flags(PPC_PCI_REASSIGN_ALL_BUS); /* keep the reference to the root node */ } diff --git a/arch/powerpc/platforms/embedded6xx/c2k.c b/arch/powerpc/platforms/embedded6xx/c2k.c index 32ba0fa0ad03..8cab5731850f 100644 --- a/arch/powerpc/platforms/embedded6xx/c2k.c +++ b/arch/powerpc/platforms/embedded6xx/c2k.c @@ -20,7 +20,6 @@ #include <linux/seq_file.h> #include <linux/time.h> #include <linux/of.h> -#include <linux/kexec.h> #include <asm/machdep.h> #include <asm/prom.h> @@ -147,9 +146,4 @@ define_machine(c2k) { .get_irq = mv64x60_get_irq, .restart = c2k_restart, .calibrate_decr = generic_calibrate_decr, -#ifdef CONFIG_KEXEC - .machine_kexec = default_machine_kexec, - .machine_kexec_prepare = default_machine_kexec_prepare, - .machine_crash_shutdown = default_machine_crash_shutdown, -#endif }; diff --git a/arch/powerpc/platforms/embedded6xx/prpmc2800.c b/arch/powerpc/platforms/embedded6xx/prpmc2800.c index 4c485e984236..670035f49a69 100644 --- a/arch/powerpc/platforms/embedded6xx/prpmc2800.c +++ b/arch/powerpc/platforms/embedded6xx/prpmc2800.c @@ -19,7 +19,6 @@ #include <asm/prom.h> #include <asm/system.h> #include <asm/time.h> -#include <asm/kexec.h> #include <mm/mmu_decl.h> @@ -155,9 +154,4 @@ define_machine(prpmc2800){ .get_irq = mv64x60_get_irq, .restart = prpmc2800_restart, .calibrate_decr = generic_calibrate_decr, -#ifdef CONFIG_KEXEC - .machine_kexec = default_machine_kexec, - .machine_kexec_prepare = default_machine_kexec_prepare, - .machine_crash_shutdown = default_machine_crash_shutdown, -#endif }; diff --git a/arch/powerpc/platforms/iseries/Kconfig b/arch/powerpc/platforms/iseries/Kconfig index 45ffd8e542f4..ed3753d8c109 100644 --- a/arch/powerpc/platforms/iseries/Kconfig +++ 
b/arch/powerpc/platforms/iseries/Kconfig @@ -9,6 +9,7 @@ menu "iSeries device drivers" config VIODASD tristate "iSeries Virtual I/O disk support" + depends on BLOCK help If you are running on an iSeries system and you want to use virtual disks created and managed by OS/400, say Y. diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c index d4c61c3c9669..bfd60e4accee 100644 --- a/arch/powerpc/platforms/maple/setup.c +++ b/arch/powerpc/platforms/maple/setup.c @@ -50,7 +50,6 @@ #include <asm/system.h> #include <asm/pgtable.h> #include <asm/io.h> -#include <asm/kexec.h> #include <asm/pci-bridge.h> #include <asm/iommu.h> #include <asm/machdep.h> @@ -335,9 +334,4 @@ define_machine(maple) { .calibrate_decr = generic_calibrate_decr, .progress = maple_progress, .power_save = power4_idle, -#ifdef CONFIG_KEXEC - .machine_kexec = default_machine_kexec, - .machine_kexec_prepare = default_machine_kexec_prepare, - .machine_crash_shutdown = default_machine_crash_shutdown, -#endif }; diff --git a/arch/powerpc/platforms/powermac/cpufreq_32.c b/arch/powerpc/platforms/powermac/cpufreq_32.c index 792d3ce8112e..65c585b8b00d 100644 --- a/arch/powerpc/platforms/powermac/cpufreq_32.c +++ b/arch/powerpc/platforms/powermac/cpufreq_32.c @@ -310,7 +310,7 @@ static int pmu_set_cpu_speed(int low_speed) _set_L3CR(save_l3cr); /* Restore userland MMU context */ - set_context(current->active_mm->context.id, current->active_mm->pgd); + switch_mmu_context(NULL, current->active_mm); #ifdef DEBUG_FREQ printk(KERN_DEBUG "HID1, after: %x\n", mfspr(SPRN_HID1)); diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c index bcf50d7056e9..54b7b76ed4f0 100644 --- a/arch/powerpc/platforms/powermac/pci.c +++ b/arch/powerpc/platforms/powermac/pci.c @@ -729,7 +729,7 @@ static void __init setup_bandit(struct pci_controller *hose, static int __init setup_uninorth(struct pci_controller *hose, struct resource *addr) { - ppc_pci_flags |= PPC_PCI_REASSIGN_ALL_BUS; + ppc_pci_add_flags(PPC_PCI_REASSIGN_ALL_BUS); has_uninorth = 1; hose->ops = ¯isc_pci_ops; hose->cfg_addr = ioremap(addr->start + 0x800000, 0x1000); @@ -996,7 +996,7 @@ void __init pmac_pci_init(void) struct device_node *np, *root; struct device_node *ht = NULL; - ppc_pci_flags = PPC_PCI_CAN_SKIP_ISA_ALIGN; + ppc_pci_set_flags(PPC_PCI_CAN_SKIP_ISA_ALIGN); root = of_find_node_by_path("/"); if (root == NULL) { @@ -1055,7 +1055,7 @@ void __init pmac_pci_init(void) * some offset between bus number and domains for now when we * assign all busses should help for now */ - if (ppc_pci_flags & PPC_PCI_REASSIGN_ALL_BUS) + if (ppc_pci_has_flag(PPC_PCI_REASSIGN_ALL_BUS)) pcibios_assign_bus_offset = 0x10; #endif } diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c index 82c14d203d8b..9b78f5300c24 100644 --- a/arch/powerpc/platforms/powermac/setup.c +++ b/arch/powerpc/platforms/powermac/setup.c @@ -60,7 +60,6 @@ #include <asm/system.h> #include <asm/pgtable.h> #include <asm/io.h> -#include <asm/kexec.h> #include <asm/pci-bridge.h> #include <asm/ohare.h> #include <asm/mediabay.h> @@ -310,9 +309,7 @@ static void __init pmac_setup_arch(void) } /* See if newworld or oldworld */ - for (ic = NULL; (ic = of_find_all_nodes(ic)) != NULL; ) - if (of_get_property(ic, "interrupt-controller", NULL)) - break; + ic = of_find_node_with_property(NULL, "interrupt-controller"); if (ic) { pmac_newworld = 1; of_node_put(ic); @@ -740,11 +737,6 @@ define_machine(powermac) { .pci_probe_mode = 
pmac_pci_probe_mode, .power_save = power4_idle, .enable_pmcs = power4_enable_pmcs, -#ifdef CONFIG_KEXEC - .machine_kexec = default_machine_kexec, - .machine_kexec_prepare = default_machine_kexec_prepare, - .machine_crash_shutdown = default_machine_crash_shutdown, -#endif #endif /* CONFIG_PPC64 */ #ifdef CONFIG_PPC32 .pcibios_enable_device_hook = pmac_pci_enable_device_hook, diff --git a/arch/powerpc/platforms/powermac/sleep.S b/arch/powerpc/platforms/powermac/sleep.S index adee28da353f..1c2802fabd57 100644 --- a/arch/powerpc/platforms/powermac/sleep.S +++ b/arch/powerpc/platforms/powermac/sleep.S @@ -17,6 +17,7 @@ #include <asm/cache.h> #include <asm/thread_info.h> #include <asm/asm-offsets.h> +#include <asm/mmu.h> #define MAGIC 0x4c617273 /* 'Lars' */ @@ -323,7 +324,7 @@ grackle_wake_up: lwz r4,SL_IBAT3+4(r1) mtibatl 3,r4 -BEGIN_FTR_SECTION +BEGIN_MMU_FTR_SECTION li r4,0 mtspr SPRN_DBAT4U,r4 mtspr SPRN_DBAT4L,r4 @@ -341,7 +342,7 @@ BEGIN_FTR_SECTION mtspr SPRN_IBAT6L,r4 mtspr SPRN_IBAT7U,r4 mtspr SPRN_IBAT7L,r4 -END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS) +END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS) /* Flush all TLBs */ lis r4,0x1000 diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c index 40f72c2a4699..6b0711c15eca 100644 --- a/arch/powerpc/platforms/powermac/smp.c +++ b/arch/powerpc/platforms/powermac/smp.c @@ -739,7 +739,7 @@ static void __init smp_core99_setup(int ncpus) /* XXX should get this from reg properties */ for (i = 1; i < ncpus; ++i) - smp_hw_index[i] = i; + set_hard_smp_processor_id(i, i); } #endif diff --git a/arch/powerpc/platforms/ps3/device-init.c b/arch/powerpc/platforms/ps3/device-init.c index ffdd8e963fbd..dbc124e05646 100644 --- a/arch/powerpc/platforms/ps3/device-init.c +++ b/arch/powerpc/platforms/ps3/device-init.c @@ -314,11 +314,17 @@ static int __init ps3_setup_vuart_device(enum ps3_match_id match_id, result = ps3_system_bus_device_register(&p->dev); - if (result) + if (result) { pr_debug("%s:%d ps3_system_bus_device_register failed\n", __func__, __LINE__); - + goto fail_device_register; + } pr_debug(" <- %s:%d\n", __func__, __LINE__); + return 0; + +fail_device_register: + kfree(p); + pr_debug(" <- %s:%d fail\n", __func__, __LINE__); return result; } @@ -463,11 +469,17 @@ static int __init ps3_register_sound_devices(void) result = ps3_system_bus_device_register(&p->dev); - if (result) + if (result) { pr_debug("%s:%d ps3_system_bus_device_register failed\n", __func__, __LINE__); - + goto fail_device_register; + } pr_debug(" <- %s:%d\n", __func__, __LINE__); + return 0; + +fail_device_register: + kfree(p); + pr_debug(" <- %s:%d failed\n", __func__, __LINE__); return result; } @@ -485,17 +497,24 @@ static int __init ps3_register_graphics_devices(void) if (!p) return -ENOMEM; - p->dev.match_id = PS3_MATCH_ID_GRAPHICS; - p->dev.match_sub_id = PS3_MATCH_SUB_ID_FB; + p->dev.match_id = PS3_MATCH_ID_GPU; + p->dev.match_sub_id = PS3_MATCH_SUB_ID_GPU_FB; p->dev.dev_type = PS3_DEVICE_TYPE_IOC0; result = ps3_system_bus_device_register(&p->dev); - if (result) + if (result) { pr_debug("%s:%d ps3_system_bus_device_register failed\n", __func__, __LINE__); + goto fail_device_register; + } pr_debug(" <- %s:%d\n", __func__, __LINE__); + return 0; + +fail_device_register: + kfree(p); + pr_debug(" <- %s:%d failed\n", __func__, __LINE__); return result; } diff --git a/arch/powerpc/platforms/ps3/mm.c b/arch/powerpc/platforms/ps3/mm.c index 3a58ffabccd9..a4d49dd9e8a9 100644 --- a/arch/powerpc/platforms/ps3/mm.c +++ 
b/arch/powerpc/platforms/ps3/mm.c @@ -649,7 +649,7 @@ static int dma_sb_region_create(struct ps3_dma_region *r) { int result; - pr_info(" -> %s:%d:\n", __func__, __LINE__); + DBG(" -> %s:%d:\n", __func__, __LINE__); BUG_ON(!r); diff --git a/arch/powerpc/platforms/ps3/setup.c b/arch/powerpc/platforms/ps3/setup.c index 77bc330263c4..35f3e85cf60e 100644 --- a/arch/powerpc/platforms/ps3/setup.c +++ b/arch/powerpc/platforms/ps3/setup.c @@ -23,7 +23,6 @@ #include <linux/fs.h> #include <linux/root_dev.h> #include <linux/console.h> -#include <linux/kexec.h> #include <linux/bootmem.h> #include <asm/machdep.h> @@ -42,6 +41,10 @@ #define DBG pr_debug #endif +/* mutex synchronizing GPU accesses and video mode changes */ +DEFINE_MUTEX(ps3_gpu_mutex); +EXPORT_SYMBOL_GPL(ps3_gpu_mutex); + #if !defined(CONFIG_SMP) static void smp_send_stop(void) {} #endif @@ -277,8 +280,5 @@ define_machine(ps3) { .halt = ps3_halt, #if defined(CONFIG_KEXEC) .kexec_cpu_down = ps3_kexec_cpu_down, - .machine_kexec = default_machine_kexec, - .machine_kexec_prepare = default_machine_kexec_prepare, - .machine_crash_shutdown = default_machine_crash_shutdown, #endif }; diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c index 661e9f77ebf6..ee0d22911621 100644 --- a/arch/powerpc/platforms/ps3/system-bus.c +++ b/arch/powerpc/platforms/ps3/system-bus.c @@ -31,7 +31,7 @@ #include "platform.h" static struct device ps3_system_bus = { - .bus_id = "ps3_system", + .init_name = "ps3_system", }; /* FIXME: need device usage counters! */ @@ -175,7 +175,7 @@ int ps3_open_hv_device(struct ps3_system_bus_device *dev) return ps3_open_hv_device_sb(dev); case PS3_MATCH_ID_SOUND: - case PS3_MATCH_ID_GRAPHICS: + case PS3_MATCH_ID_GPU: return ps3_open_hv_device_gpu(dev); case PS3_MATCH_ID_AV_SETTINGS: @@ -213,7 +213,7 @@ int ps3_close_hv_device(struct ps3_system_bus_device *dev) return ps3_close_hv_device_sb(dev); case PS3_MATCH_ID_SOUND: - case PS3_MATCH_ID_GRAPHICS: + case PS3_MATCH_ID_GPU: return ps3_close_hv_device_gpu(dev); case PS3_MATCH_ID_AV_SETTINGS: @@ -356,12 +356,12 @@ static int ps3_system_bus_match(struct device *_dev, if (result) pr_info("%s:%d: dev=%u.%u(%s), drv=%u.%u(%s): match\n", __func__, __LINE__, - dev->match_id, dev->match_sub_id, dev->core.bus_id, + dev->match_id, dev->match_sub_id, dev_name(&dev->core), drv->match_id, drv->match_sub_id, drv->core.name); else pr_debug("%s:%d: dev=%u.%u(%s), drv=%u.%u(%s): miss\n", __func__, __LINE__, - dev->match_id, dev->match_sub_id, dev->core.bus_id, + dev->match_id, dev->match_sub_id, dev_name(&dev->core), drv->match_id, drv->match_sub_id, drv->core.name); return result; @@ -383,9 +383,9 @@ static int ps3_system_bus_probe(struct device *_dev) result = drv->probe(dev); else pr_debug("%s:%d: %s no probe method\n", __func__, __LINE__, - dev->core.bus_id); + dev_name(&dev->core)); - pr_debug(" <- %s:%d: %s\n", __func__, __LINE__, dev->core.bus_id); + pr_debug(" <- %s:%d: %s\n", __func__, __LINE__, dev_name(&dev->core)); return result; } @@ -407,7 +407,7 @@ static int ps3_system_bus_remove(struct device *_dev) dev_dbg(&dev->core, "%s:%d %s: no remove method\n", __func__, __LINE__, drv->core.name); - pr_debug(" <- %s:%d: %s\n", __func__, __LINE__, dev->core.bus_id); + pr_debug(" <- %s:%d: %s\n", __func__, __LINE__, dev_name(&dev->core)); return result; } @@ -432,7 +432,7 @@ static void ps3_system_bus_shutdown(struct device *_dev) BUG_ON(!drv); dev_dbg(&dev->core, "%s:%d: %s -> %s\n", __func__, __LINE__, - dev->core.bus_id, drv->core.name); + 
dev_name(&dev->core), drv->core.name); if (drv->shutdown) drv->shutdown(dev); @@ -453,7 +453,8 @@ static int ps3_system_bus_uevent(struct device *_dev, struct kobj_uevent_env *en { struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); - if (add_uevent_var(env, "MODALIAS=ps3:%d", dev->match_id)) + if (add_uevent_var(env, "MODALIAS=ps3:%d:%d", dev->match_id, + dev->match_sub_id)) return -ENOMEM; return 0; } @@ -462,7 +463,8 @@ static ssize_t modalias_show(struct device *_dev, struct device_attribute *a, char *buf) { struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); - int len = snprintf(buf, PAGE_SIZE, "ps3:%d\n", dev->match_id); + int len = snprintf(buf, PAGE_SIZE, "ps3:%d:%d\n", dev->match_id, + dev->match_sub_id); return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len; } @@ -742,22 +744,18 @@ int ps3_system_bus_device_register(struct ps3_system_bus_device *dev) switch (dev->dev_type) { case PS3_DEVICE_TYPE_IOC0: dev->core.archdata.dma_ops = &ps3_ioc0_dma_ops; - snprintf(dev->core.bus_id, sizeof(dev->core.bus_id), - "ioc0_%02x", ++dev_ioc0_count); + dev_set_name(&dev->core, "ioc0_%02x", ++dev_ioc0_count); break; case PS3_DEVICE_TYPE_SB: dev->core.archdata.dma_ops = &ps3_sb_dma_ops; - snprintf(dev->core.bus_id, sizeof(dev->core.bus_id), - "sb_%02x", ++dev_sb_count); + dev_set_name(&dev->core, "sb_%02x", ++dev_sb_count); break; case PS3_DEVICE_TYPE_VUART: - snprintf(dev->core.bus_id, sizeof(dev->core.bus_id), - "vuart_%02x", ++dev_vuart_count); + dev_set_name(&dev->core, "vuart_%02x", ++dev_vuart_count); break; case PS3_DEVICE_TYPE_LPM: - snprintf(dev->core.bus_id, sizeof(dev->core.bus_id), - "lpm_%02x", ++dev_lpm_count); + dev_set_name(&dev->core, "lpm_%02x", ++dev_lpm_count); break; default: BUG(); @@ -766,7 +764,7 @@ int ps3_system_bus_device_register(struct ps3_system_bus_device *dev) dev->core.archdata.of_node = NULL; set_dev_node(&dev->core, 0); - pr_debug("%s:%d add %s\n", __func__, __LINE__, dev->core.bus_id); + pr_debug("%s:%d add %s\n", __func__, __LINE__, dev_name(&dev->core)); result = device_register(&dev->core); return result; diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig index 97619fd51e39..ddc2a307cd50 100644 --- a/arch/powerpc/platforms/pseries/Kconfig +++ b/arch/powerpc/platforms/pseries/Kconfig @@ -54,7 +54,7 @@ config PPC_SMLPAR config CMM tristate "Collaborative memory management" - depends on PPC_SMLPAR + depends on PPC_SMLPAR && !CRASH_DUMP default y help Select this option, if you want to enable the kernel interface diff --git a/arch/powerpc/platforms/pseries/cmm.c b/arch/powerpc/platforms/pseries/cmm.c index 5cd4d2761620..6567439fe78d 100644 --- a/arch/powerpc/platforms/pseries/cmm.c +++ b/arch/powerpc/platforms/pseries/cmm.c @@ -28,6 +28,7 @@ #include <linux/kthread.h> #include <linux/module.h> #include <linux/oom.h> +#include <linux/reboot.h> #include <linux/sched.h> #include <linux/stringify.h> #include <linux/swap.h> @@ -384,6 +385,26 @@ static void cmm_unregister_sysfs(struct sys_device *sysdev) } /** + * cmm_reboot_notifier - Make sure pages are not still marked as "loaned" + * + **/ +static int cmm_reboot_notifier(struct notifier_block *nb, + unsigned long action, void *unused) +{ + if (action == SYS_RESTART) { + if (cmm_thread_ptr) + kthread_stop(cmm_thread_ptr); + cmm_thread_ptr = NULL; + cmm_free_pages(loaned_pages); + } + return NOTIFY_DONE; +} + +static struct notifier_block cmm_reboot_nb = { + .notifier_call = cmm_reboot_notifier, +}; + +/** * cmm_init - Module initialization * * 
Return value: @@ -399,9 +420,12 @@ static int cmm_init(void) if ((rc = register_oom_notifier(&cmm_oom_nb)) < 0) return rc; - if ((rc = cmm_sysfs_register(&cmm_sysdev))) + if ((rc = register_reboot_notifier(&cmm_reboot_nb))) goto out_oom_notifier; + if ((rc = cmm_sysfs_register(&cmm_sysdev))) + goto out_reboot_notifier; + if (cmm_disabled) return rc; @@ -415,6 +439,8 @@ static int cmm_init(void) out_unregister_sysfs: cmm_unregister_sysfs(&cmm_sysdev); +out_reboot_notifier: + unregister_reboot_notifier(&cmm_reboot_nb); out_oom_notifier: unregister_oom_notifier(&cmm_oom_nb); return rc; @@ -431,6 +457,7 @@ static void cmm_exit(void) if (cmm_thread_ptr) kthread_stop(cmm_thread_ptr); unregister_oom_notifier(&cmm_oom_nb); + unregister_reboot_notifier(&cmm_reboot_nb); cmm_free_pages(loaned_pages); cmm_unregister_sysfs(&cmm_sysdev); } diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c index 54816d75b578..989d6462c154 100644 --- a/arch/powerpc/platforms/pseries/eeh.c +++ b/arch/powerpc/platforms/pseries/eeh.c @@ -21,6 +21,8 @@ * Please address comments and feedback to Linas Vepstas <linas@austin.ibm.com> */ +#undef DEBUG + #include <linux/delay.h> #include <linux/init.h> #include <linux/list.h> @@ -488,10 +490,8 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev) if (!(pdn->eeh_mode & EEH_MODE_SUPPORTED) || pdn->eeh_mode & EEH_MODE_NOCHECK) { ignored_check++; -#ifdef DEBUG - printk ("EEH:ignored check (%x) for %s %s\n", - pdn->eeh_mode, pci_name (dev), dn->full_name); -#endif + pr_debug("EEH: Ignored check (%x) for %s %s\n", + pdn->eeh_mode, pci_name (dev), dn->full_name); return 0; } @@ -1014,10 +1014,9 @@ static void *early_enable_eeh(struct device_node *dn, void *data) eeh_subsystem_enabled = 1; pdn->eeh_mode |= EEH_MODE_SUPPORTED; -#ifdef DEBUG - printk(KERN_DEBUG "EEH: %s: eeh enabled, config=%x pe_config=%x\n", - dn->full_name, pdn->eeh_config_addr, pdn->eeh_pe_config_addr); -#endif + pr_debug("EEH: %s: eeh enabled, config=%x pe_config=%x\n", + dn->full_name, pdn->eeh_config_addr, + pdn->eeh_pe_config_addr); } else { /* This device doesn't support EEH, but it may have an @@ -1161,13 +1160,17 @@ static void eeh_add_device_late(struct pci_dev *dev) if (!dev || !eeh_subsystem_enabled) return; -#ifdef DEBUG - printk(KERN_DEBUG "EEH: adding device %s\n", pci_name(dev)); -#endif + pr_debug("EEH: Adding device %s\n", pci_name(dev)); - pci_dev_get (dev); dn = pci_device_to_OF_node(dev); pdn = PCI_DN(dn); + if (pdn->pcidev == dev) { + pr_debug("EEH: Already referenced !\n"); + return; + } + WARN_ON(pdn->pcidev); + + pci_dev_get (dev); pdn->pcidev = dev; pci_addr_cache_insert_device(dev); @@ -1206,17 +1209,18 @@ static void eeh_remove_device(struct pci_dev *dev) return; /* Unregister the device with the EEH/PCI address search system */ -#ifdef DEBUG - printk(KERN_DEBUG "EEH: remove device %s\n", pci_name(dev)); -#endif - pci_addr_cache_remove_device(dev); - eeh_sysfs_remove_device(dev); + pr_debug("EEH: Removing device %s\n", pci_name(dev)); dn = pci_device_to_OF_node(dev); - if (PCI_DN(dn)->pcidev) { - PCI_DN(dn)->pcidev = NULL; - pci_dev_put (dev); + if (PCI_DN(dn)->pcidev == NULL) { + pr_debug("EEH: Not referenced !\n"); + return; } + PCI_DN(dn)->pcidev = NULL; + pci_dev_put (dev); + + pci_addr_cache_remove_device(dev); + eeh_sysfs_remove_device(dev); } void eeh_remove_bus_device(struct pci_dev *dev) diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c index 1f032483c026..a20ead87153d 100644 
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c @@ -116,7 +116,7 @@ static void pseries_cpu_die(unsigned int cpu) cpu_status = query_cpu_stopped(pcpu); if (cpu_status == 0 || cpu_status == -1) break; - msleep(200); + cpu_relax(); } if (cpu_status != 0) { printk("Querying DEAD? cpu %i (%i) shows %i\n", diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c index 7190493e9bdc..5e1ed3d60ee5 100644 --- a/arch/powerpc/platforms/pseries/pci_dlpar.c +++ b/arch/powerpc/platforms/pseries/pci_dlpar.c @@ -25,6 +25,8 @@ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ +#undef DEBUG + #include <linux/pci.h> #include <asm/pci-bridge.h> #include <asm/ppc-pci.h> @@ -69,74 +71,25 @@ EXPORT_SYMBOL_GPL(pcibios_find_pci_bus); * Remove all of the PCI devices under this bus both from the * linux pci device tree, and from the powerpc EEH address cache. */ -void -pcibios_remove_pci_devices(struct pci_bus *bus) +void pcibios_remove_pci_devices(struct pci_bus *bus) { - struct pci_dev *dev, *tmp; + struct pci_dev *dev, *tmp; + struct pci_bus *child_bus; + + /* First go down child busses */ + list_for_each_entry(child_bus, &bus->children, node) + pcibios_remove_pci_devices(child_bus); + pr_debug("PCI: Removing devices on bus %04x:%02x\n", + pci_domain_nr(bus), bus->number); list_for_each_entry_safe(dev, tmp, &bus->devices, bus_list) { + pr_debug(" * Removing %s...\n", pci_name(dev)); eeh_remove_bus_device(dev); - pci_remove_bus_device(dev); - } + pci_remove_bus_device(dev); + } } EXPORT_SYMBOL_GPL(pcibios_remove_pci_devices); -/* Must be called before pci_bus_add_devices */ -void -pcibios_fixup_new_pci_devices(struct pci_bus *bus) -{ - struct pci_dev *dev; - - list_for_each_entry(dev, &bus->devices, bus_list) { - /* Skip already-added devices */ - if (!dev->is_added) { - int i; - - /* Fill device archdata and setup iommu table */ - pcibios_setup_new_device(dev); - - pci_read_irq_line(dev); - for (i = 0; i < PCI_NUM_RESOURCES; i++) { - struct resource *r = &dev->resource[i]; - - if (r->parent || !r->start || !r->flags) - continue; - pci_claim_resource(dev, i); - } - } - } -} -EXPORT_SYMBOL_GPL(pcibios_fixup_new_pci_devices); - -static int -pcibios_pci_config_bridge(struct pci_dev *dev) -{ - u8 sec_busno; - struct pci_bus *child_bus; - - /* Get busno of downstream bus */ - pci_read_config_byte(dev, PCI_SECONDARY_BUS, &sec_busno); - - /* Add to children of PCI bridge dev->bus */ - child_bus = pci_add_new_bus(dev->bus, dev, sec_busno); - if (!child_bus) { - printk (KERN_ERR "%s: could not add second bus\n", __func__); - return -EIO; - } - sprintf(child_bus->name, "PCI Bus #%02x", child_bus->number); - - pci_scan_child_bus(child_bus); - - /* Fixup new pci devices */ - pcibios_fixup_new_pci_devices(child_bus); - - /* Make the discovered devices available */ - pci_bus_add_devices(child_bus); - - eeh_add_device_tree_late(child_bus); - return 0; -} - /** * pcibios_add_pci_devices - adds new pci devices to bus * @@ -147,10 +100,9 @@ pcibios_pci_config_bridge(struct pci_dev *dev) * is how this routine differs from other, similar pcibios * routines.) 
*/ -void -pcibios_add_pci_devices(struct pci_bus * bus) +void pcibios_add_pci_devices(struct pci_bus * bus) { - int slotno, num, mode; + int slotno, num, mode, pass, max; struct pci_dev *dev; struct device_node *dn = pci_bus_to_OF_node(bus); @@ -162,26 +114,23 @@ pcibios_add_pci_devices(struct pci_bus * bus) if (mode == PCI_PROBE_DEVTREE) { /* use ofdt-based probe */ - of_scan_bus(dn, bus); - if (!list_empty(&bus->devices)) { - pcibios_fixup_new_pci_devices(bus); - pci_bus_add_devices(bus); - eeh_add_device_tree_late(bus); - } + of_rescan_bus(dn, bus); } else if (mode == PCI_PROBE_NORMAL) { /* use legacy probe */ slotno = PCI_SLOT(PCI_DN(dn->child)->devfn); num = pci_scan_slot(bus, PCI_DEVFN(slotno, 0)); - if (num) { - pcibios_fixup_new_pci_devices(bus); - pci_bus_add_devices(bus); - eeh_add_device_tree_late(bus); + if (!num) + return; + pcibios_setup_bus_devices(bus); + max = bus->secondary; + for (pass=0; pass < 2; pass++) + list_for_each_entry(dev, &bus->devices, bus_list) { + if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || + dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) + max = pci_scan_bridge(bus, dev, max, pass); } - - list_for_each_entry(dev, &bus->devices, bus_list) - if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) - pcibios_pci_config_bridge(dev); } + pcibios_finish_adding_to_bus(bus); } EXPORT_SYMBOL_GPL(pcibios_add_pci_devices); @@ -190,6 +139,8 @@ struct pci_controller * __devinit init_phb_dynamic(struct device_node *dn) struct pci_controller *phb; int primary; + pr_debug("PCI: Initializing new hotplug PHB %s\n", dn->full_name); + primary = list_empty(&hose_list); phb = pcibios_alloc_controller(dn); if (!phb) @@ -203,11 +154,59 @@ struct pci_controller * __devinit init_phb_dynamic(struct device_node *dn) eeh_add_device_tree_early(dn); scan_phb(phb); - pcibios_allocate_bus_resources(phb->bus); - pcibios_fixup_new_pci_devices(phb->bus); - pci_bus_add_devices(phb->bus); - eeh_add_device_tree_late(phb->bus); + pcibios_finish_adding_to_bus(phb->bus); return phb; } EXPORT_SYMBOL_GPL(init_phb_dynamic); + +/* RPA-specific bits for removing PHBs */ +int remove_phb_dynamic(struct pci_controller *phb) +{ + struct pci_bus *b = phb->bus; + struct resource *res; + int rc, i; + + pr_debug("PCI: Removing PHB %04x:%02x... 
\n", + pci_domain_nr(b), b->number); + + /* We cannot to remove a root bus that has children */ + if (!(list_empty(&b->children) && list_empty(&b->devices))) + return -EBUSY; + + /* We -know- there aren't any child devices anymore at this stage + * and thus, we can safely unmap the IO space as it's not in use + */ + res = &phb->io_resource; + if (res->flags & IORESOURCE_IO) { + rc = pcibios_unmap_io_space(b); + if (rc) { + printk(KERN_ERR "%s: failed to unmap IO on bus %s\n", + __func__, b->name); + return 1; + } + } + + /* Unregister the bridge device from sysfs and remove the PCI bus */ + device_unregister(b->bridge); + phb->bus = NULL; + pci_remove_bus(b); + + /* Now release the IO resource */ + if (res->flags & IORESOURCE_IO) + release_resource(res); + + /* Release memory resources */ + for (i = 0; i < 3; ++i) { + res = &phb->mem_resources[i]; + if (!(res->flags & IORESOURCE_MEM)) + continue; + release_resource(res); + } + + /* Free pci_controller data structure */ + pcibios_free_controller(phb); + + return 0; +} +EXPORT_SYMBOL_GPL(remove_phb_dynamic); diff --git a/arch/powerpc/platforms/pseries/phyp_dump.c b/arch/powerpc/platforms/pseries/phyp_dump.c index edbc012c2ebc..6cf35cd8d0b5 100644 --- a/arch/powerpc/platforms/pseries/phyp_dump.c +++ b/arch/powerpc/platforms/pseries/phyp_dump.c @@ -130,6 +130,9 @@ static unsigned long init_dump_header(struct phyp_dump_header *ph) static void print_dump_header(const struct phyp_dump_header *ph) { #ifdef DEBUG + if (ph == NULL) + return; + printk(KERN_INFO "dump header:\n"); /* setup some ph->sections required */ printk(KERN_INFO "version = %d\n", ph->version); @@ -411,6 +414,8 @@ static int __init phyp_dump_setup(void) of_node_put(rtas); } + ibm_configure_kernel_dump = rtas_token("ibm,configure-kernel-dump"); + print_dump_header(dump_header); dump_area_length = init_dump_header(&phdr); /* align down */ diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c index e1904774a70f..f7a69021b7bf 100644 --- a/arch/powerpc/platforms/pseries/xics.c +++ b/arch/powerpc/platforms/pseries/xics.c @@ -579,7 +579,7 @@ static void xics_update_irq_servers(void) int i, j; struct device_node *np; u32 ilen; - const u32 *ireg, *isize; + const u32 *ireg; u32 hcpuid; /* Find the server numbers for the boot cpu. 
*/ @@ -607,11 +607,6 @@ static void xics_update_irq_servers(void) } } - /* get the bit size of server numbers */ - isize = of_get_property(np, "ibm,interrupt-server#-size", NULL); - if (isize) - interrupt_server_size = *isize; - of_node_put(np); } @@ -682,6 +677,7 @@ void __init xics_init_IRQ(void) struct device_node *np; u32 indx = 0; int found = 0; + const u32 *isize; ppc64_boot_msg(0x20, "XICS Init"); @@ -701,6 +697,26 @@ void __init xics_init_IRQ(void) if (found == 0) return; + /* get the bit size of server numbers */ + found = 0; + + for_each_compatible_node(np, NULL, "ibm,ppc-xics") { + isize = of_get_property(np, "ibm,interrupt-server#-size", NULL); + + if (!isize) + continue; + + if (!found) { + interrupt_server_size = *isize; + found = 1; + } else if (*isize != interrupt_server_size) { + printk(KERN_WARNING "XICS: " + "mismatched ibm,interrupt-server#-size\n"); + interrupt_server_size = max(*isize, + interrupt_server_size); + } + } + xics_update_irq_servers(); xics_init_host(); @@ -728,9 +744,18 @@ static void xics_set_cpu_priority(unsigned char cppr) /* Have the calling processor join or leave the specified global queue */ static void xics_set_cpu_giq(unsigned int gserver, unsigned int join) { - int status = rtas_set_indicator_fast(GLOBAL_INTERRUPT_QUEUE, - (1UL << interrupt_server_size) - 1 - gserver, join); - WARN_ON(status < 0); + int index; + int status; + + if (!rtas_indicator_present(GLOBAL_INTERRUPT_QUEUE, NULL)) + return; + + index = (1UL << interrupt_server_size) - 1 - gserver; + + status = rtas_set_indicator_fast(GLOBAL_INTERRUPT_QUEUE, index, join); + + WARN(status < 0, "set-indicator(%d, %d, %u) returned %d\n", + GLOBAL_INTERRUPT_QUEUE, index, join, status); } void xics_setup_cpu(void) diff --git a/arch/powerpc/sysdev/bestcomm/ata.c b/arch/powerpc/sysdev/bestcomm/ata.c index 1f5258fb38c3..901c9f91e5dd 100644 --- a/arch/powerpc/sysdev/bestcomm/ata.c +++ b/arch/powerpc/sysdev/bestcomm/ata.c @@ -61,6 +61,9 @@ bcom_ata_init(int queue_len, int maxbufsize) struct bcom_ata_var *var; struct bcom_ata_inc *inc; + /* Prefetch breaks ATA DMA. 
Turn it off for ATA DMA */ + bcom_disable_prefetch(); + tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_ata_bd), 0); if (!tsk) return NULL; diff --git a/arch/powerpc/sysdev/bestcomm/ata.h b/arch/powerpc/sysdev/bestcomm/ata.h index 10982769c465..0b2371811334 100644 --- a/arch/powerpc/sysdev/bestcomm/ata.h +++ b/arch/powerpc/sysdev/bestcomm/ata.h @@ -16,22 +16,15 @@ struct bcom_ata_bd { u32 status; - u32 dst_pa; u32 src_pa; + u32 dst_pa; }; -extern struct bcom_task * -bcom_ata_init(int queue_len, int maxbufsize); - -extern void -bcom_ata_rx_prepare(struct bcom_task *tsk); - -extern void -bcom_ata_tx_prepare(struct bcom_task *tsk); - -extern void -bcom_ata_reset_bd(struct bcom_task *tsk); - +extern struct bcom_task * bcom_ata_init(int queue_len, int maxbufsize); +extern void bcom_ata_rx_prepare(struct bcom_task *tsk); +extern void bcom_ata_tx_prepare(struct bcom_task *tsk); +extern void bcom_ata_reset_bd(struct bcom_task *tsk); +extern void bcom_ata_release(struct bcom_task *tsk); #endif /* __BESTCOMM_ATA_H__ */ diff --git a/arch/powerpc/sysdev/bestcomm/bestcomm.c b/arch/powerpc/sysdev/bestcomm/bestcomm.c index 446c9ea85b30..378ebd9aac18 100644 --- a/arch/powerpc/sysdev/bestcomm/bestcomm.c +++ b/arch/powerpc/sysdev/bestcomm/bestcomm.c @@ -279,7 +279,6 @@ bcom_engine_init(void) int task; phys_addr_t tdt_pa, ctx_pa, var_pa, fdt_pa; unsigned int tdt_size, ctx_size, var_size, fdt_size; - u16 regval; /* Allocate & clear SRAM zones for FDT, TDTs, contexts and vars/incs */ tdt_size = BCOM_MAX_TASKS * sizeof(struct bcom_tdt); @@ -331,10 +330,8 @@ bcom_engine_init(void) out_8(&bcom_eng->regs->ipr[BCOM_INITIATOR_ALWAYS], BCOM_IPR_ALWAYS); /* Disable COMM Bus Prefetch on the original 5200; it's broken */ - if ((mfspr(SPRN_SVR) & MPC5200_SVR_MASK) == MPC5200_SVR) { - regval = in_be16(&bcom_eng->regs->PtdCntrl); - out_be16(&bcom_eng->regs->PtdCntrl, regval | 1); - } + if ((mfspr(SPRN_SVR) & MPC5200_SVR_MASK) == MPC5200_SVR) + bcom_disable_prefetch(); /* Init lock */ spin_lock_init(&bcom_eng->lock); diff --git a/arch/powerpc/sysdev/bestcomm/bestcomm.h b/arch/powerpc/sysdev/bestcomm/bestcomm.h index c960a8b49655..23a95f80dfdb 100644 --- a/arch/powerpc/sysdev/bestcomm/bestcomm.h +++ b/arch/powerpc/sysdev/bestcomm/bestcomm.h @@ -16,8 +16,19 @@ #ifndef __BESTCOMM_H__ #define __BESTCOMM_H__ -struct bcom_bd; /* defined later on ... */ - +/** + * struct bcom_bd - Structure describing a generic BestComm buffer descriptor + * @status: The current status of this buffer. Exact meaning depends on the + * task type + * @data: An array of u32 extra data. Size of array is task dependant. + * + * Note: Don't dereference a bcom_bd pointer as an array. The size of the + * bcom_bd is variable. Use bcom_get_bd() instead. + */ +struct bcom_bd { + u32 status; + u32 data[0]; /* variable payload size */ +}; /* ======================================================================== */ /* Generic task management */ @@ -84,17 +95,6 @@ bcom_get_task_irq(struct bcom_task *tsk) { /* BD based tasks helpers */ /* ======================================================================== */ -/** - * struct bcom_bd - Structure describing a generic BestComm buffer descriptor - * @status: The current status of this buffer. Exact meaning depends on the - * task type - * @data: An array of u32 whose meaning depends on the task type. - */ -struct bcom_bd { - u32 status; - u32 data[1]; /* variable, but at least 1 */ -}; - #define BCOM_BD_READY 0x40000000ul /** _bcom_next_index - Get next input index. 
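The "don't dereference a bcom_bd pointer as an array" note above deserves a concrete illustration, because the bug it guards against is silent: with data[] now a zero-length array, sizeof(struct bcom_bd) is only 4 bytes, while a real descriptor occupies tsk->bd_size bytes. A minimal sketch under that assumption follows; the helper name is a placeholder, and the pointer arithmetic is the same stride calculation as the bcom_get_bd() accessor added in the next hunk (void-pointer arithmetic is the usual GNU C idiom here).

/* WRONG: &tsk->bd[index] advances by sizeof(struct bcom_bd) == 4 bytes,
 * so for any task with per-BD payload it lands inside the previous
 * descriptor's data[] words. */

/* RIGHT: advance by the per-task descriptor size. */
static struct bcom_bd *example_nth_bd(struct bcom_task *tsk, unsigned int index)
{
	return (struct bcom_bd *)((void *)tsk->bd + index * tsk->bd_size);
}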
@@ -140,15 +140,31 @@ bcom_queue_full(struct bcom_task *tsk) } /** + * bcom_get_bd - Get a BD from the queue + * @tsk: The BestComm task structure + * index: Index of the BD to fetch + */ +static inline struct bcom_bd +*bcom_get_bd(struct bcom_task *tsk, unsigned int index) +{ + /* A cast to (void*) so the address can be incremented by the + * real size instead of by sizeof(struct bcom_bd) */ + return ((void *)tsk->bd) + (index * tsk->bd_size); +} + +/** * bcom_buffer_done - Checks if a BestComm * @tsk: The BestComm task structure */ static inline int bcom_buffer_done(struct bcom_task *tsk) { + struct bcom_bd *bd; if (bcom_queue_empty(tsk)) return 0; - return !(tsk->bd[tsk->outdex].status & BCOM_BD_READY); + + bd = bcom_get_bd(tsk, tsk->outdex); + return !(bd->status & BCOM_BD_READY); } /** @@ -160,16 +176,21 @@ bcom_buffer_done(struct bcom_task *tsk) static inline struct bcom_bd * bcom_prepare_next_buffer(struct bcom_task *tsk) { - tsk->bd[tsk->index].status = 0; /* cleanup last status */ - return &tsk->bd[tsk->index]; + struct bcom_bd *bd; + + bd = bcom_get_bd(tsk, tsk->index); + bd->status = 0; /* cleanup last status */ + return bd; } static inline void bcom_submit_next_buffer(struct bcom_task *tsk, void *cookie) { + struct bcom_bd *bd = bcom_get_bd(tsk, tsk->index); + tsk->cookie[tsk->index] = cookie; mb(); /* ensure the bd is really up-to-date */ - tsk->bd[tsk->index].status |= BCOM_BD_READY; + bd->status |= BCOM_BD_READY; tsk->index = _bcom_next_index(tsk); if (tsk->flags & BCOM_FLAGS_ENABLE_TASK) bcom_enable(tsk); @@ -179,10 +200,12 @@ static inline void * bcom_retrieve_buffer(struct bcom_task *tsk, u32 *p_status, struct bcom_bd **p_bd) { void *cookie = tsk->cookie[tsk->outdex]; + struct bcom_bd *bd = bcom_get_bd(tsk, tsk->outdex); + if (p_status) - *p_status = tsk->bd[tsk->outdex].status; + *p_status = bd->status; if (p_bd) - *p_bd = &tsk->bd[tsk->outdex]; + *p_bd = bd; tsk->outdex = _bcom_next_outdex(tsk); return cookie; } diff --git a/arch/powerpc/sysdev/bestcomm/bestcomm_priv.h b/arch/powerpc/sysdev/bestcomm/bestcomm_priv.h index 866a2915ef2f..eb0d1c883c31 100644 --- a/arch/powerpc/sysdev/bestcomm/bestcomm_priv.h +++ b/arch/powerpc/sysdev/bestcomm/bestcomm_priv.h @@ -198,8 +198,8 @@ struct bcom_task_header { #define BCOM_IPR_SCTMR_1 2 #define BCOM_IPR_FEC_RX 6 #define BCOM_IPR_FEC_TX 5 -#define BCOM_IPR_ATA_RX 4 -#define BCOM_IPR_ATA_TX 3 +#define BCOM_IPR_ATA_RX 7 +#define BCOM_IPR_ATA_TX 7 #define BCOM_IPR_SCPCI_RX 2 #define BCOM_IPR_SCPCI_TX 2 #define BCOM_IPR_PSC3_RX 2 @@ -241,6 +241,22 @@ extern void bcom_set_initiator(int task, int initiator); #define TASK_ENABLE 0x8000 +/** + * bcom_disable_prefetch - Hook to disable bus prefetching + * + * ATA DMA and the original MPC5200 need this due to silicon bugs. At the + * moment disabling prefetch is a one-way street. There is no mechanism + * in place to turn prefetch back on after it has been disabled. There is + * no reason it couldn't be done, it would just be more complex to implement. 
+ */ +static inline void bcom_disable_prefetch(void) +{ + u16 regval; + + regval = in_be16(&bcom_eng->regs->PtdCntrl); + out_be16(&bcom_eng->regs->PtdCntrl, regval | 1); +}; + static inline void bcom_enable_task(int task) { diff --git a/arch/powerpc/sysdev/dcr-low.S b/arch/powerpc/sysdev/dcr-low.S index 2078f39e2f17..d3098ef1404a 100644 --- a/arch/powerpc/sysdev/dcr-low.S +++ b/arch/powerpc/sysdev/dcr-low.S @@ -11,14 +11,20 @@ #include <asm/ppc_asm.h> #include <asm/processor.h> +#include <asm/bug.h> #define DCR_ACCESS_PROLOG(table) \ + cmpli cr0,r3,1024; \ rlwinm r3,r3,4,18,27; \ lis r5,table@h; \ ori r5,r5,table@l; \ add r3,r3,r5; \ + bge- 1f; \ mtctr r3; \ - bctr + bctr; \ +1: trap; \ + EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0; \ + blr _GLOBAL(__mfdcr) DCR_ACCESS_PROLOG(__mfdcr_table) diff --git a/arch/powerpc/sysdev/dcr.c b/arch/powerpc/sysdev/dcr.c index a8ba9983dd5a..bb44aa9fd470 100644 --- a/arch/powerpc/sysdev/dcr.c +++ b/arch/powerpc/sysdev/dcr.c @@ -124,7 +124,8 @@ EXPORT_SYMBOL_GPL(dcr_write_generic); #endif /* defined(CONFIG_PPC_DCR_NATIVE) && defined(CONFIG_PPC_DCR_MMIO) */ -unsigned int dcr_resource_start(struct device_node *np, unsigned int index) +unsigned int dcr_resource_start(const struct device_node *np, + unsigned int index) { unsigned int ds; const u32 *dr = of_get_property(np, "dcr-reg", &ds); @@ -136,7 +137,7 @@ unsigned int dcr_resource_start(struct device_node *np, unsigned int index) } EXPORT_SYMBOL_GPL(dcr_resource_start); -unsigned int dcr_resource_len(struct device_node *np, unsigned int index) +unsigned int dcr_resource_len(const struct device_node *np, unsigned int index) { unsigned int ds; const u32 *dr = of_get_property(np, "dcr-reg", &ds); diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c index 5b264eb4b1f7..d5f9ae0f1b75 100644 --- a/arch/powerpc/sysdev/fsl_pci.c +++ b/arch/powerpc/sysdev/fsl_pci.c @@ -187,7 +187,7 @@ int __init fsl_add_bridge(struct device_node *dev, int is_primary) printk(KERN_WARNING "Can't get bus-range for %s, assume" " bus 0\n", dev->full_name); - ppc_pci_flags |= PPC_PCI_REASSIGN_ALL_BUS; + ppc_pci_add_flags(PPC_PCI_REASSIGN_ALL_BUS); hose = pcibios_alloc_controller(dev); if (!hose) return -ENOMEM; @@ -300,7 +300,7 @@ int __init mpc83xx_add_bridge(struct device_node *dev) " bus 0\n", dev->full_name); } - ppc_pci_flags |= PPC_PCI_REASSIGN_ALL_BUS; + ppc_pci_add_flags(PPC_PCI_REASSIGN_ALL_BUS); hose = pcibios_alloc_controller(dev); if (!hose) return -ENOMEM; diff --git a/arch/powerpc/sysdev/fsl_soc.c b/arch/powerpc/sysdev/fsl_soc.c index 26ecb96f9731..115cb16351fd 100644 --- a/arch/powerpc/sysdev/fsl_soc.c +++ b/arch/powerpc/sysdev/fsl_soc.c @@ -207,236 +207,51 @@ static int __init of_add_fixed_phys(void) arch_initcall(of_add_fixed_phys); #endif /* CONFIG_FIXED_PHY */ -static int gfar_mdio_of_init_one(struct device_node *np) -{ - int k; - struct device_node *child = NULL; - struct gianfar_mdio_data mdio_data; - struct platform_device *mdio_dev; - struct resource res; - int ret; - - memset(&res, 0, sizeof(res)); - memset(&mdio_data, 0, sizeof(mdio_data)); - - ret = of_address_to_resource(np, 0, &res); - if (ret) - return ret; - - /* The gianfar device will try to use the same ID created below to find - * this bus, to coordinate register access (since they share). 
*/ - mdio_dev = platform_device_register_simple("fsl-gianfar_mdio", - res.start&0xfffff, &res, 1); - if (IS_ERR(mdio_dev)) - return PTR_ERR(mdio_dev); - - for (k = 0; k < 32; k++) - mdio_data.irq[k] = PHY_POLL; - - while ((child = of_get_next_child(np, child)) != NULL) { - int irq = irq_of_parse_and_map(child, 0); - if (irq != NO_IRQ) { - const u32 *id = of_get_property(child, "reg", NULL); - mdio_data.irq[*id] = irq; - } - } - - ret = platform_device_add_data(mdio_dev, &mdio_data, - sizeof(struct gianfar_mdio_data)); - if (ret) - platform_device_unregister(mdio_dev); - - return ret; -} - -static int __init gfar_mdio_of_init(void) -{ - struct device_node *np = NULL; - - for_each_compatible_node(np, NULL, "fsl,gianfar-mdio") - gfar_mdio_of_init_one(np); - - /* try the deprecated version */ - for_each_compatible_node(np, "mdio", "gianfar"); - gfar_mdio_of_init_one(np); - - return 0; -} - -arch_initcall(gfar_mdio_of_init); - -static const char *gfar_tx_intr = "tx"; -static const char *gfar_rx_intr = "rx"; -static const char *gfar_err_intr = "error"; - -static int __init gfar_of_init(void) +#ifdef CONFIG_PPC_83xx +static int __init mpc83xx_wdt_init(void) { + struct resource r; struct device_node *np; - unsigned int i; - struct platform_device *gfar_dev; - struct resource res; + struct platform_device *dev; + u32 freq = fsl_get_sys_freq(); int ret; - for (np = NULL, i = 0; - (np = of_find_compatible_node(np, "network", "gianfar")) != NULL; - i++) { - struct resource r[4]; - struct device_node *phy, *mdio; - struct gianfar_platform_data gfar_data; - const unsigned int *id; - const char *model; - const char *ctype; - const void *mac_addr; - const phandle *ph; - int n_res = 2; - - if (!of_device_is_available(np)) - continue; - - memset(r, 0, sizeof(r)); - memset(&gfar_data, 0, sizeof(gfar_data)); - - ret = of_address_to_resource(np, 0, &r[0]); - if (ret) - goto err; - - of_irq_to_resource(np, 0, &r[1]); - - model = of_get_property(np, "model", NULL); - - /* If we aren't the FEC we have multiple interrupts */ - if (model && strcasecmp(model, "FEC")) { - r[1].name = gfar_tx_intr; - - r[2].name = gfar_rx_intr; - of_irq_to_resource(np, 1, &r[2]); + np = of_find_compatible_node(NULL, "watchdog", "mpc83xx_wdt"); - r[3].name = gfar_err_intr; - of_irq_to_resource(np, 2, &r[3]); - - n_res += 2; - } - - gfar_dev = - platform_device_register_simple("fsl-gianfar", i, &r[0], - n_res); - - if (IS_ERR(gfar_dev)) { - ret = PTR_ERR(gfar_dev); - goto err; - } - - mac_addr = of_get_mac_address(np); - if (mac_addr) - memcpy(gfar_data.mac_addr, mac_addr, 6); - - if (model && !strcasecmp(model, "TSEC")) - gfar_data.device_flags = - FSL_GIANFAR_DEV_HAS_GIGABIT | - FSL_GIANFAR_DEV_HAS_COALESCE | - FSL_GIANFAR_DEV_HAS_RMON | - FSL_GIANFAR_DEV_HAS_MULTI_INTR; - if (model && !strcasecmp(model, "eTSEC")) - gfar_data.device_flags = - FSL_GIANFAR_DEV_HAS_GIGABIT | - FSL_GIANFAR_DEV_HAS_COALESCE | - FSL_GIANFAR_DEV_HAS_RMON | - FSL_GIANFAR_DEV_HAS_MULTI_INTR | - FSL_GIANFAR_DEV_HAS_CSUM | - FSL_GIANFAR_DEV_HAS_VLAN | - FSL_GIANFAR_DEV_HAS_EXTENDED_HASH; - - ctype = of_get_property(np, "phy-connection-type", NULL); - - /* We only care about rgmii-id. 
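Further down in this removed block, the driver identifies "its" MDIO bus by checking whether the MDIO node's register resource lies entirely inside the eTSEC's register window. That containment test is easy to misread inline; a small helper of this shape (hypothetical name, not a kernel API) captures it:

#include <linux/ioport.h>
#include <linux/types.h>

/* True if 'inner' is fully contained in 'outer' (inclusive bounds). */
static bool resource_inside(const struct resource *inner,
                            const struct resource *outer)
{
        return inner->start >= outer->start && inner->end <= outer->end;
}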
The rest are autodetected */ - if (ctype && !strcmp(ctype, "rgmii-id")) - gfar_data.interface = PHY_INTERFACE_MODE_RGMII_ID; - else - gfar_data.interface = PHY_INTERFACE_MODE_MII; - - if (of_get_property(np, "fsl,magic-packet", NULL)) - gfar_data.device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET; - - ph = of_get_property(np, "phy-handle", NULL); - if (ph == NULL) { - u32 *fixed_link; - - fixed_link = (u32 *)of_get_property(np, "fixed-link", - NULL); - if (!fixed_link) { - ret = -ENODEV; - goto unreg; - } - - snprintf(gfar_data.bus_id, MII_BUS_ID_SIZE, "0"); - gfar_data.phy_id = fixed_link[0]; - } else { - phy = of_find_node_by_phandle(*ph); - - if (phy == NULL) { - ret = -ENODEV; - goto unreg; - } - - mdio = of_get_parent(phy); - - id = of_get_property(phy, "reg", NULL); - ret = of_address_to_resource(mdio, 0, &res); - if (ret) { - of_node_put(phy); - of_node_put(mdio); - goto unreg; - } - - gfar_data.phy_id = *id; - snprintf(gfar_data.bus_id, MII_BUS_ID_SIZE, "%llx", - (unsigned long long)res.start&0xfffff); + if (!np) { + ret = -ENODEV; + goto nodev; + } - of_node_put(phy); - of_node_put(mdio); - } + memset(&r, 0, sizeof(r)); - /* Get MDIO bus controlled by this eTSEC, if any. Normally only - * eTSEC 1 will control an MDIO bus, not necessarily the same - * bus that its PHY is on ('mdio' above), so we can't just use - * that. What we do is look for a gianfar mdio device that has - * overlapping registers with this device. That's really the - * whole point, to find the device sharing our registers to - * coordinate access with it. - */ - for_each_compatible_node(mdio, NULL, "fsl,gianfar-mdio") { - if (of_address_to_resource(mdio, 0, &res)) - continue; - - if (res.start >= r[0].start && res.end <= r[0].end) { - /* Get the ID the mdio bus platform device was - * registered with. gfar_data.bus_id is - * different because it's for finding a PHY, - * while this is for finding a MII bus. - */ - gfar_data.mdio_bus = res.start&0xfffff; - of_node_put(mdio); - break; - } - } + ret = of_address_to_resource(np, 0, &r); + if (ret) + goto err; - ret = - platform_device_add_data(gfar_dev, &gfar_data, - sizeof(struct - gianfar_platform_data)); - if (ret) - goto unreg; + dev = platform_device_register_simple("mpc83xx_wdt", 0, &r, 1); + if (IS_ERR(dev)) { + ret = PTR_ERR(dev); + goto err; } + ret = platform_device_add_data(dev, &freq, sizeof(freq)); + if (ret) + goto unreg; + + of_node_put(np); return 0; unreg: - platform_device_unregister(gfar_dev); + platform_device_unregister(dev); err: + of_node_put(np); +nodev: return ret; } -arch_initcall(gfar_of_init); +arch_initcall(mpc83xx_wdt_init); +#endif static enum fsl_usb2_phy_modes determine_usb_phy(const char *phy_type) { diff --git a/arch/powerpc/sysdev/grackle.c b/arch/powerpc/sysdev/grackle.c index d502927644c6..5da37c2f22ee 100644 --- a/arch/powerpc/sysdev/grackle.c +++ b/arch/powerpc/sysdev/grackle.c @@ -57,7 +57,7 @@ void __init setup_grackle(struct pci_controller *hose) { setup_indirect_pci(hose, 0xfec00000, 0xfee00000, 0); if (machine_is_compatible("PowerMac1,1")) - ppc_pci_flags |= PPC_PCI_REASSIGN_ALL_BUS; + ppc_pci_add_flags(PPC_PCI_REASSIGN_ALL_BUS); if (machine_is_compatible("AAPL,PowerBook1998")) grackle_set_loop_snoop(hose, 1); #if 0 /* Disabled for now, HW problems ??? 
*/ diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c index 1890fb085cde..c82babb70074 100644 --- a/arch/powerpc/sysdev/mpic.c +++ b/arch/powerpc/sysdev/mpic.c @@ -661,17 +661,6 @@ static inline void mpic_eoi(struct mpic *mpic) (void)mpic_cpu_read(MPIC_INFO(CPU_WHOAMI)); } -#ifdef CONFIG_SMP -static irqreturn_t mpic_ipi_action(int irq, void *data) -{ - long ipi = (long)data; - - smp_message_recv(ipi); - - return IRQ_HANDLED; -} -#endif /* CONFIG_SMP */ - /* * Linux descriptor level callbacks */ @@ -1548,13 +1537,7 @@ unsigned int mpic_get_mcirq(void) void mpic_request_ipis(void) { struct mpic *mpic = mpic_primary; - long i, err; - static char *ipi_names[] = { - "IPI0 (call function)", - "IPI1 (reschedule)", - "IPI2 (call function single)", - "IPI3 (debugger break)", - }; + int i; BUG_ON(mpic == NULL); printk(KERN_INFO "mpic: requesting IPIs ... \n"); @@ -1563,17 +1546,10 @@ void mpic_request_ipis(void) unsigned int vipi = irq_create_mapping(mpic->irqhost, mpic->ipi_vecs[0] + i); if (vipi == NO_IRQ) { - printk(KERN_ERR "Failed to map IPI %ld\n", i); - break; - } - err = request_irq(vipi, mpic_ipi_action, - IRQF_DISABLED|IRQF_PERCPU, - ipi_names[i], (void *)i); - if (err) { - printk(KERN_ERR "Request of irq %d for IPI %ld failed\n", - vipi, i); - break; + printk(KERN_ERR "Failed to map %s\n", smp_ipi_name[i]); + continue; } + smp_request_message_ipi(vipi, i); } } diff --git a/arch/powerpc/sysdev/ppc4xx_pci.c b/arch/powerpc/sysdev/ppc4xx_pci.c index d3e4d61030b5..77fae5f64f2e 100644 --- a/arch/powerpc/sysdev/ppc4xx_pci.c +++ b/arch/powerpc/sysdev/ppc4xx_pci.c @@ -194,11 +194,41 @@ static int __init ppc4xx_parse_dma_ranges(struct pci_controller *hose, * 4xx PCI 2.x part */ +static int __init ppc4xx_setup_one_pci_PMM(struct pci_controller *hose, + void __iomem *reg, + u64 plb_addr, + u64 pci_addr, + u64 size, + unsigned int flags, + int index) +{ + u32 ma, pcila, pciha; + + if ((plb_addr + size) > 0xffffffffull || !is_power_of_2(size) || + size < 0x1000 || (plb_addr & (size - 1)) != 0) { + printk(KERN_WARNING "%s: Resource out of range\n", + hose->dn->full_name); + return -1; + } + ma = (0xffffffffu << ilog2(size)) | 1; + if (flags & IORESOURCE_PREFETCH) + ma |= 2; + + pciha = RES_TO_U32_HIGH(pci_addr); + pcila = RES_TO_U32_LOW(pci_addr); + + writel(plb_addr, reg + PCIL0_PMM0LA + (0x10 * index)); + writel(pcila, reg + PCIL0_PMM0PCILA + (0x10 * index)); + writel(pciha, reg + PCIL0_PMM0PCIHA + (0x10 * index)); + writel(ma, reg + PCIL0_PMM0MA + (0x10 * index)); + + return 0; +} + static void __init ppc4xx_configure_pci_PMMs(struct pci_controller *hose, void __iomem *reg) { - u32 la, ma, pcila, pciha; - int i, j; + int i, j, found_isa_hole = 0; /* Setup outbound memory windows */ for (i = j = 0; i < 3; i++) { @@ -213,28 +243,29 @@ static void __init ppc4xx_configure_pci_PMMs(struct pci_controller *hose, break; } - /* Calculate register values */ - la = res->start; - pciha = RES_TO_U32_HIGH(res->start - hose->pci_mem_offset); - pcila = RES_TO_U32_LOW(res->start - hose->pci_mem_offset); - - ma = res->end + 1 - res->start; - if (!is_power_of_2(ma) || ma < 0x1000 || ma > 0xffffffffu) { - printk(KERN_WARNING "%s: Resource out of range\n", - hose->dn->full_name); - continue; + /* Configure the resource */ + if (ppc4xx_setup_one_pci_PMM(hose, reg, + res->start, + res->start - hose->pci_mem_offset, + res->end + 1 - res->start, + res->flags, + j) == 0) { + j++; + + /* If the resource PCI address is 0 then we have our + * ISA memory hole + */ + if (res->start == hose->pci_mem_offset) + 
found_isa_hole = 1; } - ma = (0xffffffffu << ilog2(ma)) | 0x1; - if (res->flags & IORESOURCE_PREFETCH) - ma |= 0x2; - - /* Program register values */ - writel(la, reg + PCIL0_PMM0LA + (0x10 * j)); - writel(pcila, reg + PCIL0_PMM0PCILA + (0x10 * j)); - writel(pciha, reg + PCIL0_PMM0PCIHA + (0x10 * j)); - writel(ma, reg + PCIL0_PMM0MA + (0x10 * j)); - j++; } + + /* Handle ISA memory hole if not already covered */ + if (j <= 2 && !found_isa_hole && hose->isa_mem_size) + if (ppc4xx_setup_one_pci_PMM(hose, reg, hose->isa_mem_phys, 0, + hose->isa_mem_size, 0, j) == 0) + printk(KERN_INFO "%s: Legacy ISA memory support enabled\n", + hose->dn->full_name); } static void __init ppc4xx_configure_pci_PTMs(struct pci_controller *hose, @@ -352,11 +383,52 @@ static void __init ppc4xx_probe_pci_bridge(struct device_node *np) * 4xx PCI-X part */ +static int __init ppc4xx_setup_one_pcix_POM(struct pci_controller *hose, + void __iomem *reg, + u64 plb_addr, + u64 pci_addr, + u64 size, + unsigned int flags, + int index) +{ + u32 lah, lal, pciah, pcial, sa; + + if (!is_power_of_2(size) || size < 0x1000 || + (plb_addr & (size - 1)) != 0) { + printk(KERN_WARNING "%s: Resource out of range\n", + hose->dn->full_name); + return -1; + } + + /* Calculate register values */ + lah = RES_TO_U32_HIGH(plb_addr); + lal = RES_TO_U32_LOW(plb_addr); + pciah = RES_TO_U32_HIGH(pci_addr); + pcial = RES_TO_U32_LOW(pci_addr); + sa = (0xffffffffu << ilog2(size)) | 0x1; + + /* Program register values */ + if (index == 0) { + writel(lah, reg + PCIX0_POM0LAH); + writel(lal, reg + PCIX0_POM0LAL); + writel(pciah, reg + PCIX0_POM0PCIAH); + writel(pcial, reg + PCIX0_POM0PCIAL); + writel(sa, reg + PCIX0_POM0SA); + } else { + writel(lah, reg + PCIX0_POM1LAH); + writel(lal, reg + PCIX0_POM1LAL); + writel(pciah, reg + PCIX0_POM1PCIAH); + writel(pcial, reg + PCIX0_POM1PCIAL); + writel(sa, reg + PCIX0_POM1SA); + } + + return 0; +} + static void __init ppc4xx_configure_pcix_POMs(struct pci_controller *hose, void __iomem *reg) { - u32 lah, lal, pciah, pcial, sa; - int i, j; + int i, j, found_isa_hole = 0; /* Setup outbound memory windows */ for (i = j = 0; i < 3; i++) { @@ -371,36 +443,29 @@ static void __init ppc4xx_configure_pcix_POMs(struct pci_controller *hose, break; } - /* Calculate register values */ - lah = RES_TO_U32_HIGH(res->start); - lal = RES_TO_U32_LOW(res->start); - pciah = RES_TO_U32_HIGH(res->start - hose->pci_mem_offset); - pcial = RES_TO_U32_LOW(res->start - hose->pci_mem_offset); - sa = res->end + 1 - res->start; - if (!is_power_of_2(sa) || sa < 0x100000 || - sa > 0xffffffffu) { - printk(KERN_WARNING "%s: Resource out of range\n", - hose->dn->full_name); - continue; + /* Configure the resource */ + if (ppc4xx_setup_one_pcix_POM(hose, reg, + res->start, + res->start - hose->pci_mem_offset, + res->end + 1 - res->start, + res->flags, + j) == 0) { + j++; + + /* If the resource PCI address is 0 then we have our + * ISA memory hole + */ + if (res->start == hose->pci_mem_offset) + found_isa_hole = 1; } - sa = (0xffffffffu << ilog2(sa)) | 0x1; - - /* Program register values */ - if (j == 0) { - writel(lah, reg + PCIX0_POM0LAH); - writel(lal, reg + PCIX0_POM0LAL); - writel(pciah, reg + PCIX0_POM0PCIAH); - writel(pcial, reg + PCIX0_POM0PCIAL); - writel(sa, reg + PCIX0_POM0SA); - } else { - writel(lah, reg + PCIX0_POM1LAH); - writel(lal, reg + PCIX0_POM1LAL); - writel(pciah, reg + PCIX0_POM1PCIAH); - writel(pcial, reg + PCIX0_POM1PCIAL); - writel(sa, reg + PCIX0_POM1SA); - } - j++; } + + /* Handle ISA memory hole if not already covered */ 
+ if (j <= 1 && !found_isa_hole && hose->isa_mem_size) + if (ppc4xx_setup_one_pcix_POM(hose, reg, hose->isa_mem_phys, 0, + hose->isa_mem_size, 0, j) == 0) + printk(KERN_INFO "%s: Legacy ISA memory support enabled\n", + hose->dn->full_name); } static void __init ppc4xx_configure_pcix_PIMs(struct pci_controller *hose, @@ -1317,12 +1382,72 @@ static struct pci_ops ppc4xx_pciex_pci_ops = .write = ppc4xx_pciex_write_config, }; +static int __init ppc4xx_setup_one_pciex_POM(struct ppc4xx_pciex_port *port, + struct pci_controller *hose, + void __iomem *mbase, + u64 plb_addr, + u64 pci_addr, + u64 size, + unsigned int flags, + int index) +{ + u32 lah, lal, pciah, pcial, sa; + + if (!is_power_of_2(size) || + (index < 2 && size < 0x100000) || + (index == 2 && size < 0x100) || + (plb_addr & (size - 1)) != 0) { + printk(KERN_WARNING "%s: Resource out of range\n", + hose->dn->full_name); + return -1; + } + + /* Calculate register values */ + lah = RES_TO_U32_HIGH(plb_addr); + lal = RES_TO_U32_LOW(plb_addr); + pciah = RES_TO_U32_HIGH(pci_addr); + pcial = RES_TO_U32_LOW(pci_addr); + sa = (0xffffffffu << ilog2(size)) | 0x1; + + /* Program register values */ + switch (index) { + case 0: + out_le32(mbase + PECFG_POM0LAH, pciah); + out_le32(mbase + PECFG_POM0LAL, pcial); + dcr_write(port->dcrs, DCRO_PEGPL_OMR1BAH, lah); + dcr_write(port->dcrs, DCRO_PEGPL_OMR1BAL, lal); + dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKH, 0x7fffffff); + /* Note that 3 here means enabled | single region */ + dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL, sa | 3); + break; + case 1: + out_le32(mbase + PECFG_POM1LAH, pciah); + out_le32(mbase + PECFG_POM1LAL, pcial); + dcr_write(port->dcrs, DCRO_PEGPL_OMR2BAH, lah); + dcr_write(port->dcrs, DCRO_PEGPL_OMR2BAL, lal); + dcr_write(port->dcrs, DCRO_PEGPL_OMR2MSKH, 0x7fffffff); + /* Note that 3 here means enabled | single region */ + dcr_write(port->dcrs, DCRO_PEGPL_OMR2MSKL, sa | 3); + break; + case 2: + out_le32(mbase + PECFG_POM2LAH, pciah); + out_le32(mbase + PECFG_POM2LAL, pcial); + dcr_write(port->dcrs, DCRO_PEGPL_OMR3BAH, lah); + dcr_write(port->dcrs, DCRO_PEGPL_OMR3BAL, lal); + dcr_write(port->dcrs, DCRO_PEGPL_OMR3MSKH, 0x7fffffff); + /* Note that 3 here means enabled | IO space !!! 
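All three outbound-window helpers in this file build their size mask the same way: the window must be a power-of-two size above a hardware minimum, the PLB address must be size-aligned, and the mask keeps only the address bits above log2(size) while the low bit(s) carry enable/attribute flags. A plain-C sketch of that calculation, with hypothetical names and a 32-bit window limit assumed for brevity:

#include <stdbool.h>
#include <stdint.h>

static bool is_pow2(uint64_t x)
{
        return x && (x & (x - 1)) == 0;
}

static int log2_u64(uint64_t x)         /* x must be a power of two */
{
        int n = 0;

        while (x >>= 1)
                n++;
        return n;
}

/*
 * Hypothetical stand-in for the PMM "MA" / POM "SA" computation:
 * fills *mask and returns 0 on success, -1 if the window is unusable.
 */
static int window_mask(uint64_t plb_addr, uint64_t size, uint64_t min_size,
                       bool prefetch, uint32_t *mask)
{
        if (size > 0xffffffffull || !is_pow2(size) || size < min_size ||
            (plb_addr & (size - 1)))
                return -1;

        *mask = (0xffffffffu << log2_u64(size)) | 0x1;  /* bit 0: enable */
        if (prefetch)
                *mask |= 0x2;   /* prefetchable attribute in the PMM case */
        return 0;
}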
*/ + dcr_write(port->dcrs, DCRO_PEGPL_OMR3MSKL, sa | 3); + break; + } + + return 0; +} + static void __init ppc4xx_configure_pciex_POMs(struct ppc4xx_pciex_port *port, struct pci_controller *hose, void __iomem *mbase) { - u32 lah, lal, pciah, pcial, sa; - int i, j; + int i, j, found_isa_hole = 0; /* Setup outbound memory windows */ for (i = j = 0; i < 3; i++) { @@ -1337,53 +1462,38 @@ static void __init ppc4xx_configure_pciex_POMs(struct ppc4xx_pciex_port *port, break; } - /* Calculate register values */ - lah = RES_TO_U32_HIGH(res->start); - lal = RES_TO_U32_LOW(res->start); - pciah = RES_TO_U32_HIGH(res->start - hose->pci_mem_offset); - pcial = RES_TO_U32_LOW(res->start - hose->pci_mem_offset); - sa = res->end + 1 - res->start; - if (!is_power_of_2(sa) || sa < 0x100000 || - sa > 0xffffffffu) { - printk(KERN_WARNING "%s: Resource out of range\n", - port->node->full_name); - continue; - } - sa = (0xffffffffu << ilog2(sa)) | 0x1; - - /* Program register values */ - switch (j) { - case 0: - out_le32(mbase + PECFG_POM0LAH, pciah); - out_le32(mbase + PECFG_POM0LAL, pcial); - dcr_write(port->dcrs, DCRO_PEGPL_OMR1BAH, lah); - dcr_write(port->dcrs, DCRO_PEGPL_OMR1BAL, lal); - dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKH, 0x7fffffff); - dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL, sa | 3); - break; - case 1: - out_le32(mbase + PECFG_POM1LAH, pciah); - out_le32(mbase + PECFG_POM1LAL, pcial); - dcr_write(port->dcrs, DCRO_PEGPL_OMR2BAH, lah); - dcr_write(port->dcrs, DCRO_PEGPL_OMR2BAL, lal); - dcr_write(port->dcrs, DCRO_PEGPL_OMR2MSKH, 0x7fffffff); - dcr_write(port->dcrs, DCRO_PEGPL_OMR2MSKL, sa | 3); - break; + /* Configure the resource */ + if (ppc4xx_setup_one_pciex_POM(port, hose, mbase, + res->start, + res->start - hose->pci_mem_offset, + res->end + 1 - res->start, + res->flags, + j) == 0) { + j++; + + /* If the resource PCI address is 0 then we have our + * ISA memory hole + */ + if (res->start == hose->pci_mem_offset) + found_isa_hole = 1; } - j++; } - /* Configure IO, always 64K starting at 0 */ - if (hose->io_resource.flags & IORESOURCE_IO) { - lah = RES_TO_U32_HIGH(hose->io_base_phys); - lal = RES_TO_U32_LOW(hose->io_base_phys); - out_le32(mbase + PECFG_POM2LAH, 0); - out_le32(mbase + PECFG_POM2LAL, 0); - dcr_write(port->dcrs, DCRO_PEGPL_OMR3BAH, lah); - dcr_write(port->dcrs, DCRO_PEGPL_OMR3BAL, lal); - dcr_write(port->dcrs, DCRO_PEGPL_OMR3MSKH, 0x7fffffff); - dcr_write(port->dcrs, DCRO_PEGPL_OMR3MSKL, 0xffff0000 | 3); - } + /* Handle ISA memory hole if not already covered */ + if (j <= 1 && !found_isa_hole && hose->isa_mem_size) + if (ppc4xx_setup_one_pciex_POM(port, hose, mbase, + hose->isa_mem_phys, 0, + hose->isa_mem_size, 0, j) == 0) + printk(KERN_INFO "%s: Legacy ISA memory support enabled\n", + hose->dn->full_name); + + /* Configure IO, always 64K starting at 0. We hard wire it to 64K ! 
+ * Note also that it -has- to be region index 2 on this HW + */ + if (hose->io_resource.flags & IORESOURCE_IO) + ppc4xx_setup_one_pciex_POM(port, hose, mbase, + hose->io_base_phys, 0, + 0x10000, IORESOURCE_IO, 2); } static void __init ppc4xx_configure_pciex_PIMs(struct ppc4xx_pciex_port *port, diff --git a/arch/powerpc/sysdev/qe_lib/qe.c b/arch/powerpc/sysdev/qe_lib/qe.c index b3b73ae57d6d..01bce3784b0a 100644 --- a/arch/powerpc/sysdev/qe_lib/qe.c +++ b/arch/powerpc/sysdev/qe_lib/qe.c @@ -19,6 +19,7 @@ #include <linux/kernel.h> #include <linux/param.h> #include <linux/string.h> +#include <linux/spinlock.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/bootmem.h> @@ -38,6 +39,8 @@ static void qe_snums_init(void); static int qe_sdma_init(void); static DEFINE_SPINLOCK(qe_lock); +DEFINE_SPINLOCK(cmxgcr_lock); +EXPORT_SYMBOL(cmxgcr_lock); /* QE snum state */ enum qe_snum_state { diff --git a/arch/powerpc/sysdev/qe_lib/ucc.c b/arch/powerpc/sysdev/qe_lib/ucc.c index 1d78071aad7d..ebb442ea1917 100644 --- a/arch/powerpc/sysdev/qe_lib/ucc.c +++ b/arch/powerpc/sysdev/qe_lib/ucc.c @@ -18,6 +18,7 @@ #include <linux/errno.h> #include <linux/slab.h> #include <linux/stddef.h> +#include <linux/spinlock.h> #include <linux/module.h> #include <asm/irq.h> @@ -26,9 +27,6 @@ #include <asm/qe.h> #include <asm/ucc.h> -DEFINE_SPINLOCK(cmxgcr_lock); -EXPORT_SYMBOL(cmxgcr_lock); - int ucc_set_qe_mux_mii_mng(unsigned int ucc_num) { unsigned long flags; diff --git a/arch/powerpc/xmon/Makefile b/arch/powerpc/xmon/Makefile index 51d97588e762..9cb03b71b9d6 100644 --- a/arch/powerpc/xmon/Makefile +++ b/arch/powerpc/xmon/Makefile @@ -4,7 +4,7 @@ ifdef CONFIG_PPC64 EXTRA_CFLAGS += -mno-minimal-toc endif -obj-y += xmon.o setjmp.o start.o nonstdio.o +obj-y += xmon.o start.o nonstdio.o ifdef CONFIG_XMON_DISASSEMBLY obj-y += ppc-dis.o ppc-opc.o diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 076368c8b8a9..8dfad7d9a004 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -41,6 +41,7 @@ #include <asm/spu_priv1.h> #include <asm/firmware.h> #include <asm/setjmp.h> +#include <asm/reg.h> #ifdef CONFIG_PPC64 #include <asm/hvcall.h> @@ -159,8 +160,6 @@ static int xmon_no_auto_backtrace; extern void xmon_enter(void); extern void xmon_leave(void); -extern void xmon_save_regs(struct pt_regs *); - #ifdef CONFIG_PPC64 #define REG "%.16lx" #define REGS_PER_LINE 4 @@ -532,7 +531,7 @@ int xmon(struct pt_regs *excp) struct pt_regs regs; if (excp == NULL) { - xmon_save_regs(®s); + ppc_save_regs(®s); excp = ®s; } diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 8116a3328a19..8152fefc97b9 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -43,6 +43,9 @@ config GENERIC_HWEIGHT config GENERIC_TIME def_bool y +config GENERIC_TIME_VSYSCALL + def_bool y + config GENERIC_CLOCKEVENTS def_bool y @@ -66,10 +69,15 @@ config PGSTE bool default y if KVM +config VIRT_CPU_ACCOUNTING + def_bool y + mainmenu "Linux Kernel Configuration" config S390 def_bool y + select USE_GENERIC_SMP_HELPERS if SMP + select HAVE_FUNCTION_TRACER select HAVE_OPROFILE select HAVE_KPROBES select HAVE_KRETPROBES @@ -225,6 +233,14 @@ config MARCH_Z9_109 Class (z9 BC). The kernel will be slightly faster but will not work on older machines such as the z990, z890, z900, and z800. +config MARCH_Z10 + bool "IBM System z10" + help + Select this to enable optimizations for IBM System z10. 
The + kernel will be slightly faster but will not work on older + machines such as the z990, z890, z900, z800, z9-109, z9-ec + and z9-bc. + endchoice config PACK_STACK @@ -343,16 +359,6 @@ config QDIO If unsure, say Y. -config QDIO_DEBUG - bool "Extended debugging information" - depends on QDIO - help - Say Y here to get extended debugging output in - /sys/kernel/debug/s390dbf/qdio... - Warning: this option reduces the performance of the QDIO module. - - If unsure, say N. - config CHSC_SCH tristate "Support for CHSC subchannels" help @@ -466,22 +472,9 @@ config PAGE_STATES hypervisor. The ESSA instruction is used to do the states changes between a page that has content and the unused state. -config VIRT_TIMER - bool "Virtual CPU timer support" - help - This provides a kernel interface for virtual CPU timers. - Default is disabled. - -config VIRT_CPU_ACCOUNTING - bool "Base user process accounting on virtual cpu timer" - depends on VIRT_TIMER - help - Select this option to use CPU timer deltas to do user - process accounting. - config APPLDATA_BASE bool "Linux - VM Monitor Stream, base infrastructure" - depends on PROC_FS && VIRT_TIMER=y + depends on PROC_FS help This provides a kernel interface for creating and updating z/VM APPLDATA monitor records. The monitor records are updated at certain time diff --git a/arch/s390/Makefile b/arch/s390/Makefile index 792a4e7743ce..578c61f15a4b 100644 --- a/arch/s390/Makefile +++ b/arch/s390/Makefile @@ -34,6 +34,7 @@ cflags-$(CONFIG_MARCH_G5) += $(call cc-option,-march=g5) cflags-$(CONFIG_MARCH_Z900) += $(call cc-option,-march=z900) cflags-$(CONFIG_MARCH_Z990) += $(call cc-option,-march=z990) cflags-$(CONFIG_MARCH_Z9_109) += $(call cc-option,-march=z9-109) +cflags-$(CONFIG_MARCH_Z10) += $(call cc-option,-march=z10) #KBUILD_IMAGE is necessary for make rpm KBUILD_IMAGE :=arch/s390/boot/image diff --git a/arch/s390/appldata/appldata.h b/arch/s390/appldata/appldata.h index 17a2636fec0a..f0b23fc759ba 100644 --- a/arch/s390/appldata/appldata.h +++ b/arch/s390/appldata/appldata.h @@ -26,10 +26,6 @@ #define CTL_APPLDATA_NET_SUM 2125 #define CTL_APPLDATA_PROC 2126 -#define P_INFO(x...) printk(KERN_INFO MY_PRINT_NAME " info: " x) -#define P_ERROR(x...) printk(KERN_ERR MY_PRINT_NAME " error: " x) -#define P_WARNING(x...) printk(KERN_WARNING MY_PRINT_NAME " status: " x) - struct appldata_ops { struct list_head list; struct ctl_table_header *sysctl_header; diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c index a06a47cdd5e0..27b70d8a359c 100644 --- a/arch/s390/appldata/appldata_base.c +++ b/arch/s390/appldata/appldata_base.c @@ -10,6 +10,9 @@ * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com> */ +#define KMSG_COMPONENT "appldata" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> @@ -32,7 +35,6 @@ #include "appldata.h" -#define MY_PRINT_NAME "appldata" /* for debug messages, etc. 
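The KMSG_COMPONENT/pr_fmt pattern introduced here (and repeated in the other s390 files below) works because the pr_err()/pr_info() family expands pr_fmt() around the format string; defining the macro before the includes overrides the default identity definition, so every message in the file picks up the component prefix automatically. A minimal illustration with a made-up component name:

#define KMSG_COMPONENT "mydrv"                  /* hypothetical component */
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt     /* must come before the includes */

#include <linux/kernel.h>

void mydrv_report(int rc)
{
        /* Expands to printk(KERN_ERR "mydrv: " "setup failed with rc=%d\n", rc) */
        pr_err("setup failed with rc=%d\n", rc);
}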
*/ #define APPLDATA_CPU_INTERVAL 10000 /* default (CPU) time for sampling interval in milliseconds */ @@ -390,8 +392,8 @@ appldata_generic_handler(ctl_table *ctl, int write, struct file *filp, (unsigned long) ops->data, ops->size, ops->mod_lvl); if (rc != 0) { - P_ERROR("START DIAG 0xDC for %s failed, " - "return code: %d\n", ops->name, rc); + pr_err("Starting the data collection for %s " + "failed with rc=%d\n", ops->name, rc); module_put(ops->owner); } else ops->active = 1; @@ -401,8 +403,8 @@ appldata_generic_handler(ctl_table *ctl, int write, struct file *filp, (unsigned long) ops->data, ops->size, ops->mod_lvl); if (rc != 0) - P_ERROR("STOP DIAG 0xDC for %s failed, " - "return code: %d\n", ops->name, rc); + pr_err("Stopping the data collection for %s " + "failed with rc=%d\n", ops->name, rc); module_put(ops->owner); } spin_unlock(&appldata_ops_lock); diff --git a/arch/s390/appldata/appldata_net_sum.c b/arch/s390/appldata/appldata_net_sum.c index 3b746556e1a3..fa741f84c5b9 100644 --- a/arch/s390/appldata/appldata_net_sum.c +++ b/arch/s390/appldata/appldata_net_sum.c @@ -67,7 +67,6 @@ static void appldata_get_net_sum_data(void *data) int i; struct appldata_net_sum_data *net_data; struct net_device *dev; - struct net_device_stats *stats; unsigned long rx_packets, tx_packets, rx_bytes, tx_bytes, rx_errors, tx_errors, rx_dropped, tx_dropped, collisions; @@ -86,7 +85,8 @@ static void appldata_get_net_sum_data(void *data) collisions = 0; read_lock(&dev_base_lock); for_each_netdev(&init_net, dev) { - stats = dev->get_stats(dev); + const struct net_device_stats *stats = dev_get_stats(dev); + rx_packets += stats->rx_packets; tx_packets += stats->tx_packets; rx_bytes += stats->rx_bytes; diff --git a/arch/s390/appldata/appldata_os.c b/arch/s390/appldata/appldata_os.c index eb44f9f8ab91..55c80ffd42b9 100644 --- a/arch/s390/appldata/appldata_os.c +++ b/arch/s390/appldata/appldata_os.c @@ -9,6 +9,9 @@ * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com> */ +#define KMSG_COMPONENT "appldata" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> @@ -22,7 +25,6 @@ #include "appldata.h" -#define MY_PRINT_NAME "appldata_os" /* for debug messages, etc. */ #define LOAD_INT(x) ((x) >> FSHIFT) #define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100) @@ -143,21 +145,16 @@ static void appldata_get_os_data(void *data) (unsigned long) ops.data, new_size, ops.mod_lvl); if (rc != 0) - P_ERROR("os: START NEW DIAG 0xDC failed, " - "return code: %d, new size = %i\n", rc, - new_size); + pr_err("Starting a new OS data collection " + "failed with rc=%d\n", rc); rc = appldata_diag(APPLDATA_RECORD_OS_ID, APPLDATA_STOP_REC, (unsigned long) ops.data, ops.size, ops.mod_lvl); if (rc != 0) - P_ERROR("os: STOP OLD DIAG 0xDC failed, " - "return code: %d, old size = %i\n", rc, - ops.size); - else - P_INFO("os: old record size = %i stopped\n", - ops.size); + pr_err("Stopping a faulty OS data " + "collection failed with rc=%d\n", rc); } ops.size = new_size; } @@ -178,8 +175,8 @@ static int __init appldata_os_init(void) max_size = sizeof(struct appldata_os_data) + (NR_CPUS * sizeof(struct appldata_os_per_cpu)); if (max_size > APPLDATA_MAX_REC_SIZE) { - P_ERROR("Max. 
size of OS record = %i, bigger than maximum " - "record size (%i)\n", max_size, APPLDATA_MAX_REC_SIZE); + pr_err("Maximum OS record size %i exceeds the maximum " + "record size %i\n", max_size, APPLDATA_MAX_REC_SIZE); rc = -ENOMEM; goto out; } diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c index e33f32b54c08..c42cd898f68b 100644 --- a/arch/s390/crypto/aes_s390.c +++ b/arch/s390/crypto/aes_s390.c @@ -17,6 +17,9 @@ * */ +#define KMSG_COMPONENT "aes_s390" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include <crypto/aes.h> #include <crypto/algapi.h> #include <linux/err.h> @@ -169,7 +172,8 @@ static int fallback_init_cip(struct crypto_tfm *tfm) CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(sctx->fallback.cip)) { - printk(KERN_ERR "Error allocating fallback algo %s\n", name); + pr_err("Allocating AES fallback algorithm %s failed\n", + name); return PTR_ERR(sctx->fallback.blk); } @@ -349,7 +353,8 @@ static int fallback_init_blk(struct crypto_tfm *tfm) CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(sctx->fallback.blk)) { - printk(KERN_ERR "Error allocating fallback algo %s\n", name); + pr_err("Allocating AES fallback algorithm %s failed\n", + name); return PTR_ERR(sctx->fallback.blk); } @@ -515,9 +520,8 @@ static int __init aes_s390_init(void) /* z9 109 and z9 BC/EC only support 128 bit key length */ if (keylen_flag == AES_KEYLEN_128) - printk(KERN_INFO - "aes_s390: hardware acceleration only available for " - "128 bit keys\n"); + pr_info("AES hardware acceleration is only available for" + " 128-bit keys\n"); ret = crypto_register_alg(&aes_alg); if (ret) diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c index b9a1ce1f28e4..b1e892a43816 100644 --- a/arch/s390/hypfs/hypfs_diag.c +++ b/arch/s390/hypfs/hypfs_diag.c @@ -3,10 +3,13 @@ * Hypervisor filesystem for Linux on s390. Diag 204 and 224 * implementation. * - * Copyright (C) IBM Corp. 2006 + * Copyright IBM Corp. 2006, 2008 * Author(s): Michael Holzheu <holzheu@de.ibm.com> */ +#define KMSG_COMPONENT "hypfs" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include <linux/types.h> #include <linux/errno.h> #include <linux/string.h> @@ -527,13 +530,14 @@ __init int hypfs_diag_init(void) int rc; if (diag204_probe()) { - printk(KERN_ERR "hypfs: diag 204 not working."); + pr_err("The hardware system does not support hypfs\n"); return -ENODATA; } rc = diag224_get_name_table(); if (rc) { diag204_free_buffer(); - printk(KERN_ERR "hypfs: could not get name table.\n"); + pr_err("The hardware system does not provide all " + "functions required by hypfs\n"); } return rc; } diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c index 36313801cd5c..9d4f8e6c0800 100644 --- a/arch/s390/hypfs/inode.c +++ b/arch/s390/hypfs/inode.c @@ -2,10 +2,13 @@ * arch/s390/hypfs/inode.c * Hypervisor filesystem for Linux on s390. * - * Copyright (C) IBM Corp. 2006 + * Copyright IBM Corp. 
2006, 2008 * Author(s): Michael Holzheu <holzheu@de.ibm.com> */ +#define KMSG_COMPONENT "hypfs" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include <linux/types.h> #include <linux/errno.h> #include <linux/fs.h> @@ -200,7 +203,7 @@ static ssize_t hypfs_aio_write(struct kiocb *iocb, const struct iovec *iov, else rc = hypfs_diag_create_files(sb, sb->s_root); if (rc) { - printk(KERN_ERR "hypfs: Update failed\n"); + pr_err("Updating the hypfs tree failed\n"); hypfs_delete_tree(sb->s_root); goto out; } @@ -252,8 +255,7 @@ static int hypfs_parse_options(char *options, struct super_block *sb) break; case opt_err: default: - printk(KERN_ERR "hypfs: Unrecognized mount option " - "\"%s\" or missing value\n", str); + pr_err("%s is not a valid mount option\n", str); return -EINVAL; } } @@ -280,8 +282,8 @@ static int hypfs_fill_super(struct super_block *sb, void *data, int silent) if (!sbi) return -ENOMEM; mutex_init(&sbi->lock); - sbi->uid = current->uid; - sbi->gid = current->gid; + sbi->uid = current_uid(); + sbi->gid = current_gid(); sb->s_fs_info = sbi; sb->s_blocksize = PAGE_CACHE_SIZE; sb->s_blocksize_bits = PAGE_CACHE_SHIFT; @@ -317,7 +319,7 @@ static int hypfs_fill_super(struct super_block *sb, void *data, int silent) } hypfs_update_update(sb); sb->s_root = root_dentry; - printk(KERN_INFO "hypfs: Hypervisor filesystem mounted\n"); + pr_info("Hypervisor filesystem mounted\n"); return 0; err_tree: @@ -513,7 +515,7 @@ fail_sysfs: if (!MACHINE_IS_VM) hypfs_diag_exit(); fail_diag: - printk(KERN_ERR "hypfs: Initialization failed with rc = %i.\n", rc); + pr_err("Initialization of hypfs failed with rc=%i\n", rc); return rc; } diff --git a/arch/s390/include/asm/auxvec.h b/arch/s390/include/asm/auxvec.h index 0d340720fd99..a1f153e89133 100644 --- a/arch/s390/include/asm/auxvec.h +++ b/arch/s390/include/asm/auxvec.h @@ -1,4 +1,6 @@ #ifndef __ASMS390_AUXVEC_H #define __ASMS390_AUXVEC_H +#define AT_SYSINFO_EHDR 33 + #endif diff --git a/arch/s390/include/asm/bug.h b/arch/s390/include/asm/bug.h index 384e3621e341..7efd0abe8887 100644 --- a/arch/s390/include/asm/bug.h +++ b/arch/s390/include/asm/bug.h @@ -47,7 +47,10 @@ #endif /* CONFIG_DEBUG_BUGVERBOSE */ -#define BUG() __EMIT_BUG(0) +#define BUG() do { \ + __EMIT_BUG(0); \ + for (;;); \ +} while (0) #define WARN_ON(x) ({ \ int __ret_warn_on = !!(x); \ diff --git a/arch/s390/include/asm/byteorder.h b/arch/s390/include/asm/byteorder.h index 1fe2492baa8d..8bcf277c8468 100644 --- a/arch/s390/include/asm/byteorder.h +++ b/arch/s390/include/asm/byteorder.h @@ -11,32 +11,39 @@ #include <asm/types.h> -#ifdef __GNUC__ +#define __BIG_ENDIAN + +#ifndef __s390x__ +# define __SWAB_64_THRU_32__ +#endif #ifdef __s390x__ -static inline __u64 ___arch__swab64p(const __u64 *x) +static inline __u64 __arch_swab64p(const __u64 *x) { __u64 result; asm volatile("lrvg %0,%1" : "=d" (result) : "m" (*x)); return result; } +#define __arch_swab64p __arch_swab64p -static inline __u64 ___arch__swab64(__u64 x) +static inline __u64 __arch_swab64(__u64 x) { __u64 result; asm volatile("lrvgr %0,%1" : "=d" (result) : "d" (x)); return result; } +#define __arch_swab64 __arch_swab64 -static inline void ___arch__swab64s(__u64 *x) +static inline void __arch_swab64s(__u64 *x) { - *x = ___arch__swab64p(x); + *x = __arch_swab64p(x); } +#define __arch_swab64s __arch_swab64s #endif /* __s390x__ */ -static inline __u32 ___arch__swab32p(const __u32 *x) +static inline __u32 __arch_swab32p(const __u32 *x) { __u32 result; @@ -53,25 +60,20 @@ static inline __u32 ___arch__swab32p(const __u32 *x) #endif /* 
__s390x__ */ return result; } +#define __arch_swab32p __arch_swab32p -static inline __u32 ___arch__swab32(__u32 x) +#ifdef __s390x__ +static inline __u32 __arch_swab32(__u32 x) { -#ifndef __s390x__ - return ___arch__swab32p(&x); -#else /* __s390x__ */ __u32 result; asm volatile("lrvr %0,%1" : "=d" (result) : "d" (x)); return result; -#endif /* __s390x__ */ -} - -static __inline__ void ___arch__swab32s(__u32 *x) -{ - *x = ___arch__swab32p(x); } +#define __arch_swab32 __arch_swab32 +#endif /* __s390x__ */ -static __inline__ __u16 ___arch__swab16p(const __u16 *x) +static inline __u16 __arch_swab16p(const __u16 *x) { __u16 result; @@ -86,40 +88,8 @@ static __inline__ __u16 ___arch__swab16p(const __u16 *x) #endif /* __s390x__ */ return result; } +#define __arch_swab16p __arch_swab16p -static __inline__ __u16 ___arch__swab16(__u16 x) -{ - return ___arch__swab16p(&x); -} - -static __inline__ void ___arch__swab16s(__u16 *x) -{ - *x = ___arch__swab16p(x); -} - -#ifdef __s390x__ -#define __arch__swab64(x) ___arch__swab64(x) -#define __arch__swab64p(x) ___arch__swab64p(x) -#define __arch__swab64s(x) ___arch__swab64s(x) -#endif /* __s390x__ */ -#define __arch__swab32(x) ___arch__swab32(x) -#define __arch__swab16(x) ___arch__swab16(x) -#define __arch__swab32p(x) ___arch__swab32p(x) -#define __arch__swab16p(x) ___arch__swab16p(x) -#define __arch__swab32s(x) ___arch__swab32s(x) -#define __arch__swab16s(x) ___arch__swab16s(x) - -#ifndef __s390x__ -#if !defined(__STRICT_ANSI__) || defined(__KERNEL__) -# define __BYTEORDER_HAS_U64__ -# define __SWAB_64_THRU_32__ -#endif -#else /* __s390x__ */ -#define __BYTEORDER_HAS_U64__ -#endif /* __s390x__ */ - -#endif /* __GNUC__ */ - -#include <linux/byteorder/big_endian.h> +#include <linux/byteorder.h> #endif /* _S390_BYTEORDER_H */ diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h index 261785ab5b22..d480f39d65e6 100644 --- a/arch/s390/include/asm/elf.h +++ b/arch/s390/include/asm/elf.h @@ -120,6 +120,10 @@ typedef s390_compat_regs compat_elf_gregset_t; #include <asm/system.h> /* for save_access_regs */ #include <asm/mmu_context.h> +#include <asm/vdso.h> + +extern unsigned int vdso_enabled; + /* * This is used to ensure we don't load something for the wrong architecture. 
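The AT_SYSINFO_EHDR auxiliary-vector entry added above (and filled in by the ARCH_DLINFO hunk that follows) is how userspace learns where the kernel mapped the vDSO. A small consumer-side sketch, assuming a C library that provides getauxval() (a later glibc convenience; older code walks the auxv after envp instead) and a 64-bit ELF layout:

#include <elf.h>
#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
        /* Base address of the vDSO ELF image, 0 if none was mapped. */
        unsigned long vdso = getauxval(AT_SYSINFO_EHDR);

        if (!vdso) {
                puts("no vDSO mapped");
                return 1;
        }

        /* The mapping starts with an ordinary ELF header. */
        const Elf64_Ehdr *ehdr = (const Elf64_Ehdr *)vdso;
        printf("vDSO at %#lx, %u program headers\n",
               vdso, (unsigned)ehdr->e_phnum);
        return 0;
}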
*/ @@ -191,4 +195,16 @@ do { \ current->mm->context.noexec == 0; \ }) +#define ARCH_DLINFO \ +do { \ + if (vdso_enabled) \ + NEW_AUX_ENT(AT_SYSINFO_EHDR, \ + (unsigned long)current->mm->context.vdso_base); \ +} while (0) + +struct linux_binprm; + +#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 +int arch_setup_additional_pages(struct linux_binprm *, int); + #endif diff --git a/arch/s390/include/asm/fcx.h b/arch/s390/include/asm/fcx.h index 8be1f3a58042..ef6170995076 100644 --- a/arch/s390/include/asm/fcx.h +++ b/arch/s390/include/asm/fcx.h @@ -248,8 +248,8 @@ struct dcw { #define TCCB_MAX_SIZE (sizeof(struct tccb_tcah) + \ TCCB_MAX_DCW * sizeof(struct dcw) + \ sizeof(struct tccb_tcat)) -#define TCCB_SAC_DEFAULT 0xf901 -#define TCCB_SAC_INTRG 0xf902 +#define TCCB_SAC_DEFAULT 0x1ffe +#define TCCB_SAC_INTRG 0x1fff /** * struct tccb_tcah - Transport-Command-Area Header (TCAH) diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h new file mode 100644 index 000000000000..5a5bc75e19d4 --- /dev/null +++ b/arch/s390/include/asm/ftrace.h @@ -0,0 +1,8 @@ +#ifndef _ASM_S390_FTRACE_H +#define _ASM_S390_FTRACE_H + +#ifndef __ASSEMBLY__ +extern void _mcount(void); +#endif + +#endif /* _ASM_S390_FTRACE_H */ diff --git a/arch/s390/include/asm/isc.h b/arch/s390/include/asm/isc.h index 34bb8916db4f..1420a1115948 100644 --- a/arch/s390/include/asm/isc.h +++ b/arch/s390/include/asm/isc.h @@ -17,6 +17,7 @@ #define CHSC_SCH_ISC 7 /* CHSC subchannels */ /* Adapter interrupts. */ #define QDIO_AIRQ_ISC IO_SCH_ISC /* I/O subchannel in qdio mode */ +#define AP_ISC 6 /* adjunct processor (crypto) devices */ /* Functions for registration of I/O interruption subclasses */ void isc_register(unsigned int isc); diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h index d2b4ff831477..3b59216e6284 100644 --- a/arch/s390/include/asm/mmu.h +++ b/arch/s390/include/asm/mmu.h @@ -6,6 +6,7 @@ typedef struct { struct list_head pgtable_list; unsigned long asce_bits; unsigned long asce_limit; + unsigned long vdso_base; int noexec; int has_pgste; /* The mmu context has extended page tables */ int alloc_pgste; /* cloned contexts will have extended page tables */ diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h index 991ba939408c..32e8f6aa4384 100644 --- a/arch/s390/include/asm/page.h +++ b/arch/s390/include/asm/page.h @@ -152,4 +152,6 @@ void arch_alloc_page(struct page *page, int order); #include <asm-generic/memory_model.h> #include <asm-generic/page.h> +#define __HAVE_ARCH_GATE_AREA 1 + #endif /* _S390_PAGE_H */ diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h index f5b2bf3d7c1d..b2658b9220fe 100644 --- a/arch/s390/include/asm/pgalloc.h +++ b/arch/s390/include/asm/pgalloc.h @@ -28,6 +28,8 @@ void disable_noexec(struct mm_struct *, struct task_struct *); static inline void clear_table(unsigned long *s, unsigned long val, size_t n) { + typedef struct { char _[n]; } addrtype; + *s = val; n = (n / 256) - 1; asm volatile( @@ -39,7 +41,8 @@ static inline void clear_table(unsigned long *s, unsigned long val, size_t n) "0: mvc 256(256,%0),0(%0)\n" " la %0,256(%0)\n" " brct %1,0b\n" - : "+a" (s), "+d" (n)); + : "+a" (s), "+d" (n), "=m" (*(addrtype *) s) + : "m" (*(addrtype *) s)); } static inline void crst_table_init(unsigned long *crst, unsigned long entry) diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h index 4af80af2a88f..066b99502e09 100644 --- a/arch/s390/include/asm/processor.h +++ 
b/arch/s390/include/asm/processor.h @@ -13,6 +13,7 @@ #ifndef __ASM_S390_PROCESSOR_H #define __ASM_S390_PROCESSOR_H +#include <linux/linkage.h> #include <asm/ptrace.h> #ifdef __KERNEL__ @@ -258,7 +259,7 @@ static inline void enabled_wait(void) * Function to drop a processor into disabled wait state */ -static inline void disabled_wait(unsigned long code) +static inline void ATTRIB_NORET disabled_wait(unsigned long code) { unsigned long ctl_buf; psw_t dw_psw; @@ -322,6 +323,7 @@ static inline void disabled_wait(unsigned long code) : "=m" (ctl_buf) : "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc", "0"); #endif /* __s390x__ */ + while (1); } /* diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h index 4734c3f05354..27fc1746de15 100644 --- a/arch/s390/include/asm/qdio.h +++ b/arch/s390/include/asm/qdio.h @@ -373,16 +373,16 @@ struct qdio_initialize { #define QDIO_FLAG_SYNC_OUTPUT 0x02 #define QDIO_FLAG_PCI_OUT 0x10 -extern int qdio_initialize(struct qdio_initialize *init_data); -extern int qdio_allocate(struct qdio_initialize *init_data); -extern int qdio_establish(struct qdio_initialize *init_data); +extern int qdio_initialize(struct qdio_initialize *); +extern int qdio_allocate(struct qdio_initialize *); +extern int qdio_establish(struct qdio_initialize *); extern int qdio_activate(struct ccw_device *); -extern int do_QDIO(struct ccw_device*, unsigned int flags, - int q_nr, int qidx, int count); -extern int qdio_cleanup(struct ccw_device*, int how); -extern int qdio_shutdown(struct ccw_device*, int how); +extern int do_QDIO(struct ccw_device *cdev, unsigned int callflags, + int q_nr, int bufnr, int count); +extern int qdio_cleanup(struct ccw_device*, int); +extern int qdio_shutdown(struct ccw_device*, int); extern int qdio_free(struct ccw_device *); -extern struct qdio_ssqd_desc *qdio_get_ssqd_desc(struct ccw_device *cdev); +extern int qdio_get_ssqd_desc(struct ccw_device *dev, struct qdio_ssqd_desc*); #endif /* __QDIO_H__ */ diff --git a/arch/s390/include/asm/sigp.h b/arch/s390/include/asm/sigp.h index e16d56f8dfe1..ec403d4304f8 100644 --- a/arch/s390/include/asm/sigp.h +++ b/arch/s390/include/asm/sigp.h @@ -61,6 +61,7 @@ typedef enum { ec_schedule=0, ec_call_function, + ec_call_function_single, ec_bit_last } ec_bit_sig; diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h index ae89cf2478fc..024b91e06239 100644 --- a/arch/s390/include/asm/smp.h +++ b/arch/s390/include/asm/smp.h @@ -91,8 +91,9 @@ extern int __cpu_up (unsigned int cpu); extern struct mutex smp_cpu_state_mutex; extern int smp_cpu_polarization[]; -extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *), - void *info, int wait); +extern void arch_send_call_function_single_ipi(int cpu); +extern void arch_send_call_function_ipi(cpumask_t mask); + #endif #ifndef CONFIG_SMP diff --git a/arch/s390/include/asm/sysinfo.h b/arch/s390/include/asm/sysinfo.h index 79d01343f8b0..ad93212d9e16 100644 --- a/arch/s390/include/asm/sysinfo.h +++ b/arch/s390/include/asm/sysinfo.h @@ -118,4 +118,15 @@ static inline int stsi(void *sysinfo, int fc, int sel1, int sel2) return r0; } +/* + * Service level reporting interface. 
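The service level interface declared in this sysinfo.h hunk is callback based: a subsystem embeds a struct service_level, points seq_print at a routine that prints one line into the given seq_file, and registers the structure. A hypothetical consumer might look like the sketch below; the device name and microcode string are invented for illustration.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <asm/sysinfo.h>

/* Hypothetical: report a firmware level for some device. */
static void mydev_seq_print(struct seq_file *m, struct service_level *slr)
{
        seq_printf(m, "Mydev microcode level: %s\n", "1.2.3");
}

static struct service_level mydev_service_level = {
        .seq_print = mydev_seq_print,
};

static int __init mydev_sl_init(void)
{
        return register_service_level(&mydev_service_level);
}
module_init(mydev_sl_init);

static void __exit mydev_sl_exit(void)
{
        unregister_service_level(&mydev_service_level);
}
module_exit(mydev_sl_exit);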
+ */ +struct service_level { + struct list_head list; + void (*seq_print)(struct seq_file *, struct service_level *); +}; + +int register_service_level(struct service_level *); +int unregister_service_level(struct service_level *); + #endif /* __ASM_S390_SYSINFO_H */ diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h index 819e7d99ca0c..024ef42ed6d7 100644 --- a/arch/s390/include/asm/system.h +++ b/arch/s390/include/asm/system.h @@ -12,6 +12,7 @@ #define __ASM_SYSTEM_H #include <linux/kernel.h> +#include <linux/errno.h> #include <asm/types.h> #include <asm/ptrace.h> #include <asm/setup.h> @@ -98,13 +99,9 @@ static inline void restore_access_regs(unsigned int *acrs) prev = __switch_to(prev,next); \ } while (0) -#ifdef CONFIG_VIRT_CPU_ACCOUNTING extern void account_vtime(struct task_struct *); extern void account_tick_vtime(struct task_struct *); extern void account_system_vtime(struct task_struct *); -#else -#define account_vtime(x) do { /* empty */ } while (0) -#endif #ifdef CONFIG_PFAULT extern void pfault_irq_init(void); @@ -413,8 +410,6 @@ __set_psw_mask(unsigned long mask) #define local_mcck_enable() __set_psw_mask(psw_kernel_bits) #define local_mcck_disable() __set_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK) -int stfle(unsigned long long *list, int doublewords); - #ifdef CONFIG_SMP extern void smp_ctl_set_bit(int cr, int bit); @@ -438,6 +433,23 @@ static inline unsigned int stfl(void) return S390_lowcore.stfl_fac_list; } +static inline int __stfle(unsigned long long *list, int doublewords) +{ + typedef struct { unsigned long long _[doublewords]; } addrtype; + register unsigned long __nr asm("0") = doublewords - 1; + + asm volatile(".insn s,0xb2b00000,%0" /* stfle */ + : "=m" (*(addrtype *) list), "+d" (__nr) : : "cc"); + return __nr + 1; +} + +static inline int stfle(unsigned long long *list, int doublewords) +{ + if (!(stfl() & (1UL << 24))) + return -EOPNOTSUPP; + return __stfle(list, doublewords); +} + static inline unsigned short stap(void) { unsigned short cpu_address; diff --git a/arch/s390/include/asm/timer.h b/arch/s390/include/asm/timer.h index d98d79e35cd6..61705d60f995 100644 --- a/arch/s390/include/asm/timer.h +++ b/arch/s390/include/asm/timer.h @@ -48,18 +48,9 @@ extern int del_virt_timer(struct vtimer_list *timer); extern void init_cpu_vtimer(void); extern void vtime_init(void); -#ifdef CONFIG_VIRT_TIMER - extern void vtime_start_cpu_timer(void); extern void vtime_stop_cpu_timer(void); -#else - -static inline void vtime_start_cpu_timer(void) { } -static inline void vtime_stop_cpu_timer(void) { } - -#endif /* CONFIG_VIRT_TIMER */ - #endif /* __KERNEL__ */ #endif /* _ASM_S390_TIMER_H */ diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h new file mode 100644 index 000000000000..a44f4fe16a35 --- /dev/null +++ b/arch/s390/include/asm/vdso.h @@ -0,0 +1,39 @@ +#ifndef __S390_VDSO_H__ +#define __S390_VDSO_H__ + +#ifdef __KERNEL__ + +/* Default link addresses for the vDSOs */ +#define VDSO32_LBASE 0 +#define VDSO64_LBASE 0 + +#define VDSO_VERSION_STRING LINUX_2.6.26 + +#ifndef __ASSEMBLY__ + +/* + * Note about this structure: + * + * NEVER USE THIS IN USERSPACE CODE DIRECTLY. The layout of this + * structure is supposed to be known only to the function in the vdso + * itself and may change without notice. 
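The structure that follows carries a tb_update_count field precisely so the vDSO code can read the remaining fields without taking a lock: the kernel bumps the counter around every update and the reader retries until it sees a stable value. A schematic version of that retry loop, written in plain C rather than the vDSO assembly, over a simplified stand-in structure (memory barriers omitted for brevity; real code needs them):

#include <stdint.h>

/* Simplified stand-in for the fields a reader needs; not the real layout. */
struct snap {
        volatile uint64_t update_count; /* bumped by the writer around updates */
        uint64_t sec;
        uint64_t nsec;
};

static void read_snapshot(const struct snap *s, uint64_t *sec, uint64_t *nsec)
{
        uint64_t start;

        do {
                start = s->update_count;
                *sec  = s->sec;
                *nsec = s->nsec;
                /* An odd count (writer in progress, in the odd/even
                 * convention) or a changed count means the values above
                 * may be torn and must be re-read. */
        } while ((start & 1) || start != s->update_count);
}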
+ */ + +struct vdso_data { + __u64 tb_update_count; /* Timebase atomicity ctr 0x00 */ + __u64 xtime_tod_stamp; /* TOD clock for xtime 0x08 */ + __u64 xtime_clock_sec; /* Kernel time 0x10 */ + __u64 xtime_clock_nsec; /* 0x18 */ + __u64 wtom_clock_sec; /* Wall to monotonic clock 0x20 */ + __u64 wtom_clock_nsec; /* 0x28 */ + __u32 tz_minuteswest; /* Minutes west of Greenwich 0x30 */ + __u32 tz_dsttime; /* Type of dst correction 0x34 */ +}; + +extern struct vdso_data *vdso_data; + +#endif /* __ASSEMBLY__ */ + +#endif /* __KERNEL__ */ + +#endif /* __S390_VDSO_H__ */ diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index 50f657e77344..3edc6c6f258b 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile @@ -2,6 +2,11 @@ # Makefile for the linux kernel. # +ifdef CONFIG_FUNCTION_TRACER +# Do not trace early boot code +CFLAGS_REMOVE_early.o = -pg +endif + # # Passing null pointers is ok for smp code, since we access the lowcore here. # @@ -12,9 +17,10 @@ CFLAGS_smp.o := -Wno-nonnull # CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"' -obj-y := bitmap.o traps.o time.o process.o base.o early.o \ - setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \ - s390_ext.o debug.o irq.o ipl.o dis.o diag.o mem_detect.o +obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o \ + processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \ + s390_ext.o debug.o irq.o ipl.o dis.o diag.o mem_detect.o \ + vdso.o vtime.o obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) @@ -30,12 +36,16 @@ obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o \ compat_wrapper.o compat_exec_domain.o \ $(compat-obj-y) -obj-$(CONFIG_VIRT_TIMER) += vtime.o obj-$(CONFIG_STACKTRACE) += stacktrace.o obj-$(CONFIG_KPROBES) += kprobes.o +obj-$(CONFIG_FUNCTION_TRACER) += mcount.o # Kexec part S390_KEXEC_OBJS := machine_kexec.o crash.o S390_KEXEC_OBJS += $(if $(CONFIG_64BIT),relocate_kernel64.o,relocate_kernel.o) obj-$(CONFIG_KEXEC) += $(S390_KEXEC_OBJS) +# vdso +obj-$(CONFIG_64BIT) += vdso64/ +obj-$(CONFIG_32BIT) += vdso32/ +obj-$(CONFIG_COMPAT) += vdso32/ diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c index 3d144e6020c6..e641f60bac99 100644 --- a/arch/s390/kernel/asm-offsets.c +++ b/arch/s390/kernel/asm-offsets.c @@ -6,6 +6,7 @@ #include <linux/sched.h> #include <linux/kbuild.h> +#include <asm/vdso.h> int main(void) { @@ -38,5 +39,19 @@ int main(void) DEFINE(__SF_BACKCHAIN, offsetof(struct stack_frame, back_chain)); DEFINE(__SF_GPRS, offsetof(struct stack_frame, gprs)); DEFINE(__SF_EMPTY, offsetof(struct stack_frame, empty1)); + BLANK(); + /* timeval/timezone offsets for use by vdso */ + DEFINE(__VDSO_UPD_COUNT, offsetof(struct vdso_data, tb_update_count)); + DEFINE(__VDSO_XTIME_STAMP, offsetof(struct vdso_data, xtime_tod_stamp)); + DEFINE(__VDSO_XTIME_SEC, offsetof(struct vdso_data, xtime_clock_sec)); + DEFINE(__VDSO_XTIME_NSEC, offsetof(struct vdso_data, xtime_clock_nsec)); + DEFINE(__VDSO_WTOM_SEC, offsetof(struct vdso_data, wtom_clock_sec)); + DEFINE(__VDSO_WTOM_NSEC, offsetof(struct vdso_data, wtom_clock_nsec)); + DEFINE(__VDSO_TIMEZONE, offsetof(struct vdso_data, tz_minuteswest)); + /* constants used by the vdso */ + DEFINE(CLOCK_REALTIME, CLOCK_REALTIME); + DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC); + DEFINE(CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC); + return 0; } diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c index 4646382af34f..6cc87d8c8682 100644 --- a/arch/s390/kernel/compat_linux.c 
+++ b/arch/s390/kernel/compat_linux.c @@ -148,9 +148,9 @@ asmlinkage long sys32_getresuid16(u16 __user *ruid, u16 __user *euid, u16 __user { int retval; - if (!(retval = put_user(high2lowuid(current->uid), ruid)) && - !(retval = put_user(high2lowuid(current->euid), euid))) - retval = put_user(high2lowuid(current->suid), suid); + if (!(retval = put_user(high2lowuid(current->cred->uid), ruid)) && + !(retval = put_user(high2lowuid(current->cred->euid), euid))) + retval = put_user(high2lowuid(current->cred->suid), suid); return retval; } @@ -165,9 +165,9 @@ asmlinkage long sys32_getresgid16(u16 __user *rgid, u16 __user *egid, u16 __user { int retval; - if (!(retval = put_user(high2lowgid(current->gid), rgid)) && - !(retval = put_user(high2lowgid(current->egid), egid))) - retval = put_user(high2lowgid(current->sgid), sgid); + if (!(retval = put_user(high2lowgid(current->cred->gid), rgid)) && + !(retval = put_user(high2lowgid(current->cred->egid), egid))) + retval = put_user(high2lowgid(current->cred->sgid), sgid); return retval; } @@ -217,20 +217,20 @@ asmlinkage long sys32_getgroups16(int gidsetsize, u16 __user *grouplist) if (gidsetsize < 0) return -EINVAL; - get_group_info(current->group_info); - i = current->group_info->ngroups; + get_group_info(current->cred->group_info); + i = current->cred->group_info->ngroups; if (gidsetsize) { if (i > gidsetsize) { i = -EINVAL; goto out; } - if (groups16_to_user(grouplist, current->group_info)) { + if (groups16_to_user(grouplist, current->cred->group_info)) { i = -EFAULT; goto out; } } out: - put_group_info(current->group_info); + put_group_info(current->cred->group_info); return i; } @@ -261,22 +261,22 @@ asmlinkage long sys32_setgroups16(int gidsetsize, u16 __user *grouplist) asmlinkage long sys32_getuid16(void) { - return high2lowuid(current->uid); + return high2lowuid(current->cred->uid); } asmlinkage long sys32_geteuid16(void) { - return high2lowuid(current->euid); + return high2lowuid(current->cred->euid); } asmlinkage long sys32_getgid16(void) { - return high2lowgid(current->gid); + return high2lowgid(current->cred->gid); } asmlinkage long sys32_getegid16(void) { - return high2lowgid(current->egid); + return high2lowgid(current->cred->egid); } /* diff --git a/arch/s390/kernel/cpcmd.c b/arch/s390/kernel/cpcmd.c index d8c1131e0815..3e8b8816f309 100644 --- a/arch/s390/kernel/cpcmd.c +++ b/arch/s390/kernel/cpcmd.c @@ -7,6 +7,9 @@ * Christian Borntraeger (cborntra@de.ibm.com), */ +#define KMSG_COMPONENT "cpcmd" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> @@ -104,8 +107,8 @@ int cpcmd(const char *cmd, char *response, int rlen, int *response_code) (((unsigned long)response + rlen) >> 31)) { lowbuf = kmalloc(rlen, GFP_KERNEL | GFP_DMA); if (!lowbuf) { - printk(KERN_WARNING - "cpcmd: could not allocate response buffer\n"); + pr_warning("The cpcmd kernel function failed to " + "allocate a response buffer\n"); return -ENOMEM; } spin_lock_irqsave(&cpcmd_lock, flags); diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c index d80fcd4a7fe1..ba03fc0a3a56 100644 --- a/arch/s390/kernel/debug.c +++ b/arch/s390/kernel/debug.c @@ -10,6 +10,9 @@ * Bugreports to: <Linux390@de.ibm.com> */ +#define KMSG_COMPONENT "s390dbf" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include <linux/stddef.h> #include <linux/kernel.h> #include <linux/errno.h> @@ -388,7 +391,7 @@ debug_info_copy(debug_info_t* in, int mode) debug_info_free(rc); } while (1); - if(!rc || (mode == NO_AREAS)) + if 
(mode == NO_AREAS) goto out; for(i = 0; i < in->nr_areas; i++){ @@ -693,8 +696,8 @@ debug_info_t *debug_register_mode(const char *name, int pages_per_area, /* Since debugfs currently does not support uid/gid other than root, */ /* we do not allow gid/uid != 0 until we get support for that. */ if ((uid != 0) || (gid != 0)) - printk(KERN_WARNING "debug: Warning - Currently only uid/gid " - "= 0 are supported. Using root as owner now!"); + pr_warning("Root becomes the owner of all s390dbf files " + "in sysfs\n"); if (!initialized) BUG(); mutex_lock(&debug_mutex); @@ -709,7 +712,7 @@ debug_info_t *debug_register_mode(const char *name, int pages_per_area, debug_register_view(rc, &debug_pages_view); out: if (!rc){ - printk(KERN_ERR "debug: debug_register failed for %s\n",name); + pr_err("Registering debug feature %s failed\n", name); } mutex_unlock(&debug_mutex); return rc; @@ -763,8 +766,8 @@ debug_set_size(debug_info_t* id, int nr_areas, int pages_per_area) if(pages_per_area > 0){ new_areas = debug_areas_alloc(pages_per_area, nr_areas); if(!new_areas) { - printk(KERN_WARNING "debug: could not allocate memory "\ - "for pagenumber: %i\n",pages_per_area); + pr_info("Allocating memory for %i pages failed\n", + pages_per_area); rc = -ENOMEM; goto out; } @@ -780,8 +783,7 @@ debug_set_size(debug_info_t* id, int nr_areas, int pages_per_area) memset(id->active_entries,0,sizeof(int)*id->nr_areas); memset(id->active_pages, 0, sizeof(int)*id->nr_areas); spin_unlock_irqrestore(&id->lock,flags); - printk(KERN_INFO "debug: %s: set new size (%i pages)\n"\ - ,id->name, pages_per_area); + pr_info("%s: set new size (%i pages)\n" ,id->name, pages_per_area); out: return rc; } @@ -800,10 +802,9 @@ debug_set_level(debug_info_t* id, int new_level) spin_lock_irqsave(&id->lock,flags); if(new_level == DEBUG_OFF_LEVEL){ id->level = DEBUG_OFF_LEVEL; - printk(KERN_INFO "debug: %s: switched off\n",id->name); + pr_info("%s: switched off\n",id->name); } else if ((new_level > DEBUG_MAX_LEVEL) || (new_level < 0)) { - printk(KERN_INFO - "debug: %s: level %i is out of range (%i - %i)\n", + pr_info("%s: level %i is out of range (%i - %i)\n", id->name, new_level, 0, DEBUG_MAX_LEVEL); } else { id->level = new_level; @@ -1108,8 +1109,8 @@ debug_register_view(debug_info_t * id, struct debug_view *view) pde = debugfs_create_file(view->name, mode, id->debugfs_root_entry, id , &debug_file_ops); if (!pde){ - printk(KERN_WARNING "debug: debugfs_create_file() failed!"\ - " Cannot register view %s/%s\n", id->name,view->name); + pr_err("Registering view %s/%s failed due to out of " + "memory\n", id->name,view->name); rc = -1; goto out; } @@ -1119,10 +1120,8 @@ debug_register_view(debug_info_t * id, struct debug_view *view) break; } if (i == DEBUG_MAX_VIEWS) { - printk(KERN_WARNING "debug: cannot register view %s/%s\n", - id->name,view->name); - printk(KERN_WARNING - "debug: maximum number of views reached (%i)!\n", i); + pr_err("Registering view %s/%s would exceed the maximum " + "number of views %i\n", id->name, view->name, i); debugfs_remove(pde); rc = -1; } else { @@ -1303,7 +1302,8 @@ debug_input_level_fn(debug_info_t * id, struct debug_view *view, new_level = debug_get_uint(str); } if(new_level < 0) { - printk(KERN_INFO "debug: level `%s` is not valid\n", str); + pr_warning("%s is not a valid level for a debug " + "feature\n", str); rc = -EINVAL; } else { debug_set_level(id, new_level); @@ -1380,7 +1380,8 @@ debug_input_flush_fn(debug_info_t * id, struct debug_view *view, goto out; } - printk(KERN_INFO "debug: area `%c` is not valid\n", 
input_buf[0]); + pr_info("Flushing debug data failed because %c is not a valid " + "area\n", input_buf[0]); out: *offset += user_len; diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 198ea18a534d..55de521aef77 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -109,13 +109,6 @@ STACK_SIZE = 1 << STACK_SHIFT * R15 - kernel stack pointer */ - .macro STORE_TIMER lc_offset -#ifdef CONFIG_VIRT_CPU_ACCOUNTING - stpt \lc_offset -#endif - .endm - -#ifdef CONFIG_VIRT_CPU_ACCOUNTING .macro UPDATE_VTIME lc_from,lc_to,lc_sum lm %r10,%r11,\lc_from sl %r10,\lc_to @@ -128,7 +121,6 @@ STACK_SIZE = 1 << STACK_SHIFT al %r10,BASED(.Lc_1) 1: stm %r10,%r11,\lc_sum .endm -#endif .macro SAVE_ALL_BASE savearea stm %r12,%r15,\savearea @@ -198,7 +190,7 @@ STACK_SIZE = 1 << STACK_SHIFT ni \psworg+1,0xfd # clear wait state bit .endif lm %r0,%r15,SP_R0(%r15) # load gprs 0-15 of user - STORE_TIMER __LC_EXIT_TIMER + stpt __LC_EXIT_TIMER lpsw \psworg # back to caller .endm @@ -247,20 +239,18 @@ __critical_start: .globl system_call system_call: - STORE_TIMER __LC_SYNC_ENTER_TIMER + stpt __LC_SYNC_ENTER_TIMER sysc_saveall: SAVE_ALL_BASE __LC_SAVE_AREA SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA lh %r7,0x8a # get svc number from lowcore -#ifdef CONFIG_VIRT_CPU_ACCOUNTING sysc_vtime: UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER sysc_stime: UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER sysc_update: mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER -#endif sysc_do_svc: l %r9,__LC_THREAD_INFO # load pointer to thread_info struct ltr %r7,%r7 # test for svc 0 @@ -436,7 +426,7 @@ ret_from_fork: basr %r14,%r1 TRACE_IRQS_ON stosm __SF_EMPTY(%r15),0x03 # reenable interrupts - b BASED(sysc_return) + b BASED(sysc_tracenogo) # # kernel_execve function needs to deal with pt_regs that is not @@ -490,20 +480,18 @@ pgm_check_handler: * we just ignore the PER event (FIXME: is there anything we have to do * for LPSW?). */ - STORE_TIMER __LC_SYNC_ENTER_TIMER + stpt __LC_SYNC_ENTER_TIMER SAVE_ALL_BASE __LC_SAVE_AREA tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception bnz BASED(pgm_per) # got per exception -> special case SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA -#ifdef CONFIG_VIRT_CPU_ACCOUNTING tm SP_PSW+1(%r15),0x01 # interrupting from user ? bz BASED(pgm_no_vtime) UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER pgm_no_vtime: -#endif l %r9,__LC_THREAD_INFO # load pointer to thread_info struct TRACE_IRQS_OFF l %r3,__LC_PGM_ILC # load program interruption code @@ -536,14 +524,12 @@ pgm_per: pgm_per_std: SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA -#ifdef CONFIG_VIRT_CPU_ACCOUNTING tm SP_PSW+1(%r15),0x01 # interrupting from user ? 
bz BASED(pgm_no_vtime2) UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER pgm_no_vtime2: -#endif l %r9,__LC_THREAD_INFO # load pointer to thread_info struct TRACE_IRQS_OFF l %r1,__TI_task(%r9) @@ -565,11 +551,9 @@ pgm_no_vtime2: pgm_svcper: SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA -#ifdef CONFIG_VIRT_CPU_ACCOUNTING UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER -#endif lh %r7,0x8a # get svc number from lowcore l %r9,__LC_THREAD_INFO # load pointer to thread_info struct TRACE_IRQS_OFF @@ -599,19 +583,17 @@ kernel_per: .globl io_int_handler io_int_handler: - STORE_TIMER __LC_ASYNC_ENTER_TIMER + stpt __LC_ASYNC_ENTER_TIMER stck __LC_INT_CLOCK SAVE_ALL_BASE __LC_SAVE_AREA+16 SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+16 CREATE_STACK_FRAME __LC_IO_OLD_PSW,__LC_SAVE_AREA+16 -#ifdef CONFIG_VIRT_CPU_ACCOUNTING tm SP_PSW+1(%r15),0x01 # interrupting from user ? bz BASED(io_no_vtime) UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER io_no_vtime: -#endif l %r9,__LC_THREAD_INFO # load pointer to thread_info struct TRACE_IRQS_OFF l %r1,BASED(.Ldo_IRQ) # load address of do_IRQ @@ -741,19 +723,17 @@ io_notify_resume: .globl ext_int_handler ext_int_handler: - STORE_TIMER __LC_ASYNC_ENTER_TIMER + stpt __LC_ASYNC_ENTER_TIMER stck __LC_INT_CLOCK SAVE_ALL_BASE __LC_SAVE_AREA+16 SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16 CREATE_STACK_FRAME __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16 -#ifdef CONFIG_VIRT_CPU_ACCOUNTING tm SP_PSW+1(%r15),0x01 # interrupting from user ? bz BASED(ext_no_vtime) UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER ext_no_vtime: -#endif l %r9,__LC_THREAD_INFO # load pointer to thread_info struct TRACE_IRQS_OFF la %r2,SP_PTREGS(%r15) # address of register-save area @@ -776,7 +756,6 @@ mcck_int_handler: la %r12,__LC_MCK_OLD_PSW tm __LC_MCCK_CODE,0x80 # system damage? bo BASED(mcck_int_main) # yes -> rest of mcck code invalid -#ifdef CONFIG_VIRT_CPU_ACCOUNTING mvc __LC_SAVE_AREA+52(8),__LC_ASYNC_ENTER_TIMER mvc __LC_ASYNC_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid? @@ -793,9 +772,7 @@ mcck_int_handler: la %r14,__LC_LAST_UPDATE_TIMER 0: spt 0(%r14) mvc __LC_ASYNC_ENTER_TIMER(8),0(%r14) -1: -#endif - tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid? +1: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid? bno BASED(mcck_int_main) # no -> skip cleanup critical tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit bnz BASED(mcck_int_main) # from user -> load async stack @@ -812,7 +789,6 @@ mcck_int_main: be BASED(0f) l %r15,__LC_PANIC_STACK # load panic stack 0: CREATE_STACK_FRAME __LC_MCK_OLD_PSW,__LC_SAVE_AREA+32 -#ifdef CONFIG_VIRT_CPU_ACCOUNTING tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid? bno BASED(mcck_no_vtime) # no -> skip cleanup critical tm SP_PSW+1(%r15),0x01 # interrupting from user ? 
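Both the 31-bit and 64-bit entry code in this diff now do the virtual CPU time accounting unconditionally: stpt stores a timer snapshot on entry, the problem-state bit of the saved PSW decides whether the interrupted context was user code, and UPDATE_VTIME folds the timer deltas into the user/system accumulators in the lowcore. A rough userspace C sketch of that flow follows; the names, values and the direction of the subtraction are simplifications for illustration, not the kernel's definitions.

#include <stdint.h>
#include <stdio.h>

#define PSW_PROBLEM_STATE 0x01	/* the bit behind "tm SP_PSW+1(%r15),0x01" */

/* Accumulate "from - to" into a running sum, roughly what the
 * UPDATE_VTIME lc_from,lc_to,lc_sum macro does with two lowcore CPU-timer
 * snapshots (ignoring that the real CPU timer counts down and that the
 * 31-bit variant handles the borrow explicitly). */
static void update_vtime(uint64_t from, uint64_t to, uint64_t *sum)
{
	*sum += from - to;
}

/* The entry paths only do this accounting when the interrupted context was
 * user code; otherwise they branch past it (pgm_no_vtime, io_no_vtime, ...). */
static void account_entry(uint8_t saved_psw_flags, uint64_t enter_ts,
			  uint64_t exit_ts, uint64_t last_ts,
			  uint64_t *user, uint64_t *sys)
{
	if (!(saved_psw_flags & PSW_PROBLEM_STATE))
		return;				/* came from kernel mode: skip */
	update_vtime(exit_ts, enter_ts, user);	/* the __LC_USER_TIMER update */
	update_vtime(last_ts, exit_ts, sys);	/* the __LC_SYSTEM_TIMER update */
}

int main(void)
{
	uint64_t user = 0, sys = 0;

	account_entry(0x01, 5000, 5750, 6000, &user, &sys);
	printf("user=%llu system=%llu\n",
	       (unsigned long long)user, (unsigned long long)sys);
	return 0;
}

Only the shape of the bookkeeping is shown; the real code works on lowcore fields in assembler.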
@@ -821,7 +797,6 @@ mcck_int_main: UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER mcck_no_vtime: -#endif l %r9,__LC_THREAD_INFO # load pointer to thread_info struct la %r2,SP_PTREGS(%r15) # load pt_regs l %r1,BASED(.Ls390_mcck) @@ -843,16 +818,13 @@ mcck_no_vtime: mcck_return: mvc __LC_RETURN_MCCK_PSW(8),SP_PSW(%r15) # move return PSW ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit -#ifdef CONFIG_VIRT_CPU_ACCOUNTING mvc __LC_ASYNC_ENTER_TIMER(8),__LC_SAVE_AREA+52 tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ? bno BASED(0f) lm %r0,%r15,SP_R0(%r15) # load gprs 0-15 stpt __LC_EXIT_TIMER lpsw __LC_RETURN_MCCK_PSW # back to caller -0: -#endif - lm %r0,%r15,SP_R0(%r15) # load gprs 0-15 +0: lm %r0,%r15,SP_R0(%r15) # load gprs 0-15 lpsw __LC_RETURN_MCCK_PSW # back to caller RESTORE_ALL __LC_RETURN_MCCK_PSW,0 @@ -976,13 +948,11 @@ cleanup_system_call: b BASED(1f) 0: la %r12,__LC_SAVE_AREA+32 1: -#ifdef CONFIG_VIRT_CPU_ACCOUNTING clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+4) bh BASED(0f) mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER 0: clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+8) bhe BASED(cleanup_vtime) -#endif clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn) bh BASED(0f) mvc __LC_SAVE_AREA(16),0(%r12) @@ -993,7 +963,6 @@ cleanup_system_call: l %r12,__LC_SAVE_AREA+48 # argh st %r15,12(%r12) lh %r7,0x8a -#ifdef CONFIG_VIRT_CPU_ACCOUNTING cleanup_vtime: clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+12) bhe BASED(cleanup_stime) @@ -1004,18 +973,15 @@ cleanup_stime: UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER cleanup_update: mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER -#endif mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_system_call+4) la %r12,__LC_RETURN_PSW br %r14 cleanup_system_call_insn: .long sysc_saveall + 0x80000000 -#ifdef CONFIG_VIRT_CPU_ACCOUNTING .long system_call + 0x80000000 .long sysc_vtime + 0x80000000 .long sysc_stime + 0x80000000 .long sysc_update + 0x80000000 -#endif cleanup_sysc_return: mvc __LC_RETURN_PSW(4),0(%r12) @@ -1026,11 +992,9 @@ cleanup_sysc_return: cleanup_sysc_leave: clc 4(4,%r12),BASED(cleanup_sysc_leave_insn) be BASED(2f) -#ifdef CONFIG_VIRT_CPU_ACCOUNTING mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER clc 4(4,%r12),BASED(cleanup_sysc_leave_insn+4) be BASED(2f) -#endif mvc __LC_RETURN_PSW(8),SP_PSW(%r15) c %r12,BASED(.Lmck_old_psw) bne BASED(0f) @@ -1043,9 +1007,7 @@ cleanup_sysc_leave: br %r14 cleanup_sysc_leave_insn: .long sysc_done - 4 + 0x80000000 -#ifdef CONFIG_VIRT_CPU_ACCOUNTING .long sysc_done - 8 + 0x80000000 -#endif cleanup_io_return: mvc __LC_RETURN_PSW(4),0(%r12) @@ -1056,11 +1018,9 @@ cleanup_io_return: cleanup_io_leave: clc 4(4,%r12),BASED(cleanup_io_leave_insn) be BASED(2f) -#ifdef CONFIG_VIRT_CPU_ACCOUNTING mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER clc 4(4,%r12),BASED(cleanup_io_leave_insn+4) be BASED(2f) -#endif mvc __LC_RETURN_PSW(8),SP_PSW(%r15) c %r12,BASED(.Lmck_old_psw) bne BASED(0f) @@ -1073,9 +1033,7 @@ cleanup_io_leave: br %r14 cleanup_io_leave_insn: .long io_done - 4 + 0x80000000 -#ifdef CONFIG_VIRT_CPU_ACCOUNTING .long io_done - 8 + 0x80000000 -#endif /* * Integer constants diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S index 89c121ae6339..16bb4fd1a403 100644 --- a/arch/s390/kernel/entry64.S +++ b/arch/s390/kernel/entry64.S @@ -96,20 +96,12 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ #define LOCKDEP_SYS_EXIT 
#endif - .macro STORE_TIMER lc_offset -#ifdef CONFIG_VIRT_CPU_ACCOUNTING - stpt \lc_offset -#endif - .endm - -#ifdef CONFIG_VIRT_CPU_ACCOUNTING .macro UPDATE_VTIME lc_from,lc_to,lc_sum lg %r10,\lc_from slg %r10,\lc_to alg %r10,\lc_sum stg %r10,\lc_sum .endm -#endif /* * Register usage in interrupt handlers: @@ -186,7 +178,7 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ ni \psworg+1,0xfd # clear wait state bit .endif lmg %r0,%r15,SP_R0(%r15) # load gprs 0-15 of user - STORE_TIMER __LC_EXIT_TIMER + stpt __LC_EXIT_TIMER lpswe \psworg # back to caller .endm @@ -233,20 +225,18 @@ __critical_start: .globl system_call system_call: - STORE_TIMER __LC_SYNC_ENTER_TIMER + stpt __LC_SYNC_ENTER_TIMER sysc_saveall: SAVE_ALL_BASE __LC_SAVE_AREA SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA llgh %r7,__LC_SVC_INT_CODE # get svc number from lowcore -#ifdef CONFIG_VIRT_CPU_ACCOUNTING sysc_vtime: UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER sysc_stime: UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER sysc_update: mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER -#endif sysc_do_svc: lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct ltgr %r7,%r7 # test for svc 0 @@ -417,7 +407,7 @@ ret_from_fork: 0: brasl %r14,schedule_tail TRACE_IRQS_ON stosm 24(%r15),0x03 # reenable interrupts - j sysc_return + j sysc_tracenogo # # kernel_execve function needs to deal with pt_regs that is not @@ -469,20 +459,18 @@ pgm_check_handler: * we just ignore the PER event (FIXME: is there anything we have to do * for LPSW?). */ - STORE_TIMER __LC_SYNC_ENTER_TIMER + stpt __LC_SYNC_ENTER_TIMER SAVE_ALL_BASE __LC_SAVE_AREA tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception jnz pgm_per # got per exception -> special case SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA -#ifdef CONFIG_VIRT_CPU_ACCOUNTING tm SP_PSW+1(%r15),0x01 # interrupting from user ? jz pgm_no_vtime UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER pgm_no_vtime: -#endif lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct mvc SP_ARGS(8,%r15),__LC_LAST_BREAK TRACE_IRQS_OFF @@ -516,14 +504,12 @@ pgm_per: pgm_per_std: SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA -#ifdef CONFIG_VIRT_CPU_ACCOUNTING tm SP_PSW+1(%r15),0x01 # interrupting from user ? 
jz pgm_no_vtime2 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER pgm_no_vtime2: -#endif lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct TRACE_IRQS_OFF lg %r1,__TI_task(%r9) @@ -545,11 +531,9 @@ pgm_no_vtime2: pgm_svcper: SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA -#ifdef CONFIG_VIRT_CPU_ACCOUNTING UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER -#endif llgh %r7,__LC_SVC_INT_CODE # get svc number from lowcore lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct lg %r1,__TI_task(%r9) @@ -575,19 +559,17 @@ kernel_per: */ .globl io_int_handler io_int_handler: - STORE_TIMER __LC_ASYNC_ENTER_TIMER + stpt __LC_ASYNC_ENTER_TIMER stck __LC_INT_CLOCK SAVE_ALL_BASE __LC_SAVE_AREA+32 SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+32 CREATE_STACK_FRAME __LC_IO_OLD_PSW,__LC_SAVE_AREA+32 -#ifdef CONFIG_VIRT_CPU_ACCOUNTING tm SP_PSW+1(%r15),0x01 # interrupting from user ? jz io_no_vtime UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER io_no_vtime: -#endif lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct TRACE_IRQS_OFF la %r2,SP_PTREGS(%r15) # address of register-save area @@ -739,19 +721,17 @@ io_notify_resume: */ .globl ext_int_handler ext_int_handler: - STORE_TIMER __LC_ASYNC_ENTER_TIMER + stpt __LC_ASYNC_ENTER_TIMER stck __LC_INT_CLOCK SAVE_ALL_BASE __LC_SAVE_AREA+32 SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32 CREATE_STACK_FRAME __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32 -#ifdef CONFIG_VIRT_CPU_ACCOUNTING tm SP_PSW+1(%r15),0x01 # interrupting from user ? jz ext_no_vtime UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER ext_no_vtime: -#endif lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct TRACE_IRQS_OFF la %r2,SP_PTREGS(%r15) # address of register-save area @@ -773,7 +753,6 @@ mcck_int_handler: la %r12,__LC_MCK_OLD_PSW tm __LC_MCCK_CODE,0x80 # system damage? jo mcck_int_main # yes -> rest of mcck code invalid -#ifdef CONFIG_VIRT_CPU_ACCOUNTING la %r14,4095 mvc __LC_SAVE_AREA+104(8),__LC_ASYNC_ENTER_TIMER mvc __LC_ASYNC_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA-4095(%r14) @@ -791,9 +770,7 @@ mcck_int_handler: la %r14,__LC_LAST_UPDATE_TIMER 0: spt 0(%r14) mvc __LC_ASYNC_ENTER_TIMER(8),0(%r14) -1: -#endif - tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid? +1: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid? jno mcck_int_main # no -> skip cleanup critical tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit jnz mcck_int_main # from user -> load kernel stack @@ -809,7 +786,6 @@ mcck_int_main: jz 0f lg %r15,__LC_PANIC_STACK # load panic stack 0: CREATE_STACK_FRAME __LC_MCK_OLD_PSW,__LC_SAVE_AREA+64 -#ifdef CONFIG_VIRT_CPU_ACCOUNTING tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid? jno mcck_no_vtime # no -> no timer update tm SP_PSW+1(%r15),0x01 # interrupting from user ? 
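The cleanup_system_call and cleanup_sysc_leave blocks in both entry.S files compare the address saved in the return PSW against small tables of label addresses (cleanup_system_call_insn, cleanup_sysc_leave_insn) to work out how far the interrupted entry or exit path got before it was restarted; with the accounting now unconditional, those tables always carry the vtime checkpoints too. A loose C illustration of that "which checkpoint did we pass" test, with invented addresses and a simplified comparison chain:

#include <stdio.h>

/* Invented addresses standing in for the labels in cleanup_system_call_insn.
 * The real cleanup code compares the saved PSW address against these to
 * decide which steps of the interrupted entry path still have to be redone;
 * this loop is a simplification of the clc/bh sequence in entry.S. */
static const unsigned long checkpoint[] = {
	0x1000,		/* sysc_saveall */
	0x1010,		/* system_call  */
	0x1020,		/* sysc_vtime   */
	0x1030,		/* sysc_stime   */
	0x1040,		/* sysc_update  */
};
#define NSTEPS (sizeof(checkpoint) / sizeof(checkpoint[0]))

static int first_unfinished_step(unsigned long saved_pc)
{
	int i;

	for (i = NSTEPS - 1; i >= 0; i--)
		if (saved_pc > checkpoint[i])
			return i + 1;	/* everything up to step i already ran */
	return 0;			/* interrupted before the first step */
}

int main(void)
{
	unsigned long pc = 0x1025;	/* pretend the restart hit us after sysc_vtime */

	printf("redo the entry path starting at step %d\n",
	       first_unfinished_step(pc));
	return 0;
}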
@@ -818,7 +794,6 @@ mcck_int_main: UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER mcck_no_vtime: -#endif lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct la %r2,SP_PTREGS(%r15) # load pt_regs brasl %r14,s390_do_machine_check @@ -839,14 +814,11 @@ mcck_return: mvc __LC_RETURN_MCCK_PSW(16),SP_PSW(%r15) # move return PSW ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit lmg %r0,%r15,SP_R0(%r15) # load gprs 0-15 -#ifdef CONFIG_VIRT_CPU_ACCOUNTING mvc __LC_ASYNC_ENTER_TIMER(8),__LC_SAVE_AREA+104 tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ? jno 0f stpt __LC_EXIT_TIMER -0: -#endif - lpswe __LC_RETURN_MCCK_PSW # back to caller +0: lpswe __LC_RETURN_MCCK_PSW # back to caller /* * Restart interruption handler, kick starter for additional CPUs @@ -964,13 +936,11 @@ cleanup_system_call: j 1f 0: la %r12,__LC_SAVE_AREA+64 1: -#ifdef CONFIG_VIRT_CPU_ACCOUNTING clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+8) jh 0f mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER 0: clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+16) jhe cleanup_vtime -#endif clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn) jh 0f mvc __LC_SAVE_AREA(32),0(%r12) @@ -981,7 +951,6 @@ cleanup_system_call: lg %r12,__LC_SAVE_AREA+96 # argh stg %r15,24(%r12) llgh %r7,__LC_SVC_INT_CODE -#ifdef CONFIG_VIRT_CPU_ACCOUNTING cleanup_vtime: clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+24) jhe cleanup_stime @@ -992,18 +961,15 @@ cleanup_stime: UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER cleanup_update: mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER -#endif mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_system_call+8) la %r12,__LC_RETURN_PSW br %r14 cleanup_system_call_insn: .quad sysc_saveall -#ifdef CONFIG_VIRT_CPU_ACCOUNTING .quad system_call .quad sysc_vtime .quad sysc_stime .quad sysc_update -#endif cleanup_sysc_return: mvc __LC_RETURN_PSW(8),0(%r12) @@ -1014,11 +980,9 @@ cleanup_sysc_return: cleanup_sysc_leave: clc 8(8,%r12),BASED(cleanup_sysc_leave_insn) je 2f -#ifdef CONFIG_VIRT_CPU_ACCOUNTING mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER clc 8(8,%r12),BASED(cleanup_sysc_leave_insn+8) je 2f -#endif mvc __LC_RETURN_PSW(16),SP_PSW(%r15) cghi %r12,__LC_MCK_OLD_PSW jne 0f @@ -1031,9 +995,7 @@ cleanup_sysc_leave: br %r14 cleanup_sysc_leave_insn: .quad sysc_done - 4 -#ifdef CONFIG_VIRT_CPU_ACCOUNTING .quad sysc_done - 8 -#endif cleanup_io_return: mvc __LC_RETURN_PSW(8),0(%r12) @@ -1044,11 +1006,9 @@ cleanup_io_return: cleanup_io_leave: clc 8(8,%r12),BASED(cleanup_io_leave_insn) je 2f -#ifdef CONFIG_VIRT_CPU_ACCOUNTING mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER clc 8(8,%r12),BASED(cleanup_io_leave_insn+8) je 2f -#endif mvc __LC_RETURN_PSW(16),SP_PSW(%r15) cghi %r12,__LC_MCK_OLD_PSW jne 0f @@ -1061,9 +1021,7 @@ cleanup_io_leave: br %r14 cleanup_io_leave_insn: .quad io_done - 4 -#ifdef CONFIG_VIRT_CPU_ACCOUNTING .quad io_done - 8 -#endif /* * Integer constants diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S index 83477c7dc743..ec7e35f6055b 100644 --- a/arch/s390/kernel/head.S +++ b/arch/s390/kernel/head.S @@ -461,6 +461,55 @@ start: .byte 0xf0,0xf1,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7 .byte 0xf8,0xf9,0xfa,0xfb,0xfc,0xfd,0xfe,0xff +# +# startup-code at 0x10000, running in absolute addressing mode +# this is called either by the ipl loader or directly by PSW restart +# or linload or SALIPL +# + .org 0x10000 +startup:basr %r13,0 # get base +.LPG0: + +#ifndef CONFIG_MARCH_G5 + # 
check processor version against MARCH_{G5,Z900,Z990,Z9_109,Z10} + stidp __LC_CPUID # store cpuid + lhi %r0,(3f-2f) / 2 + la %r1,2f-.LPG0(%r13) +0: clc __LC_CPUID+4(2),0(%r1) + jne 3f + lpsw 1f-.LPG0(13) # machine type not good enough, crash + .align 16 +1: .long 0x000a0000,0x00000000 +2: +#if defined(CONFIG_MARCH_Z10) + .short 0x9672, 0x2064, 0x2066, 0x2084, 0x2086, 0x2094, 0x2096 +#elif defined(CONFIG_MARCH_Z9_109) + .short 0x9672, 0x2064, 0x2066, 0x2084, 0x2086 +#elif defined(CONFIG_MARCH_Z990) + .short 0x9672, 0x2064, 0x2066 +#elif defined(CONFIG_MARCH_Z900) + .short 0x9672 +#endif +3: la %r1,2(%r1) + brct %r0,0b +#endif + + l %r13,0f-.LPG0(%r13) + b 0(%r13) +0: .long startup_continue + +# +# params at 10400 (setup.h) +# + .org PARMAREA + .long 0,0 # IPL_DEVICE + .long 0,0 # INITRD_START + .long 0,0 # INITRD_SIZE + + .org COMMAND_LINE + .byte "root=/dev/ram0 ro" + .byte 0 + #ifdef CONFIG_64BIT #include "head64.S" #else diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S index a816e2de32b9..db476d114caa 100644 --- a/arch/s390/kernel/head31.S +++ b/arch/s390/kernel/head31.S @@ -10,34 +10,13 @@ * */ -# -# startup-code at 0x10000, running in absolute addressing mode -# this is called either by the ipl loader or directly by PSW restart -# or linload or SALIPL -# - .org 0x10000 -startup:basr %r13,0 # get base -.LPG0: l %r13,0f-.LPG0(%r13) - b 0(%r13) -0: .long startup_continue - -# -# params at 10400 (setup.h) -# - .org PARMAREA - .long 0,0 # IPL_DEVICE - .long 0,0 # INITRD_START - .long 0,0 # INITRD_SIZE - - .org COMMAND_LINE - .byte "root=/dev/ram0 ro" - .byte 0 - .org 0x11000 startup_continue: basr %r13,0 # get base -.LPG1: mvi __LC_AR_MODE_ID,0 # set ESA flag (mode 0) +.LPG1: + + mvi __LC_AR_MODE_ID,0 # set ESA flag (mode 0) lctl %c0,%c15,.Lctl-.LPG1(%r13) # load control registers l %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area # move IPL device to lowcore @@ -50,7 +29,6 @@ startup_continue: ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union+THREAD_SIZE st %r15,__LC_KERNEL_STACK # set end of kernel stack ahi %r15,-96 - xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain # # Save ipl parameters, clear bss memory, initialize storage key for kernel pages, # and create a kernel NSS if the SAVESYS= parm is defined diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S index 1d06961e87b3..3ccd36b24b8f 100644 --- a/arch/s390/kernel/head64.S +++ b/arch/s390/kernel/head64.S @@ -10,29 +10,6 @@ * */ -# -# startup-code at 0x10000, running in absolute addressing mode -# this is called either by the ipl loader or directly by PSW restart -# or linload or SALIPL -# - .org 0x10000 -startup:basr %r13,0 # get base -.LPG0: l %r13,0f-.LPG0(%r13) - b 0(%r13) -0: .long startup_continue - -# -# params at 10400 (setup.h) -# - .org PARMAREA - .quad 0 # IPL_DEVICE - .quad 0 # INITRD_START - .quad 0 # INITRD_SIZE - - .org COMMAND_LINE - .byte "root=/dev/ram0 ro" - .byte 0 - .org 0x11000 startup_continue: @@ -119,7 +96,6 @@ startup_continue: aghi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union + THREAD_SIZE stg %r15,__LC_KERNEL_STACK # set end of kernel stack aghi %r15,-160 - xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain # # Save ipl parameters, clear bss memory, initialize storage key for kernel pages, # and create a kernel NSS if the SAVESYS= parm is defined diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S new file mode 100644 index 000000000000..397d131a345f --- /dev/null +++ b/arch/s390/kernel/mcount.S @@ -0,0 +1,56 @@ 
+/* + * Copyright IBM Corp. 2008 + * + * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>, + * + */ + +#ifndef CONFIG_64BIT +.globl _mcount +_mcount: + stm %r0,%r5,8(%r15) + st %r14,56(%r15) + lr %r1,%r15 + ahi %r15,-96 + l %r3,100(%r15) + la %r2,0(%r14) + st %r1,0(%r15) + la %r3,0(%r3) + bras %r14,0f + .long ftrace_trace_function +0: l %r14,0(%r14) + l %r14,0(%r14) + basr %r14,%r14 + ahi %r15,96 + lm %r0,%r5,8(%r15) + l %r14,56(%r15) + br %r14 + +.globl ftrace_stub +ftrace_stub: + br %r14 + +#else /* CONFIG_64BIT */ + +.globl _mcount +_mcount: + stmg %r0,%r5,16(%r15) + stg %r14,112(%r15) + lgr %r1,%r15 + aghi %r15,-160 + stg %r1,0(%r15) + lgr %r2,%r14 + lg %r3,168(%r15) + larl %r14,ftrace_trace_function + lg %r14,0(%r14) + basr %r14,%r14 + aghi %r15,160 + lmg %r0,%r5,16(%r15) + lg %r14,112(%r15) + br %r14 + +.globl ftrace_stub +ftrace_stub: + br %r14 + +#endif /* CONFIG_64BIT */ diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c new file mode 100644 index 000000000000..82c1872cfe80 --- /dev/null +++ b/arch/s390/kernel/processor.c @@ -0,0 +1,98 @@ +/* + * arch/s390/kernel/processor.c + * + * Copyright IBM Corp. 2008 + * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) + */ + +#define KMSG_COMPONENT "cpu" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/smp.h> +#include <linux/seq_file.h> +#include <linux/delay.h> + +#include <asm/elf.h> +#include <asm/lowcore.h> +#include <asm/param.h> + +void __cpuinit print_cpu_info(struct cpuinfo_S390 *cpuinfo) +{ + pr_info("Processor %d started, address %d, identification %06X\n", + cpuinfo->cpu_nr, cpuinfo->cpu_addr, cpuinfo->cpu_id.ident); +} + +/* + * show_cpuinfo - Get information on one CPU for use by procfs. + */ + +static int show_cpuinfo(struct seq_file *m, void *v) +{ + static const char *hwcap_str[8] = { + "esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp", + "edat" + }; + struct cpuinfo_S390 *cpuinfo; + unsigned long n = (unsigned long) v - 1; + int i; + + s390_adjust_jiffies(); + preempt_disable(); + if (!n) { + seq_printf(m, "vendor_id : IBM/S390\n" + "# processors : %i\n" + "bogomips per cpu: %lu.%02lu\n", + num_online_cpus(), loops_per_jiffy/(500000/HZ), + (loops_per_jiffy/(5000/HZ))%100); + seq_puts(m, "features\t: "); + for (i = 0; i < 8; i++) + if (hwcap_str[i] && (elf_hwcap & (1UL << i))) + seq_printf(m, "%s ", hwcap_str[i]); + seq_puts(m, "\n"); + } + + if (cpu_online(n)) { +#ifdef CONFIG_SMP + if (smp_processor_id() == n) + cpuinfo = &S390_lowcore.cpu_data; + else + cpuinfo = &lowcore_ptr[n]->cpu_data; +#else + cpuinfo = &S390_lowcore.cpu_data; +#endif + seq_printf(m, "processor %li: " + "version = %02X, " + "identification = %06X, " + "machine = %04X\n", + n, cpuinfo->cpu_id.version, + cpuinfo->cpu_id.ident, + cpuinfo->cpu_id.machine); + } + preempt_enable(); + return 0; +} + +static void *c_start(struct seq_file *m, loff_t *pos) +{ + return *pos < NR_CPUS ? 
(void *)((unsigned long) *pos + 1) : NULL; +} + +static void *c_next(struct seq_file *m, void *v, loff_t *pos) +{ + ++*pos; + return c_start(m, pos); +} + +static void c_stop(struct seq_file *m, void *v) +{ +} + +const struct seq_operations cpuinfo_op = { + .start = c_start, + .next = c_next, + .stop = c_stop, + .show = show_cpuinfo, +}; + diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index 38ff2bce1203..75c496f4f16d 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c @@ -204,7 +204,6 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr) static int peek_user(struct task_struct *child, addr_t addr, addr_t data) { - struct user *dummy = NULL; addr_t tmp, mask; /* @@ -213,8 +212,8 @@ peek_user(struct task_struct *child, addr_t addr, addr_t data) */ mask = __ADDR_MASK; #ifdef CONFIG_64BIT - if (addr >= (addr_t) &dummy->regs.acrs && - addr < (addr_t) &dummy->regs.orig_gpr2) + if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs && + addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2) mask = 3; #endif if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK) @@ -312,7 +311,6 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data) static int poke_user(struct task_struct *child, addr_t addr, addr_t data) { - struct user *dummy = NULL; addr_t mask; /* @@ -321,8 +319,8 @@ poke_user(struct task_struct *child, addr_t addr, addr_t data) */ mask = __ADDR_MASK; #ifdef CONFIG_64BIT - if (addr >= (addr_t) &dummy->regs.acrs && - addr < (addr_t) &dummy->regs.orig_gpr2) + if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs && + addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2) mask = 3; #endif if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK) diff --git a/arch/s390/kernel/s390_ksyms.c b/arch/s390/kernel/s390_ksyms.c index 48238a114ce9..46b90cb03707 100644 --- a/arch/s390/kernel/s390_ksyms.c +++ b/arch/s390/kernel/s390_ksyms.c @@ -14,6 +14,7 @@ #include <asm/delay.h> #include <asm/pgalloc.h> #include <asm/setup.h> +#include <asm/ftrace.h> #ifdef CONFIG_IP_MULTICAST #include <net/arp.h> #endif @@ -43,3 +44,7 @@ EXPORT_SYMBOL(csum_fold); EXPORT_SYMBOL(console_mode); EXPORT_SYMBOL(console_devno); EXPORT_SYMBOL(console_irq); + +#ifdef CONFIG_FUNCTION_TRACER +EXPORT_SYMBOL(_mcount); +#endif diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 400b040df7fa..b7a1efd5522c 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -14,6 +14,9 @@ * This file handles the architecture-dependent parts of initialization */ +#define KMSG_COMPONENT "setup" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include <linux/errno.h> #include <linux/module.h> #include <linux/sched.h> @@ -32,7 +35,6 @@ #include <linux/bootmem.h> #include <linux/root_dev.h> #include <linux/console.h> -#include <linux/seq_file.h> #include <linux/kernel_stat.h> #include <linux/device.h> #include <linux/notifier.h> @@ -291,8 +293,8 @@ unsigned int switch_amode = 0; #endif EXPORT_SYMBOL_GPL(switch_amode); -static void set_amode_and_uaccess(unsigned long user_amode, - unsigned long user32_amode) +static int set_amode_and_uaccess(unsigned long user_amode, + unsigned long user32_amode) { psw_user_bits = PSW_BASE_BITS | PSW_MASK_DAT | user_amode | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK | @@ -309,11 +311,11 @@ static void set_amode_and_uaccess(unsigned long user_amode, PSW_MASK_MCHECK | PSW_DEFAULT_KEY; if (MACHINE_HAS_MVCOS) { - printk("mvcos available.\n"); memcpy(&uaccess, &uaccess_mvcos_switch, 
sizeof(uaccess)); + return 1; } else { - printk("mvcos not available.\n"); memcpy(&uaccess, &uaccess_pt, sizeof(uaccess)); + return 0; } } @@ -328,9 +330,10 @@ static int __init early_parse_switch_amode(char *p) early_param("switch_amode", early_parse_switch_amode); #else /* CONFIG_S390_SWITCH_AMODE */ -static inline void set_amode_and_uaccess(unsigned long user_amode, - unsigned long user32_amode) +static inline int set_amode_and_uaccess(unsigned long user_amode, + unsigned long user32_amode) { + return 0; } #endif /* CONFIG_S390_SWITCH_AMODE */ @@ -355,11 +358,20 @@ early_param("noexec", early_parse_noexec); static void setup_addressing_mode(void) { if (s390_noexec) { - printk("S390 execute protection active, "); - set_amode_and_uaccess(PSW_ASC_SECONDARY, PSW32_ASC_SECONDARY); + if (set_amode_and_uaccess(PSW_ASC_SECONDARY, + PSW32_ASC_SECONDARY)) + pr_info("Execute protection active, " + "mvcos available\n"); + else + pr_info("Execute protection active, " + "mvcos not available\n"); } else if (switch_amode) { - printk("S390 address spaces switched, "); - set_amode_and_uaccess(PSW_ASC_PRIMARY, PSW32_ASC_PRIMARY); + if (set_amode_and_uaccess(PSW_ASC_PRIMARY, PSW32_ASC_PRIMARY)) + pr_info("Address spaces switched, " + "mvcos available\n"); + else + pr_info("Address spaces switched, " + "mvcos not available\n"); } #ifdef CONFIG_TRACE_IRQFLAGS sysc_restore_trace_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK; @@ -572,15 +584,15 @@ setup_memory(void) start = PFN_PHYS(start_pfn) + bmap_size + PAGE_SIZE; if (start + INITRD_SIZE > memory_end) { - printk("initrd extends beyond end of memory " - "(0x%08lx > 0x%08lx)\n" + pr_err("initrd extends beyond end of " + "memory (0x%08lx > 0x%08lx) " "disabling initrd\n", start + INITRD_SIZE, memory_end); INITRD_START = INITRD_SIZE = 0; } else { - printk("Moving initrd (0x%08lx -> 0x%08lx, " - "size: %ld)\n", - INITRD_START, start, INITRD_SIZE); + pr_info("Moving initrd (0x%08lx -> " + "0x%08lx, size: %ld)\n", + INITRD_START, start, INITRD_SIZE); memmove((void *) start, (void *) INITRD_START, INITRD_SIZE); INITRD_START = start; @@ -642,8 +654,9 @@ setup_memory(void) initrd_start = INITRD_START; initrd_end = initrd_start + INITRD_SIZE; } else { - printk("initrd extends beyond end of memory " - "(0x%08lx > 0x%08lx)\ndisabling initrd\n", + pr_err("initrd extends beyond end of " + "memory (0x%08lx > 0x%08lx) " + "disabling initrd\n", initrd_start + INITRD_SIZE, memory_end); initrd_start = initrd_end = 0; } @@ -651,23 +664,6 @@ setup_memory(void) #endif } -static int __init __stfle(unsigned long long *list, int doublewords) -{ - typedef struct { unsigned long long _[doublewords]; } addrtype; - register unsigned long __nr asm("0") = doublewords - 1; - - asm volatile(".insn s,0xb2b00000,%0" /* stfle */ - : "=m" (*(addrtype *) list), "+d" (__nr) : : "cc"); - return __nr + 1; -} - -int __init stfle(unsigned long long *list, int doublewords) -{ - if (!(stfl() & (1UL << 24))) - return -EOPNOTSUPP; - return __stfle(list, doublewords); -} - /* * Setup hardware capabilities. 
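setup.c (like smp.c, time.c and topology.c later in this diff) now defines KMSG_COMPONENT and a pr_fmt() macro, so every converted pr_info()/pr_err() call is prefixed with the component name by the preprocessor instead of repeating it at each call site. A minimal userspace analogue of that pattern, with homemade pr_* macros standing in for the kernel's (GNU-style variadic macros assumed):

#include <stdio.h>

/* Userspace sketch of the KMSG_COMPONENT/pr_fmt convention: string-literal
 * concatenation glues the component name onto every format string. */
#define KMSG_COMPONENT "setup"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#define pr_info(fmt, ...) printf("info: " pr_fmt(fmt), ##__VA_ARGS__)
#define pr_err(fmt, ...)  fprintf(stderr, "err: " pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	pr_info("Linux is running natively in 64-bit mode\n");
	pr_err("initrd extends beyond end of memory (0x%08lx > 0x%08lx)\n",
	       0x10000000UL, 0x08000000UL);
	return 0;
}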
*/ @@ -739,8 +735,13 @@ static void __init setup_hwcaps(void) strcpy(elf_platform, "z990"); break; case 0x2094: + case 0x2096: strcpy(elf_platform, "z9-109"); break; + case 0x2097: + case 0x2098: + strcpy(elf_platform, "z10"); + break; } } @@ -752,25 +753,34 @@ static void __init setup_hwcaps(void) void __init setup_arch(char **cmdline_p) { + /* set up preferred console */ + add_preferred_console("ttyS", 0, NULL); + /* * print what head.S has found out about the machine */ #ifndef CONFIG_64BIT - printk((MACHINE_IS_VM) ? - "We are running under VM (31 bit mode)\n" : - "We are running native (31 bit mode)\n"); - printk((MACHINE_HAS_IEEE) ? - "This machine has an IEEE fpu\n" : - "This machine has no IEEE fpu\n"); + if (MACHINE_IS_VM) + pr_info("Linux is running as a z/VM " + "guest operating system in 31-bit mode\n"); + else + pr_info("Linux is running natively in 31-bit mode\n"); + if (MACHINE_HAS_IEEE) + pr_info("The hardware system has IEEE compatible " + "floating point units\n"); + else + pr_info("The hardware system has no IEEE compatible " + "floating point units\n"); #else /* CONFIG_64BIT */ if (MACHINE_IS_VM) - printk("We are running under VM (64 bit mode)\n"); + pr_info("Linux is running as a z/VM " + "guest operating system in 64-bit mode\n"); else if (MACHINE_IS_KVM) { - printk("We are running under KVM (64 bit mode)\n"); + pr_info("Linux is running under KVM in 64-bit mode\n"); add_preferred_console("hvc", 0, NULL); s390_virtio_console_init(); } else - printk("We are running native (64 bit mode)\n"); + pr_info("Linux is running natively in 64-bit mode\n"); #endif /* CONFIG_64BIT */ /* Have one command line that is parsed and saved in /proc/cmdline */ @@ -818,90 +828,3 @@ setup_arch(char **cmdline_p) /* Setup zfcpdump support */ setup_zfcpdump(console_devno); } - -void __cpuinit print_cpu_info(struct cpuinfo_S390 *cpuinfo) -{ - printk(KERN_INFO "cpu %d " -#ifdef CONFIG_SMP - "phys_idx=%d " -#endif - "vers=%02X ident=%06X machine=%04X unused=%04X\n", - cpuinfo->cpu_nr, -#ifdef CONFIG_SMP - cpuinfo->cpu_addr, -#endif - cpuinfo->cpu_id.version, - cpuinfo->cpu_id.ident, - cpuinfo->cpu_id.machine, - cpuinfo->cpu_id.unused); -} - -/* - * show_cpuinfo - Get information on one CPU for use by procfs. - */ - -static int show_cpuinfo(struct seq_file *m, void *v) -{ - static const char *hwcap_str[8] = { - "esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp", - "edat" - }; - struct cpuinfo_S390 *cpuinfo; - unsigned long n = (unsigned long) v - 1; - int i; - - s390_adjust_jiffies(); - preempt_disable(); - if (!n) { - seq_printf(m, "vendor_id : IBM/S390\n" - "# processors : %i\n" - "bogomips per cpu: %lu.%02lu\n", - num_online_cpus(), loops_per_jiffy/(500000/HZ), - (loops_per_jiffy/(5000/HZ))%100); - seq_puts(m, "features\t: "); - for (i = 0; i < 8; i++) - if (hwcap_str[i] && (elf_hwcap & (1UL << i))) - seq_printf(m, "%s ", hwcap_str[i]); - seq_puts(m, "\n"); - } - - if (cpu_online(n)) { -#ifdef CONFIG_SMP - if (smp_processor_id() == n) - cpuinfo = &S390_lowcore.cpu_data; - else - cpuinfo = &lowcore_ptr[n]->cpu_data; -#else - cpuinfo = &S390_lowcore.cpu_data; -#endif - seq_printf(m, "processor %li: " - "version = %02X, " - "identification = %06X, " - "machine = %04X\n", - n, cpuinfo->cpu_id.version, - cpuinfo->cpu_id.ident, - cpuinfo->cpu_id.machine); - } - preempt_enable(); - return 0; -} - -static void *c_start(struct seq_file *m, loff_t *pos) -{ - return *pos < NR_CPUS ? 
(void *)((unsigned long) *pos + 1) : NULL; -} -static void *c_next(struct seq_file *m, void *v, loff_t *pos) -{ - ++*pos; - return c_start(m, pos); -} -static void c_stop(struct seq_file *m, void *v) -{ -} -const struct seq_operations cpuinfo_op = { - .start = c_start, - .next = c_next, - .stop = c_stop, - .show = show_cpuinfo, -}; - diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index b5595688a477..6fc78541dc57 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -20,6 +20,9 @@ * cpu_number_map in other architectures. */ +#define KMSG_COMPONENT "cpu" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include <linux/module.h> #include <linux/init.h> #include <linux/mm.h> @@ -77,159 +80,6 @@ static DEFINE_PER_CPU(struct cpu, cpu_devices); static void smp_ext_bitcall(int, ec_bit_sig); -/* - * Structure and data for __smp_call_function_map(). This is designed to - * minimise static memory requirements. It also looks cleaner. - */ -static DEFINE_SPINLOCK(call_lock); - -struct call_data_struct { - void (*func) (void *info); - void *info; - cpumask_t started; - cpumask_t finished; - int wait; -}; - -static struct call_data_struct *call_data; - -/* - * 'Call function' interrupt callback - */ -static void do_call_function(void) -{ - void (*func) (void *info) = call_data->func; - void *info = call_data->info; - int wait = call_data->wait; - - cpu_set(smp_processor_id(), call_data->started); - (*func)(info); - if (wait) - cpu_set(smp_processor_id(), call_data->finished);; -} - -static void __smp_call_function_map(void (*func) (void *info), void *info, - int wait, cpumask_t map) -{ - struct call_data_struct data; - int cpu, local = 0; - - /* - * Can deadlock when interrupts are disabled or if in wrong context. - */ - WARN_ON(irqs_disabled() || in_irq()); - - /* - * Check for local function call. We have to have the same call order - * as in on_each_cpu() because of machine_restart_smp(). - */ - if (cpu_isset(smp_processor_id(), map)) { - local = 1; - cpu_clear(smp_processor_id(), map); - } - - cpus_and(map, map, cpu_online_map); - if (cpus_empty(map)) - goto out; - - data.func = func; - data.info = info; - data.started = CPU_MASK_NONE; - data.wait = wait; - if (wait) - data.finished = CPU_MASK_NONE; - - call_data = &data; - - for_each_cpu_mask(cpu, map) - smp_ext_bitcall(cpu, ec_call_function); - - /* Wait for response */ - while (!cpus_equal(map, data.started)) - cpu_relax(); - if (wait) - while (!cpus_equal(map, data.finished)) - cpu_relax(); -out: - if (local) { - local_irq_disable(); - func(info); - local_irq_enable(); - } -} - -/* - * smp_call_function: - * @func: the function to run; this must be fast and non-blocking - * @info: an arbitrary pointer to pass to the function - * @wait: if true, wait (atomically) until function has completed on other CPUs - * - * Run a function on all other CPUs. - * - * You must not call this function with disabled interrupts, from a - * hardware interrupt handler or from a bottom half. 
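The /proc/cpuinfo code that moves from setup.c into the new processor.c is a standard seq_file iterator: c_start() and c_next() walk the CPU positions, show_cpuinfo() emits one record per position, and cpuinfo_op ties the callbacks together. A self-contained sketch of that iteration shape, with invented data and plain printf instead of the seq_file API:

#include <stdio.h>

#define NR_CPUS 4

static const int cpu_online_flag[NR_CPUS] = { 1, 1, 0, 1 };

/* Positions run 0..NR_CPUS-1; the cursor is encoded as pos+1 so that a
 * NULL return can mean "end of sequence", just like the kernel iterator. */
static void *c_start(long *pos)
{
	return *pos < NR_CPUS ? (void *)(unsigned long)(*pos + 1) : NULL;
}

static void *c_next(long *pos)
{
	++*pos;
	return c_start(pos);
}

static void show_cpuinfo(void *v)
{
	long n = (long)v - 1;

	if (n == 0)
		printf("vendor_id : IBM/S390\n");	/* header only once */
	if (cpu_online_flag[n])
		printf("processor %ld: online\n", n);
}

int main(void)
{
	long pos = 0;
	void *v;

	for (v = c_start(&pos); v; v = c_next(&pos))
		show_cpuinfo(v);
	return 0;
}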
- */ -int smp_call_function(void (*func) (void *info), void *info, int wait) -{ - cpumask_t map; - - spin_lock(&call_lock); - map = cpu_online_map; - cpu_clear(smp_processor_id(), map); - __smp_call_function_map(func, info, wait, map); - spin_unlock(&call_lock); - return 0; -} -EXPORT_SYMBOL(smp_call_function); - -/* - * smp_call_function_single: - * @cpu: the CPU where func should run - * @func: the function to run; this must be fast and non-blocking - * @info: an arbitrary pointer to pass to the function - * @wait: if true, wait (atomically) until function has completed on other CPUs - * - * Run a function on one processor. - * - * You must not call this function with disabled interrupts, from a - * hardware interrupt handler or from a bottom half. - */ -int smp_call_function_single(int cpu, void (*func) (void *info), void *info, - int wait) -{ - spin_lock(&call_lock); - __smp_call_function_map(func, info, wait, cpumask_of_cpu(cpu)); - spin_unlock(&call_lock); - return 0; -} -EXPORT_SYMBOL(smp_call_function_single); - -/** - * smp_call_function_mask(): Run a function on a set of other CPUs. - * @mask: The set of cpus to run on. Must not include the current cpu. - * @func: The function to run. This must be fast and non-blocking. - * @info: An arbitrary pointer to pass to the function. - * @wait: If true, wait (atomically) until function has completed on other CPUs. - * - * Returns 0 on success, else a negative status code. - * - * If @wait is true, then returns once @func has returned; otherwise - * it returns just before the target cpu calls @func. - * - * You must not call this function with disabled interrupts or from a - * hardware interrupt handler or from a bottom half handler. - */ -int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info, - int wait) -{ - spin_lock(&call_lock); - cpu_clear(smp_processor_id(), mask); - __smp_call_function_map(func, info, wait, mask); - spin_unlock(&call_lock); - return 0; -} -EXPORT_SYMBOL(smp_call_function_mask); - void smp_send_stop(void) { int cpu, rc; @@ -271,7 +121,10 @@ static void do_ext_call_interrupt(__u16 code) bits = xchg(&S390_lowcore.ext_call_fast, 0); if (test_bit(ec_call_function, &bits)) - do_call_function(); + generic_smp_call_function_interrupt(); + + if (test_bit(ec_call_function_single, &bits)) + generic_smp_call_function_single_interrupt(); } /* @@ -288,6 +141,19 @@ static void smp_ext_bitcall(int cpu, ec_bit_sig sig) udelay(10); } +void arch_send_call_function_ipi(cpumask_t mask) +{ + int cpu; + + for_each_cpu_mask(cpu, mask) + smp_ext_bitcall(cpu, ec_call_function); +} + +void arch_send_call_function_single_ipi(int cpu) +{ + smp_ext_bitcall(cpu, ec_call_function_single); +} + #ifndef CONFIG_64BIT /* * this function sends a 'purge tlb' signal to another CPU. 
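With the private smp_call_function machinery removed, smp.c keeps only the low-level doorbell: a sender sets a request bit in the target's ext_call_fast word (and raises an external call signal), and do_ext_call_interrupt() swaps the word to zero and dispatches to the generic helpers for every bit it found. A C11-atomics sketch of that bit protocol, with a single pretend CPU, an invented word and no real interrupt delivery:

#include <stdatomic.h>
#include <stdio.h>

enum { ec_call_function, ec_call_function_single };

static _Atomic unsigned long ext_call_fast;

static void smp_ext_bitcall(int sig)
{
	/* set the request bit; the real code also sends a SIGP external call */
	atomic_fetch_or(&ext_call_fast, 1UL << sig);
}

static void do_ext_call_interrupt(void)
{
	/* grab and clear all pending requests in one atomic swap */
	unsigned long bits = atomic_exchange(&ext_call_fast, 0UL);

	if (bits & (1UL << ec_call_function))
		printf("run queued call-function requests\n");
	if (bits & (1UL << ec_call_function_single))
		printf("run the single-CPU call-function request\n");
}

int main(void)
{
	smp_ext_bitcall(ec_call_function);
	smp_ext_bitcall(ec_call_function_single);
	do_ext_call_interrupt();	/* pretend the external interrupt arrived */
	return 0;
}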
@@ -388,8 +254,8 @@ static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) if (ipl_info.type != IPL_TYPE_FCP_DUMP) return; if (cpu >= NR_CPUS) { - printk(KERN_WARNING "Registers for cpu %i not saved since dump " - "kernel was compiled with NR_CPUS=%i\n", cpu, NR_CPUS); + pr_warning("CPU %i exceeds the maximum %i and is excluded from " + "the dump\n", cpu, NR_CPUS - 1); return; } zfcpdump_save_areas[cpu] = kmalloc(sizeof(union save_area), GFP_KERNEL); @@ -562,7 +428,7 @@ static void __init smp_detect_cpus(void) } out: kfree(info); - printk(KERN_INFO "CPUs: %d configured, %d standby\n", c_cpus, s_cpus); + pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus); get_online_cpus(); __smp_rescan_cpus(); put_online_cpus(); @@ -578,19 +444,17 @@ int __cpuinit start_secondary(void *cpuvoid) preempt_disable(); /* Enable TOD clock interrupts on the secondary cpu. */ init_cpu_timer(); -#ifdef CONFIG_VIRT_TIMER /* Enable cpu timer interrupts on the secondary cpu. */ init_cpu_vtimer(); -#endif /* Enable pfault pseudo page faults on this cpu. */ pfault_init(); /* call cpu notifiers */ notify_cpu_starting(smp_processor_id()); /* Mark this cpu as online */ - spin_lock(&call_lock); + ipi_call_lock(); cpu_set(smp_processor_id(), cpu_online_map); - spin_unlock(&call_lock); + ipi_call_unlock(); /* Switch on interrupts */ local_irq_enable(); /* Print info about this processor */ @@ -639,18 +503,15 @@ static int __cpuinit smp_alloc_lowcore(int cpu) save_area = get_zeroed_page(GFP_KERNEL); if (!save_area) - goto out_save_area; + goto out; lowcore->extended_save_area_addr = (u32) save_area; } #endif lowcore_ptr[cpu] = lowcore; return 0; -#ifndef CONFIG_64BIT -out_save_area: - free_page(panic_stack); -#endif out: + free_page(panic_stack); free_pages(async_stack, ASYNC_ORDER); free_pages((unsigned long) lowcore, lc_order); return -ENOMEM; @@ -690,12 +551,8 @@ int __cpuinit __cpu_up(unsigned int cpu) ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]), cpu, sigp_set_prefix); - if (ccode) { - printk("sigp_set_prefix failed for cpu %d " - "with condition code %d\n", - (int) cpu, (int) ccode); + if (ccode) return -EIO; - } idle = current_set[cpu]; cpu_lowcore = lowcore_ptr[cpu]; @@ -778,7 +635,7 @@ void __cpu_die(unsigned int cpu) while (!smp_cpu_not_running(cpu)) cpu_relax(); smp_free_lowcore(cpu); - printk(KERN_INFO "Processor %d spun down\n", cpu); + pr_info("Processor %d stopped\n", cpu); } void cpu_die(void) diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index eccefbbff887..5be981a36c3e 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -12,6 +12,9 @@ * Copyright (C) 1991, 1992, 1995 Linus Torvalds */ +#define KMSG_COMPONENT "time" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include <linux/errno.h> #include <linux/module.h> #include <linux/sched.h> @@ -20,6 +23,8 @@ #include <linux/string.h> #include <linux/mm.h> #include <linux/interrupt.h> +#include <linux/cpu.h> +#include <linux/stop_machine.h> #include <linux/time.h> #include <linux/sysdev.h> #include <linux/delay.h> @@ -36,6 +41,7 @@ #include <asm/delay.h> #include <asm/s390_ext.h> #include <asm/div64.h> +#include <asm/vdso.h> #include <asm/irq.h> #include <asm/irq_regs.h> #include <asm/timer.h> @@ -223,6 +229,36 @@ static struct clocksource clocksource_tod = { }; +void update_vsyscall(struct timespec *wall_time, struct clocksource *clock) +{ + if (clock != &clocksource_tod) + return; + + /* Make userspace gettimeofday spin until we're done. 
*/ + ++vdso_data->tb_update_count; + smp_wmb(); + vdso_data->xtime_tod_stamp = clock->cycle_last; + vdso_data->xtime_clock_sec = xtime.tv_sec; + vdso_data->xtime_clock_nsec = xtime.tv_nsec; + vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec; + vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec; + smp_wmb(); + ++vdso_data->tb_update_count; +} + +extern struct timezone sys_tz; + +void update_vsyscall_tz(void) +{ + /* Make userspace gettimeofday spin until we're done. */ + ++vdso_data->tb_update_count; + smp_wmb(); + vdso_data->tz_minuteswest = sys_tz.tz_minuteswest; + vdso_data->tz_dsttime = sys_tz.tz_dsttime; + smp_wmb(); + ++vdso_data->tb_update_count; +} + /* * Initialize the TOD clock and the CPU timer of * the boot cpu. @@ -253,10 +289,8 @@ void __init time_init(void) /* Enable TOD clock interrupts on the boot cpu. */ init_cpu_timer(); - -#ifdef CONFIG_VIRT_TIMER + /* Enable cpu timer interrupts on the boot cpu. */ vtime_init(); -#endif } /* @@ -288,8 +322,8 @@ static unsigned long long adjust_time(unsigned long long old, } sched_clock_base_cc += delta; if (adjust.offset != 0) { - printk(KERN_NOTICE "etr: time adjusted by %li micro-seconds\n", - adjust.offset); + pr_notice("The ETR interface has adjusted the clock " + "by %li microseconds\n", adjust.offset); adjust.modes = ADJ_OFFSET_SINGLESHOT; do_adjtimex(&adjust); } @@ -360,6 +394,15 @@ static void enable_sync_clock(void) atomic_set_mask(0x80000000, sw_ptr); } +/* Single threaded workqueue used for etr and stp sync events */ +static struct workqueue_struct *time_sync_wq; + +static void __init time_init_wq(void) +{ + if (!time_sync_wq) + time_sync_wq = create_singlethread_workqueue("timesync"); +} + /* * External Time Reference (ETR) code. */ @@ -425,6 +468,7 @@ static struct timer_list etr_timer; static void etr_timeout(unsigned long dummy); static void etr_work_fn(struct work_struct *work); +static DEFINE_MUTEX(etr_work_mutex); static DECLARE_WORK(etr_work, etr_work_fn); /* @@ -440,8 +484,8 @@ static void etr_reset(void) etr_tolec = get_clock(); set_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags); } else if (etr_port0_online || etr_port1_online) { - printk(KERN_WARNING "Running on non ETR capable " - "machine, only local mode available.\n"); + pr_warning("The real or virtual hardware system does " + "not provide an ETR interface\n"); etr_port0_online = etr_port1_online = 0; } } @@ -452,17 +496,18 @@ static int __init etr_init(void) if (!test_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags)) return 0; + time_init_wq(); /* Check if this machine has the steai instruction. 
*/ if (etr_steai(&aib, ETR_STEAI_STEPPING_PORT) == 0) etr_steai_available = 1; setup_timer(&etr_timer, etr_timeout, 0UL); if (etr_port0_online) { set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events); - schedule_work(&etr_work); + queue_work(time_sync_wq, &etr_work); } if (etr_port1_online) { set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events); - schedule_work(&etr_work); + queue_work(time_sync_wq, &etr_work); } return 0; } @@ -489,7 +534,7 @@ void etr_switch_to_local(void) if (test_bit(CLOCK_SYNC_ETR, &clock_sync_flags)) disable_sync_clock(NULL); set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events); - schedule_work(&etr_work); + queue_work(time_sync_wq, &etr_work); } /* @@ -505,7 +550,7 @@ void etr_sync_check(void) if (test_bit(CLOCK_SYNC_ETR, &clock_sync_flags)) disable_sync_clock(NULL); set_bit(ETR_EVENT_SYNC_CHECK, &etr_events); - schedule_work(&etr_work); + queue_work(time_sync_wq, &etr_work); } /* @@ -529,13 +574,13 @@ static void etr_timing_alert(struct etr_irq_parm *intparm) * Both ports are not up-to-date now. */ set_bit(ETR_EVENT_PORT_ALERT, &etr_events); - schedule_work(&etr_work); + queue_work(time_sync_wq, &etr_work); } static void etr_timeout(unsigned long dummy) { set_bit(ETR_EVENT_UPDATE, &etr_events); - schedule_work(&etr_work); + queue_work(time_sync_wq, &etr_work); } /* @@ -642,14 +687,16 @@ static int etr_aib_follows(struct etr_aib *a1, struct etr_aib *a2, int p) } struct clock_sync_data { + atomic_t cpus; int in_sync; unsigned long long fixup_cc; + int etr_port; + struct etr_aib *etr_aib; }; -static void clock_sync_cpu_start(void *dummy) +static void clock_sync_cpu(struct clock_sync_data *sync) { - struct clock_sync_data *sync = dummy; - + atomic_dec(&sync->cpus); enable_sync_clock(); /* * This looks like a busy wait loop but it isn't. etr_sync_cpus @@ -675,39 +722,35 @@ static void clock_sync_cpu_start(void *dummy) fixup_clock_comparator(sync->fixup_cc); } -static void clock_sync_cpu_end(void *dummy) -{ -} - /* * Sync the TOD clock using the port refered to by aibp. This port * has to be enabled and the other port has to be disabled. The * last eacr update has to be more than 1.6 seconds in the past. */ -static int etr_sync_clock(struct etr_aib *aib, int port) +static int etr_sync_clock(void *data) { - struct etr_aib *sync_port; - struct clock_sync_data etr_sync; + static int first; unsigned long long clock, old_clock, delay, delta; - int follows; + struct clock_sync_data *etr_sync; + struct etr_aib *sync_port, *aib; + int port; int rc; - /* Check if the current aib is adjacent to the sync port aib. */ - sync_port = (port == 0) ? &etr_port0 : &etr_port1; - follows = etr_aib_follows(sync_port, aib, port); - memcpy(sync_port, aib, sizeof(*aib)); - if (!follows) - return -EAGAIN; + etr_sync = data; - /* - * Catch all other cpus and make them wait until we have - * successfully synced the clock. smp_call_function will - * return after all other cpus are in etr_sync_cpu_start. - */ - memset(&etr_sync, 0, sizeof(etr_sync)); - preempt_disable(); - smp_call_function(clock_sync_cpu_start, &etr_sync, 0); - local_irq_disable(); + if (xchg(&first, 1) == 1) { + /* Slave */ + clock_sync_cpu(etr_sync); + return 0; + } + + /* Wait until all other cpus entered the sync function. */ + while (atomic_read(&etr_sync->cpus) != 0) + cpu_relax(); + + port = etr_sync->etr_port; + aib = etr_sync->etr_aib; + sync_port = (port == 0) ? &etr_port0 : &etr_port1; enable_sync_clock(); /* Set clock to next OTE. 
*/ @@ -724,16 +767,16 @@ static int etr_sync_clock(struct etr_aib *aib, int port) delay = (unsigned long long) (aib->edf2.etv - sync_port->edf2.etv) << 32; delta = adjust_time(old_clock, clock, delay); - etr_sync.fixup_cc = delta; + etr_sync->fixup_cc = delta; fixup_clock_comparator(delta); /* Verify that the clock is properly set. */ if (!etr_aib_follows(sync_port, aib, port)) { /* Didn't work. */ disable_sync_clock(NULL); - etr_sync.in_sync = -EAGAIN; + etr_sync->in_sync = -EAGAIN; rc = -EAGAIN; } else { - etr_sync.in_sync = 1; + etr_sync->in_sync = 1; rc = 0; } } else { @@ -741,12 +784,33 @@ static int etr_sync_clock(struct etr_aib *aib, int port) __ctl_clear_bit(0, 29); __ctl_clear_bit(14, 21); disable_sync_clock(NULL); - etr_sync.in_sync = -EAGAIN; + etr_sync->in_sync = -EAGAIN; rc = -EAGAIN; } - local_irq_enable(); - smp_call_function(clock_sync_cpu_end, NULL, 0); - preempt_enable(); + xchg(&first, 0); + return rc; +} + +static int etr_sync_clock_stop(struct etr_aib *aib, int port) +{ + struct clock_sync_data etr_sync; + struct etr_aib *sync_port; + int follows; + int rc; + + /* Check if the current aib is adjacent to the sync port aib. */ + sync_port = (port == 0) ? &etr_port0 : &etr_port1; + follows = etr_aib_follows(sync_port, aib, port); + memcpy(sync_port, aib, sizeof(*aib)); + if (!follows) + return -EAGAIN; + memset(&etr_sync, 0, sizeof(etr_sync)); + etr_sync.etr_aib = aib; + etr_sync.etr_port = port; + get_online_cpus(); + atomic_set(&etr_sync.cpus, num_online_cpus() - 1); + rc = stop_machine(etr_sync_clock, &etr_sync, &cpu_online_map); + put_online_cpus(); return rc; } @@ -903,7 +967,7 @@ static void etr_update_eacr(struct etr_eacr eacr) } /* - * ETR tasklet. In this function you'll find the main logic. In + * ETR work. In this function you'll find the main logic. In * particular this is the only function that calls etr_update_eacr(), * it "controls" the etr control register. */ @@ -914,6 +978,9 @@ static void etr_work_fn(struct work_struct *work) struct etr_aib aib; int sync_port; + /* prevent multiple execution. */ + mutex_lock(&etr_work_mutex); + /* Create working copy of etr_eacr. */ eacr = etr_eacr; @@ -929,7 +996,7 @@ static void etr_work_fn(struct work_struct *work) del_timer_sync(&etr_timer); etr_update_eacr(eacr); clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags); - return; + goto out_unlock; } /* Store aib to get the current ETR status word. */ @@ -1016,7 +1083,7 @@ static void etr_work_fn(struct work_struct *work) eacr.es || sync_port < 0) { etr_update_eacr(eacr); etr_set_tolec_timeout(now); - return; + goto out_unlock; } /* @@ -1036,7 +1103,7 @@ static void etr_work_fn(struct work_struct *work) etr_update_eacr(eacr); set_bit(CLOCK_SYNC_ETR, &clock_sync_flags); if (now < etr_tolec + (1600000 << 12) || - etr_sync_clock(&aib, sync_port) != 0) { + etr_sync_clock_stop(&aib, sync_port) != 0) { /* Sync failed. Try again in 1/2 second. */ eacr.es = 0; etr_update_eacr(eacr); @@ -1044,6 +1111,8 @@ static void etr_work_fn(struct work_struct *work) etr_set_sync_timeout(); } else etr_set_tolec_timeout(now); +out_unlock: + mutex_unlock(&etr_work_mutex); } /* @@ -1125,13 +1194,13 @@ static ssize_t etr_online_store(struct sys_device *dev, return count; /* Nothing to do. */ etr_port0_online = value; set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events); - schedule_work(&etr_work); + queue_work(time_sync_wq, &etr_work); } else { if (etr_port1_online == value) return count; /* Nothing to do. 
*/ etr_port1_online = value; set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events); - schedule_work(&etr_work); + queue_work(time_sync_wq, &etr_work); } return count; } @@ -1332,6 +1401,7 @@ static struct stp_sstpi stp_info; static void *stp_page; static void stp_work_fn(struct work_struct *work); +static DEFINE_MUTEX(stp_work_mutex); static DECLARE_WORK(stp_work, stp_work_fn); static int __init early_parse_stp(char *p) @@ -1356,7 +1426,8 @@ static void __init stp_reset(void) if (rc == 0) set_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags); else if (stp_online) { - printk(KERN_WARNING "Running on non STP capable machine.\n"); + pr_warning("The real or virtual hardware system does " + "not provide an STP interface\n"); free_bootmem((unsigned long) stp_page, PAGE_SIZE); stp_page = NULL; stp_online = 0; @@ -1365,8 +1436,12 @@ static void __init stp_reset(void) static int __init stp_init(void) { - if (test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags) && stp_online) - schedule_work(&stp_work); + if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags)) + return 0; + time_init_wq(); + if (!stp_online) + return 0; + queue_work(time_sync_wq, &stp_work); return 0; } @@ -1383,7 +1458,7 @@ arch_initcall(stp_init); static void stp_timing_alert(struct stp_irq_parm *intparm) { if (intparm->tsc || intparm->lac || intparm->tcpc) - schedule_work(&stp_work); + queue_work(time_sync_wq, &stp_work); } /* @@ -1397,7 +1472,7 @@ void stp_sync_check(void) if (!test_bit(CLOCK_SYNC_STP, &clock_sync_flags)) return; disable_sync_clock(NULL); - schedule_work(&stp_work); + queue_work(time_sync_wq, &stp_work); } /* @@ -1411,46 +1486,34 @@ void stp_island_check(void) if (!test_bit(CLOCK_SYNC_STP, &clock_sync_flags)) return; disable_sync_clock(NULL); - schedule_work(&stp_work); + queue_work(time_sync_wq, &stp_work); } -/* - * STP tasklet. Check for the STP state and take over the clock - * synchronization if the STP clock source is usable. - */ -static void stp_work_fn(struct work_struct *work) + +static int stp_sync_clock(void *data) { - struct clock_sync_data stp_sync; + static int first; unsigned long long old_clock, delta; + struct clock_sync_data *stp_sync; int rc; - if (!stp_online) { - chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000); - return; - } + stp_sync = data; - rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0xb0e0); - if (rc) - return; + if (xchg(&first, 1) == 1) { + /* Slave */ + clock_sync_cpu(stp_sync); + return 0; + } - rc = chsc_sstpi(stp_page, &stp_info, sizeof(struct stp_sstpi)); - if (rc || stp_info.c == 0) - return; + /* Wait until all other cpus entered the sync function. */ + while (atomic_read(&stp_sync->cpus) != 0) + cpu_relax(); - /* - * Catch all other cpus and make them wait until we have - * successfully synced the clock. smp_call_function will - * return after all other cpus are in clock_sync_cpu_start. 
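The new update_vsyscall() and update_vsyscall_tz() earlier in this time.c diff bump vdso_data->tb_update_count before and after touching the time fields so that a userspace gettimeofday() can notice a concurrent update and retry. A sketch of that writer/reader protocol with an invented structure; the real vdso_data layout and barrier placement are not reproduced here:

#include <stdatomic.h>
#include <stdio.h>

struct vdso_like {
	_Atomic unsigned int tb_update_count;
	long xtime_sec;
	long xtime_nsec;
};

static void writer_update(struct vdso_like *v, long sec, long nsec)
{
	atomic_fetch_add(&v->tb_update_count, 1);	/* now odd: update in progress */
	v->xtime_sec = sec;
	v->xtime_nsec = nsec;
	atomic_fetch_add(&v->tb_update_count, 1);	/* even again: consistent */
}

static void reader_snapshot(struct vdso_like *v, long *sec, long *nsec)
{
	unsigned int start;

	for (;;) {
		start = atomic_load(&v->tb_update_count);
		if (start & 1)
			continue;		/* writer mid-update, try again */
		*sec = v->xtime_sec;
		*nsec = v->xtime_nsec;
		if (atomic_load(&v->tb_update_count) == start)
			return;			/* snapshot was consistent */
	}
}

int main(void)
{
	struct vdso_like v = { 0, 0, 0 };
	long sec, nsec;

	writer_update(&v, 1229000000L, 500L);
	reader_snapshot(&v, &sec, &nsec);
	printf("snapshot: %ld.%09ld\n", sec, nsec);
	return 0;
}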
- */ - memset(&stp_sync, 0, sizeof(stp_sync)); - preempt_disable(); - smp_call_function(clock_sync_cpu_start, &stp_sync, 0); - local_irq_disable(); enable_sync_clock(); set_bit(CLOCK_SYNC_STP, &clock_sync_flags); if (test_and_clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags)) - schedule_work(&etr_work); + queue_work(time_sync_wq, &etr_work); rc = 0; if (stp_info.todoff[0] || stp_info.todoff[1] || @@ -1469,16 +1532,49 @@ static void stp_work_fn(struct work_struct *work) } if (rc) { disable_sync_clock(NULL); - stp_sync.in_sync = -EAGAIN; + stp_sync->in_sync = -EAGAIN; clear_bit(CLOCK_SYNC_STP, &clock_sync_flags); if (etr_port0_online || etr_port1_online) - schedule_work(&etr_work); + queue_work(time_sync_wq, &etr_work); } else - stp_sync.in_sync = 1; + stp_sync->in_sync = 1; + xchg(&first, 0); + return 0; +} + +/* + * STP work. Check for the STP state and take over the clock + * synchronization if the STP clock source is usable. + */ +static void stp_work_fn(struct work_struct *work) +{ + struct clock_sync_data stp_sync; + int rc; + + /* prevent multiple execution. */ + mutex_lock(&stp_work_mutex); + + if (!stp_online) { + chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000); + goto out_unlock; + } + + rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0xb0e0); + if (rc) + goto out_unlock; + + rc = chsc_sstpi(stp_page, &stp_info, sizeof(struct stp_sstpi)); + if (rc || stp_info.c == 0) + goto out_unlock; + + memset(&stp_sync, 0, sizeof(stp_sync)); + get_online_cpus(); + atomic_set(&stp_sync.cpus, num_online_cpus() - 1); + stop_machine(stp_sync_clock, &stp_sync, &cpu_online_map); + put_online_cpus(); - local_irq_enable(); - smp_call_function(clock_sync_cpu_end, NULL, 0); - preempt_enable(); +out_unlock: + mutex_unlock(&stp_work_mutex); } /* @@ -1587,7 +1683,7 @@ static ssize_t stp_online_store(struct sysdev_class *class, if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags)) return -EOPNOTSUPP; stp_online = value; - schedule_work(&stp_work); + queue_work(time_sync_wq, &stp_work); return count; } diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c index a947899dcba1..90e9ba11eba1 100644 --- a/arch/s390/kernel/topology.c +++ b/arch/s390/kernel/topology.c @@ -3,6 +3,9 @@ * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> */ +#define KMSG_COMPONENT "cpu" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include <linux/kernel.h> #include <linux/mm.h> #include <linux/init.h> @@ -12,6 +15,7 @@ #include <linux/workqueue.h> #include <linux/cpu.h> #include <linux/smp.h> +#include <linux/cpuset.h> #include <asm/delay.h> #include <asm/s390_ext.h> #include <asm/sysinfo.h> @@ -57,11 +61,11 @@ struct core_info { cpumask_t mask; }; +static int topology_enabled; static void topology_work_fn(struct work_struct *work); static struct tl_info *tl_info; static struct core_info core_info; static int machine_has_topology; -static int machine_has_topology_irq; static struct timer_list topology_timer; static void set_topology_timer(void); static DECLARE_WORK(topology_work, topology_work_fn); @@ -77,8 +81,8 @@ cpumask_t cpu_coregroup_map(unsigned int cpu) cpumask_t mask; cpus_clear(mask); - if (!machine_has_topology) - return cpu_present_map; + if (!topology_enabled || !machine_has_topology) + return cpu_possible_map; spin_lock_irqsave(&topology_lock, flags); while (core) { if (cpu_isset(cpu, core->mask)) { @@ -168,7 +172,7 @@ static void topology_update_polarization_simple(void) int cpu; mutex_lock(&smp_cpu_state_mutex); - for_each_present_cpu(cpu) + for_each_possible_cpu(cpu) smp_cpu_polarization[cpu] = POLARIZATION_HRZ; 
mutex_unlock(&smp_cpu_state_mutex); } @@ -199,7 +203,7 @@ int topology_set_cpu_management(int fc) rc = ptf(PTF_HORIZONTAL); if (rc) return -EBUSY; - for_each_present_cpu(cpu) + for_each_possible_cpu(cpu) smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN; return rc; } @@ -208,11 +212,11 @@ static void update_cpu_core_map(void) { int cpu; - for_each_present_cpu(cpu) + for_each_possible_cpu(cpu) cpu_core_map[cpu] = cpu_coregroup_map(cpu); } -void arch_update_cpu_topology(void) +int arch_update_cpu_topology(void) { struct tl_info *info = tl_info; struct sys_device *sysdev; @@ -221,7 +225,7 @@ void arch_update_cpu_topology(void) if (!machine_has_topology) { update_cpu_core_map(); topology_update_polarization_simple(); - return; + return 0; } stsi(info, 15, 1, 2); tl_to_cores(info); @@ -230,11 +234,12 @@ void arch_update_cpu_topology(void) sysdev = get_cpu_sysdev(cpu); kobject_uevent(&sysdev->kobj, KOBJ_CHANGE); } + return 1; } static void topology_work_fn(struct work_struct *work) { - arch_reinit_sched_domains(); + rebuild_sched_domains(); } void topology_schedule_update(void) @@ -257,10 +262,14 @@ static void set_topology_timer(void) add_timer(&topology_timer); } -static void topology_interrupt(__u16 code) +static int __init early_parse_topology(char *p) { - schedule_work(&topology_work); + if (strncmp(p, "on", 2)) + return 0; + topology_enabled = 1; + return 0; } +early_param("topology", early_parse_topology); static int __init init_topology_update(void) { @@ -272,14 +281,7 @@ static int __init init_topology_update(void) goto out; } init_timer_deferrable(&topology_timer); - if (machine_has_topology_irq) { - rc = register_external_interrupt(0x2005, topology_interrupt); - if (rc) - goto out; - ctl_set_bit(0, 8); - } - else - set_topology_timer(); + set_topology_timer(); out: update_cpu_core_map(); return rc; @@ -300,9 +302,6 @@ void __init s390_init_cpu_topology(void) return; machine_has_topology = 1; - if (facility_bits & (1ULL << 51)) - machine_has_topology_irq = 1; - tl_info = alloc_bootmem_pages(PAGE_SIZE); info = tl_info; stsi(info, 15, 1, 2); @@ -311,7 +310,7 @@ void __init s390_init_cpu_topology(void) for (i = 0; i < info->mnest - 2; i++) nr_cores *= info->mag[NR_MAG - 3 - i]; - printk(KERN_INFO "CPU topology:"); + pr_info("The CPU configuration topology of the machine is:"); for (i = 0; i < NR_MAG; i++) printk(" %d", info->mag[i]); printk(" / %d\n", info->mnest); @@ -326,5 +325,4 @@ void __init s390_init_cpu_topology(void) return; error: machine_has_topology = 0; - machine_has_topology_irq = 0; } diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c new file mode 100644 index 000000000000..10a6ccef4412 --- /dev/null +++ b/arch/s390/kernel/vdso.c @@ -0,0 +1,234 @@ +/* + * vdso setup for s390 + * + * Copyright IBM Corp. 2008 + * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License (version 2 only) + * as published by the Free Software Foundation. 
+ */ + +#include <linux/module.h> +#include <linux/errno.h> +#include <linux/sched.h> +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/smp.h> +#include <linux/stddef.h> +#include <linux/unistd.h> +#include <linux/slab.h> +#include <linux/user.h> +#include <linux/elf.h> +#include <linux/security.h> +#include <linux/bootmem.h> + +#include <asm/pgtable.h> +#include <asm/system.h> +#include <asm/processor.h> +#include <asm/mmu.h> +#include <asm/mmu_context.h> +#include <asm/sections.h> +#include <asm/vdso.h> + +/* Max supported size for symbol names */ +#define MAX_SYMNAME 64 + +#if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT) +extern char vdso32_start, vdso32_end; +static void *vdso32_kbase = &vdso32_start; +static unsigned int vdso32_pages; +static struct page **vdso32_pagelist; +#endif + +#ifdef CONFIG_64BIT +extern char vdso64_start, vdso64_end; +static void *vdso64_kbase = &vdso64_start; +static unsigned int vdso64_pages; +static struct page **vdso64_pagelist; +#endif /* CONFIG_64BIT */ + +/* + * Should the kernel map a VDSO page into processes and pass its + * address down to glibc upon exec()? + */ +unsigned int __read_mostly vdso_enabled = 1; + +static int __init vdso_setup(char *s) +{ + vdso_enabled = simple_strtoul(s, NULL, 0); + return 1; +} +__setup("vdso=", vdso_setup); + +/* + * The vdso data page + */ +static union { + struct vdso_data data; + u8 page[PAGE_SIZE]; +} vdso_data_store __attribute__((__section__(".data.page_aligned"))); +struct vdso_data *vdso_data = &vdso_data_store.data; + +/* + * This is called from binfmt_elf, we create the special vma for the + * vDSO and insert it into the mm struct tree + */ +int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) +{ + struct mm_struct *mm = current->mm; + struct page **vdso_pagelist; + unsigned long vdso_pages; + unsigned long vdso_base; + int rc; + + if (!vdso_enabled) + return 0; + /* + * Only map the vdso for dynamically linked elf binaries. + */ + if (!uses_interp) + return 0; + + vdso_base = mm->mmap_base; +#ifdef CONFIG_64BIT + vdso_pagelist = vdso64_pagelist; + vdso_pages = vdso64_pages; +#ifdef CONFIG_COMPAT + if (test_thread_flag(TIF_31BIT)) { + vdso_pagelist = vdso32_pagelist; + vdso_pages = vdso32_pages; + } +#endif +#else + vdso_pagelist = vdso32_pagelist; + vdso_pages = vdso32_pages; +#endif + + /* + * vDSO has a problem and was disabled, just don't "enable" it for + * the process + */ + if (vdso_pages == 0) + return 0; + + current->mm->context.vdso_base = 0; + + /* + * pick a base address for the vDSO in process space. We try to put + * it at vdso_base which is the "natural" base for it, but we might + * fail and end up putting it elsewhere. + */ + down_write(&mm->mmap_sem); + vdso_base = get_unmapped_area(NULL, vdso_base, + vdso_pages << PAGE_SHIFT, 0, 0); + if (IS_ERR_VALUE(vdso_base)) { + rc = vdso_base; + goto out_up; + } + + /* + * our vma flags don't have VM_WRITE so by default, the process + * isn't allowed to write those pages. + * gdb can break that with ptrace interface, and thus trigger COW + * on those pages but it's then your responsibility to never do that + * on the "data" page of the vDSO or you'll stop getting kernel + * updates and your nice userland gettimeofday will be totally dead. + * It's fine to use that for setting breakpoints in the vDSO code + * pages though + * + * Make sure the vDSO gets into every core dump. 
+ * Dumping its contents makes post-mortem fully interpretable later + * without matching up the same kernel and hardware config to see + * what PC values meant. + */ + rc = install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT, + VM_READ|VM_EXEC| + VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC| + VM_ALWAYSDUMP, + vdso_pagelist); + if (rc) + goto out_up; + + /* Put vDSO base into mm struct */ + current->mm->context.vdso_base = vdso_base; + + up_write(&mm->mmap_sem); + return 0; + +out_up: + up_write(&mm->mmap_sem); + return rc; +} + +const char *arch_vma_name(struct vm_area_struct *vma) +{ + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso_base) + return "[vdso]"; + return NULL; +} + +static int __init vdso_init(void) +{ + int i; + +#if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT) + /* Calculate the size of the 32 bit vDSO */ + vdso32_pages = ((&vdso32_end - &vdso32_start + + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1; + + /* Make sure pages are in the correct state */ + vdso32_pagelist = kzalloc(sizeof(struct page *) * (vdso32_pages + 1), + GFP_KERNEL); + BUG_ON(vdso32_pagelist == NULL); + for (i = 0; i < vdso32_pages - 1; i++) { + struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE); + ClearPageReserved(pg); + get_page(pg); + vdso32_pagelist[i] = pg; + } + vdso32_pagelist[vdso32_pages - 1] = virt_to_page(vdso_data); + vdso32_pagelist[vdso32_pages] = NULL; +#endif + +#ifdef CONFIG_64BIT + /* Calculate the size of the 64 bit vDSO */ + vdso64_pages = ((&vdso64_end - &vdso64_start + + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1; + + /* Make sure pages are in the correct state */ + vdso64_pagelist = kzalloc(sizeof(struct page *) * (vdso64_pages + 1), + GFP_KERNEL); + BUG_ON(vdso64_pagelist == NULL); + for (i = 0; i < vdso64_pages - 1; i++) { + struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE); + ClearPageReserved(pg); + get_page(pg); + vdso64_pagelist[i] = pg; + } + vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data); + vdso64_pagelist[vdso64_pages] = NULL; +#endif /* CONFIG_64BIT */ + + get_page(virt_to_page(vdso_data)); + + smp_wmb(); + + return 0; +} +arch_initcall(vdso_init); + +int in_gate_area_no_task(unsigned long addr) +{ + return 0; +} + +int in_gate_area(struct task_struct *task, unsigned long addr) +{ + return 0; +} + +struct vm_area_struct *get_gate_vma(struct task_struct *tsk) +{ + return NULL; +} diff --git a/arch/s390/kernel/vdso32/Makefile b/arch/s390/kernel/vdso32/Makefile new file mode 100644 index 000000000000..ca78ad60ba24 --- /dev/null +++ b/arch/s390/kernel/vdso32/Makefile @@ -0,0 +1,55 @@ +# List of files in the vdso, has to be asm only for now + +obj-vdso32 = gettimeofday.o clock_getres.o clock_gettime.o note.o + +# Build rules + +targets := $(obj-vdso32) vdso32.so vdso32.so.dbg +obj-vdso32 := $(addprefix $(obj)/, $(obj-vdso32)) + +KBUILD_AFLAGS_31 := $(filter-out -m64,$(KBUILD_AFLAGS)) +KBUILD_AFLAGS_31 += -m31 -s + +KBUILD_CFLAGS_31 := $(filter-out -m64,$(KBUILD_CFLAGS)) +KBUILD_CFLAGS_31 += -m31 -fPIC -shared -fno-common -fno-builtin +KBUILD_CFLAGS_31 += -nostdlib -Wl,-soname=linux-vdso32.so.1 \ + $(call ld-option, -Wl$(comma)--hash-style=sysv) + +$(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_31) +$(targets:%=$(obj)/%.dbg): KBUILD_AFLAGS = $(KBUILD_AFLAGS_31) + +obj-y += vdso32_wrapper.o +extra-y += vdso32.lds +CPPFLAGS_vdso32.lds += -P -C -U$(ARCH) + +# Force dependency (incbin is bad) +$(obj)/vdso32_wrapper.o : $(obj)/vdso32.so + +# link rule for the .so file, .lds has to be first +$(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32) 
+ $(call if_changed,vdso32ld) + +# strip rule for the .so file +$(obj)/%.so: OBJCOPYFLAGS := -S +$(obj)/%.so: $(obj)/%.so.dbg FORCE + $(call if_changed,objcopy) + +# assembly rules for the .S files +$(obj-vdso32): %.o: %.S + $(call if_changed_dep,vdso32as) + +# actual build commands +quiet_cmd_vdso32ld = VDSO32L $@ + cmd_vdso32ld = $(CC) $(c_flags) -Wl,-T $^ -o $@ +quiet_cmd_vdso32as = VDSO32A $@ + cmd_vdso32as = $(CC) $(a_flags) -c -o $@ $< + +# install commands for the unstripped file +quiet_cmd_vdso_install = INSTALL $@ + cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@ + +vdso32.so: $(obj)/vdso32.so.dbg + @mkdir -p $(MODLIB)/vdso + $(call cmd,vdso_install) + +vdso_install: vdso32.so diff --git a/arch/s390/kernel/vdso32/clock_getres.S b/arch/s390/kernel/vdso32/clock_getres.S new file mode 100644 index 000000000000..9532c4e6a9d2 --- /dev/null +++ b/arch/s390/kernel/vdso32/clock_getres.S @@ -0,0 +1,39 @@ +/* + * Userland implementation of clock_getres() for 32 bits processes in a + * s390 kernel for use in the vDSO + * + * Copyright IBM Corp. 2008 + * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License (version 2 only) + * as published by the Free Software Foundation. + */ +#include <asm/vdso.h> +#include <asm/asm-offsets.h> +#include <asm/unistd.h> + + .text + .align 4 + .globl __kernel_clock_getres + .type __kernel_clock_getres,@function +__kernel_clock_getres: + .cfi_startproc + chi %r2,CLOCK_REALTIME + je 0f + chi %r2,CLOCK_MONOTONIC + jne 3f +0: ltr %r3,%r3 + jz 2f /* res == NULL */ + basr %r1,0 +1: l %r0,4f-1b(%r1) + xc 0(4,%r3),0(%r3) /* set tp->tv_sec to zero */ + st %r0,4(%r3) /* store tp->tv_usec */ +2: lhi %r2,0 + br %r14 +3: lhi %r1,__NR_clock_getres /* fallback to svc */ + svc 0 + br %r14 +4: .long CLOCK_REALTIME_RES + .cfi_endproc + .size __kernel_clock_getres,.-__kernel_clock_getres diff --git a/arch/s390/kernel/vdso32/clock_gettime.S b/arch/s390/kernel/vdso32/clock_gettime.S new file mode 100644 index 000000000000..4a98909a8310 --- /dev/null +++ b/arch/s390/kernel/vdso32/clock_gettime.S @@ -0,0 +1,128 @@ +/* + * Userland implementation of clock_gettime() for 32 bits processes in a + * s390 kernel for use in the vDSO + * + * Copyright IBM Corp. 2008 + * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License (version 2 only) + * as published by the Free Software Foundation. + */ +#include <asm/vdso.h> +#include <asm/asm-offsets.h> +#include <asm/unistd.h> + + .text + .align 4 + .globl __kernel_clock_gettime + .type __kernel_clock_gettime,@function +__kernel_clock_gettime: + .cfi_startproc + basr %r5,0 +0: al %r5,21f-0b(%r5) /* get &_vdso_data */ + chi %r2,CLOCK_REALTIME + je 10f + chi %r2,CLOCK_MONOTONIC + jne 19f + + /* CLOCK_MONOTONIC */ + ltr %r3,%r3 + jz 9f /* tp == NULL */ +1: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */ + tml %r4,0x0001 /* pending update ? 
loop */ + jnz 1b + stck 24(%r15) /* Store TOD clock */ + lm %r0,%r1,24(%r15) + s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ + sl %r1,__VDSO_XTIME_STAMP+4(%r5) + brc 3,2f + ahi %r0,-1 +2: mhi %r0,1000 /* cyc2ns(clock,cycle_delta) */ + lr %r2,%r0 + lhi %r0,1000 + ltr %r1,%r1 + mr %r0,%r0 + jnm 3f + ahi %r0,1000 +3: alr %r0,%r2 + srdl %r0,12 + al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */ + al %r1,__VDSO_XTIME_NSEC+4(%r5) + brc 12,4f + ahi %r0,1 +4: l %r2,__VDSO_XTIME_SEC+4(%r5) + al %r0,__VDSO_WTOM_NSEC(%r5) /* + wall_to_monotonic */ + al %r1,__VDSO_WTOM_NSEC+4(%r5) + brc 12,5f + ahi %r0,1 +5: al %r2,__VDSO_WTOM_SEC+4(%r5) + cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */ + jne 1b + basr %r5,0 +6: ltr %r0,%r0 + jnz 7f + cl %r1,20f-6b(%r5) + jl 8f +7: ahi %r2,1 + sl %r1,20f-6b(%r5) + brc 3,6b + ahi %r0,-1 + j 6b +8: st %r2,0(%r3) /* store tp->tv_sec */ + st %r1,4(%r3) /* store tp->tv_nsec */ +9: lhi %r2,0 + br %r14 + + /* CLOCK_REALTIME */ +10: ltr %r3,%r3 /* tp == NULL */ + jz 18f +11: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */ + tml %r4,0x0001 /* pending update ? loop */ + jnz 11b + stck 24(%r15) /* Store TOD clock */ + lm %r0,%r1,24(%r15) + s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ + sl %r1,__VDSO_XTIME_STAMP+4(%r5) + brc 3,12f + ahi %r0,-1 +12: mhi %r0,1000 /* cyc2ns(clock,cycle_delta) */ + lr %r2,%r0 + lhi %r0,1000 + ltr %r1,%r1 + mr %r0,%r0 + jnm 13f + ahi %r0,1000 +13: alr %r0,%r2 + srdl %r0,12 + al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */ + al %r1,__VDSO_XTIME_NSEC+4(%r5) + brc 12,14f + ahi %r0,1 +14: l %r2,__VDSO_XTIME_SEC+4(%r5) + cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */ + jne 11b + basr %r5,0 +15: ltr %r0,%r0 + jnz 16f + cl %r1,20f-15b(%r5) + jl 17f +16: ahi %r2,1 + sl %r1,20f-15b(%r5) + brc 3,15b + ahi %r0,-1 + j 15b +17: st %r2,0(%r3) /* store tp->tv_sec */ + st %r1,4(%r3) /* store tp->tv_nsec */ +18: lhi %r2,0 + br %r14 + + /* Fallback to system call */ +19: lhi %r1,__NR_clock_gettime + svc 0 + br %r14 + +20: .long 1000000000 +21: .long _vdso_data - 0b + .cfi_endproc + .size __kernel_clock_gettime,.-__kernel_clock_gettime diff --git a/arch/s390/kernel/vdso32/gettimeofday.S b/arch/s390/kernel/vdso32/gettimeofday.S new file mode 100644 index 000000000000..c32f29c3d70c --- /dev/null +++ b/arch/s390/kernel/vdso32/gettimeofday.S @@ -0,0 +1,82 @@ +/* + * Userland implementation of gettimeofday() for 32 bits processes in a + * s390 kernel for use in the vDSO + * + * Copyright IBM Corp. 2008 + * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License (version 2 only) + * as published by the Free Software Foundation. + */ +#include <asm/vdso.h> +#include <asm/asm-offsets.h> +#include <asm/unistd.h> + +#include <asm/vdso.h> +#include <asm/asm-offsets.h> +#include <asm/unistd.h> + + .text + .align 4 + .globl __kernel_gettimeofday + .type __kernel_gettimeofday,@function +__kernel_gettimeofday: + .cfi_startproc + basr %r5,0 +0: al %r5,13f-0b(%r5) /* get &_vdso_data */ +1: ltr %r3,%r3 /* check if tz is NULL */ + je 2f + mvc 0(8,%r3),__VDSO_TIMEZONE(%r5) +2: ltr %r2,%r2 /* check if tv is NULL */ + je 10f + l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */ + tml %r4,0x0001 /* pending update ? 
loop */ + jnz 1b + stck 24(%r15) /* Store TOD clock */ + lm %r0,%r1,24(%r15) + s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ + sl %r1,__VDSO_XTIME_STAMP+4(%r5) + brc 3,3f + ahi %r0,-1 +3: mhi %r0,1000 /* cyc2ns(clock,cycle_delta) */ + st %r0,24(%r15) + lhi %r0,1000 + ltr %r1,%r1 + mr %r0,%r0 + jnm 4f + ahi %r0,1000 +4: al %r0,24(%r15) + srdl %r0,12 + al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */ + al %r1,__VDSO_XTIME_NSEC+4(%r5) + brc 12,5f + ahi %r0,1 +5: mvc 24(4,%r15),__VDSO_XTIME_SEC+4(%r5) + cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */ + jne 1b + l %r4,24(%r15) /* get tv_sec from stack */ + basr %r5,0 +6: ltr %r0,%r0 + jnz 7f + cl %r1,11f-6b(%r5) + jl 8f +7: ahi %r4,1 + sl %r1,11f-6b(%r5) + brc 3,6b + ahi %r0,-1 + j 6b +8: st %r4,0(%r2) /* store tv->tv_sec */ + ltr %r1,%r1 + m %r0,12f-6b(%r5) + jnm 9f + al %r0,12f-6b(%r5) +9: srl %r0,6 + st %r0,4(%r2) /* store tv->tv_usec */ +10: slr %r2,%r2 + br %r14 +11: .long 1000000000 +12: .long 274877907 +13: .long _vdso_data - 0b + .cfi_endproc + .size __kernel_gettimeofday,.-__kernel_gettimeofday diff --git a/arch/s390/kernel/vdso32/note.S b/arch/s390/kernel/vdso32/note.S new file mode 100644 index 000000000000..79a071e4357e --- /dev/null +++ b/arch/s390/kernel/vdso32/note.S @@ -0,0 +1,12 @@ +/* + * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text. + * Here we can supply some information useful to userland. + */ + +#include <linux/uts.h> +#include <linux/version.h> +#include <linux/elfnote.h> + +ELFNOTE_START(Linux, 0, "a") + .long LINUX_VERSION_CODE +ELFNOTE_END diff --git a/arch/s390/kernel/vdso32/vdso32.lds.S b/arch/s390/kernel/vdso32/vdso32.lds.S new file mode 100644 index 000000000000..a8c379fa1247 --- /dev/null +++ b/arch/s390/kernel/vdso32/vdso32.lds.S @@ -0,0 +1,138 @@ +/* + * This is the infamous ld script for the 32 bits vdso + * library + */ +#include <asm/vdso.h> + +OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390") +OUTPUT_ARCH(s390:31-bit) +ENTRY(_start) + +SECTIONS +{ + . = VDSO32_LBASE + SIZEOF_HEADERS; + + .hash : { *(.hash) } :text + .gnu.hash : { *(.gnu.hash) } + .dynsym : { *(.dynsym) } + .dynstr : { *(.dynstr) } + .gnu.version : { *(.gnu.version) } + .gnu.version_d : { *(.gnu.version_d) } + .gnu.version_r : { *(.gnu.version_r) } + + .note : { *(.note.*) } :text :note + + . = ALIGN(16); + .text : { + *(.text .stub .text.* .gnu.linkonce.t.*) + } :text + PROVIDE(__etext = .); + PROVIDE(_etext = .); + PROVIDE(etext = .); + + /* + * Other stuff is appended to the text segment: + */ + .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) } + .rodata1 : { *(.rodata1) } + + .dynamic : { *(.dynamic) } :text :dynamic + + .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr + .eh_frame : { KEEP (*(.eh_frame)) } :text + .gcc_except_table : { *(.gcc_except_table .gcc_except_table.*) } + + .rela.dyn ALIGN(8) : { *(.rela.dyn) } + .got ALIGN(8) : { *(.got .toc) } + + _end = .; + PROVIDE(end = .); + + /* + * Stabs debugging sections are here too. + */ + .stab 0 : { *(.stab) } + .stabstr 0 : { *(.stabstr) } + .stab.excl 0 : { *(.stab.excl) } + .stab.exclstr 0 : { *(.stab.exclstr) } + .stab.index 0 : { *(.stab.index) } + .stab.indexstr 0 : { *(.stab.indexstr) } + .comment 0 : { *(.comment) } + + /* + * DWARF debug sections. + * Symbols in the DWARF debugging sections are relative to the + * beginning of the section so we begin them at 0. 
+ */ + /* DWARF 1 */ + .debug 0 : { *(.debug) } + .line 0 : { *(.line) } + /* GNU DWARF 1 extensions */ + .debug_srcinfo 0 : { *(.debug_srcinfo) } + .debug_sfnames 0 : { *(.debug_sfnames) } + /* DWARF 1.1 and DWARF 2 */ + .debug_aranges 0 : { *(.debug_aranges) } + .debug_pubnames 0 : { *(.debug_pubnames) } + /* DWARF 2 */ + .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) } + .debug_abbrev 0 : { *(.debug_abbrev) } + .debug_line 0 : { *(.debug_line) } + .debug_frame 0 : { *(.debug_frame) } + .debug_str 0 : { *(.debug_str) } + .debug_loc 0 : { *(.debug_loc) } + .debug_macinfo 0 : { *(.debug_macinfo) } + /* SGI/MIPS DWARF 2 extensions */ + .debug_weaknames 0 : { *(.debug_weaknames) } + .debug_funcnames 0 : { *(.debug_funcnames) } + .debug_typenames 0 : { *(.debug_typenames) } + .debug_varnames 0 : { *(.debug_varnames) } + /* DWARF 3 */ + .debug_pubtypes 0 : { *(.debug_pubtypes) } + .debug_ranges 0 : { *(.debug_ranges) } + .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) } + + . = ALIGN(4096); + PROVIDE(_vdso_data = .); + + /DISCARD/ : { + *(.note.GNU-stack) + *(.branch_lt) + *(.data .data.* .gnu.linkonce.d.* .sdata*) + *(.bss .sbss .dynbss .dynsbss) + } +} + +/* + * Very old versions of ld do not recognize this name token; use the constant. + */ +#define PT_GNU_EH_FRAME 0x6474e550 + +/* + * We must supply the ELF program headers explicitly to get just one + * PT_LOAD segment, and set the flags explicitly to make segments read-only. + */ +PHDRS +{ + text PT_LOAD FILEHDR PHDRS FLAGS(5); /* PF_R|PF_X */ + dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ + note PT_NOTE FLAGS(4); /* PF_R */ + eh_frame_hdr PT_GNU_EH_FRAME; +} + +/* + * This controls what symbols we export from the DSO. + */ +VERSION +{ + VDSO_VERSION_STRING { + global: + /* + * Has to be there for the kernel to find + */ + __kernel_gettimeofday; + __kernel_clock_gettime; + __kernel_clock_getres; + + local: *; + }; +} diff --git a/arch/s390/kernel/vdso32/vdso32_wrapper.S b/arch/s390/kernel/vdso32/vdso32_wrapper.S new file mode 100644 index 000000000000..61639a89e70b --- /dev/null +++ b/arch/s390/kernel/vdso32/vdso32_wrapper.S @@ -0,0 +1,13 @@ +#include <linux/init.h> +#include <asm/page.h> + + .section ".data.page_aligned" + + .globl vdso32_start, vdso32_end + .balign PAGE_SIZE +vdso32_start: + .incbin "arch/s390/kernel/vdso32/vdso32.so" + .balign PAGE_SIZE +vdso32_end: + + .previous diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile new file mode 100644 index 000000000000..6fc8e829258c --- /dev/null +++ b/arch/s390/kernel/vdso64/Makefile @@ -0,0 +1,55 @@ +# List of files in the vdso, has to be asm only for now + +obj-vdso64 = gettimeofday.o clock_getres.o clock_gettime.o note.o + +# Build rules + +targets := $(obj-vdso64) vdso64.so vdso64.so.dbg +obj-vdso64 := $(addprefix $(obj)/, $(obj-vdso64)) + +KBUILD_AFLAGS_64 := $(filter-out -m64,$(KBUILD_AFLAGS)) +KBUILD_AFLAGS_64 += -m64 -s + +KBUILD_CFLAGS_64 := $(filter-out -m64,$(KBUILD_CFLAGS)) +KBUILD_CFLAGS_64 += -m64 -fPIC -shared -fno-common -fno-builtin +KBUILD_CFLAGS_64 += -nostdlib -Wl,-soname=linux-vdso64.so.1 \ + $(call ld-option, -Wl$(comma)--hash-style=sysv) + +$(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_64) +$(targets:%=$(obj)/%.dbg): KBUILD_AFLAGS = $(KBUILD_AFLAGS_64) + +obj-y += vdso64_wrapper.o +extra-y += vdso64.lds +CPPFLAGS_vdso64.lds += -P -C -U$(ARCH) + +# Force dependency (incbin is bad) +$(obj)/vdso64_wrapper.o : $(obj)/vdso64.so + +# link rule for the .so file, .lds has to be first +$(obj)/vdso64.so.dbg: $(src)/vdso64.lds 
$(obj-vdso64) + $(call if_changed,vdso64ld) + +# strip rule for the .so file +$(obj)/%.so: OBJCOPYFLAGS := -S +$(obj)/%.so: $(obj)/%.so.dbg FORCE + $(call if_changed,objcopy) + +# assembly rules for the .S files +$(obj-vdso64): %.o: %.S + $(call if_changed_dep,vdso64as) + +# actual build commands +quiet_cmd_vdso64ld = VDSO64L $@ + cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $^ -o $@ +quiet_cmd_vdso64as = VDSO64A $@ + cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $< + +# install commands for the unstripped file +quiet_cmd_vdso_install = INSTALL $@ + cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@ + +vdso64.so: $(obj)/vdso64.so.dbg + @mkdir -p $(MODLIB)/vdso + $(call cmd,vdso_install) + +vdso_install: vdso64.so diff --git a/arch/s390/kernel/vdso64/clock_getres.S b/arch/s390/kernel/vdso64/clock_getres.S new file mode 100644 index 000000000000..488e31a3c0e7 --- /dev/null +++ b/arch/s390/kernel/vdso64/clock_getres.S @@ -0,0 +1,39 @@ +/* + * Userland implementation of clock_getres() for 64 bits processes in a + * s390 kernel for use in the vDSO + * + * Copyright IBM Corp. 2008 + * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License (version 2 only) + * as published by the Free Software Foundation. + */ +#include <asm/vdso.h> +#include <asm/asm-offsets.h> +#include <asm/unistd.h> + + .text + .align 4 + .globl __kernel_clock_getres + .type __kernel_clock_getres,@function +__kernel_clock_getres: + .cfi_startproc + cghi %r2,CLOCK_REALTIME + je 0f + cghi %r2,CLOCK_MONOTONIC + jne 2f +0: ltgr %r3,%r3 + jz 1f /* res == NULL */ + larl %r1,3f + lg %r0,0(%r1) + xc 0(8,%r3),0(%r3) /* set tp->tv_sec to zero */ + stg %r0,8(%r3) /* store tp->tv_usec */ +1: lghi %r2,0 + br %r14 +2: lghi %r1,__NR_clock_getres /* fallback to svc */ + svc 0 + br %r14 +3: .quad CLOCK_REALTIME_RES + .cfi_endproc + .size __kernel_clock_getres,.-__kernel_clock_getres diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S new file mode 100644 index 000000000000..738a410b7eb2 --- /dev/null +++ b/arch/s390/kernel/vdso64/clock_gettime.S @@ -0,0 +1,89 @@ +/* + * Userland implementation of clock_gettime() for 64 bits processes in a + * s390 kernel for use in the vDSO + * + * Copyright IBM Corp. 2008 + * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License (version 2 only) + * as published by the Free Software Foundation. + */ +#include <asm/vdso.h> +#include <asm/asm-offsets.h> +#include <asm/unistd.h> + + .text + .align 4 + .globl __kernel_clock_gettime + .type __kernel_clock_gettime,@function +__kernel_clock_gettime: + .cfi_startproc + larl %r5,_vdso_data + cghi %r2,CLOCK_REALTIME + je 4f + cghi %r2,CLOCK_MONOTONIC + jne 9f + + /* CLOCK_MONOTONIC */ + ltgr %r3,%r3 + jz 3f /* tp == NULL */ +0: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */ + tmll %r4,0x0001 /* pending update ? 
loop */ + jnz 0b + stck 48(%r15) /* Store TOD clock */ + lg %r1,48(%r15) + sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ + mghi %r1,1000 + srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */ + alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime */ + lg %r0,__VDSO_XTIME_SEC(%r5) + alg %r1,__VDSO_WTOM_NSEC(%r5) /* + wall_to_monotonic */ + alg %r0,__VDSO_WTOM_SEC(%r5) + clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */ + jne 0b + larl %r5,10f +1: clg %r1,0(%r5) + jl 2f + slg %r1,0(%r5) + aghi %r0,1 + j 1b +2: stg %r0,0(%r3) /* store tp->tv_sec */ + stg %r1,8(%r3) /* store tp->tv_nsec */ +3: lghi %r2,0 + br %r14 + + /* CLOCK_REALTIME */ +4: ltr %r3,%r3 /* tp == NULL */ + jz 8f +5: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */ + tmll %r4,0x0001 /* pending update ? loop */ + jnz 5b + stck 48(%r15) /* Store TOD clock */ + lg %r1,48(%r15) + sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ + mghi %r1,1000 + srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */ + alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime */ + lg %r0,__VDSO_XTIME_SEC(%r5) + clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */ + jne 5b + larl %r5,10f +6: clg %r1,0(%r5) + jl 7f + slg %r1,0(%r5) + aghi %r0,1 + j 6b +7: stg %r0,0(%r3) /* store tp->tv_sec */ + stg %r1,8(%r3) /* store tp->tv_nsec */ +8: lghi %r2,0 + br %r14 + + /* Fallback to system call */ +9: lghi %r1,__NR_clock_gettime + svc 0 + br %r14 + +10: .quad 1000000000 + .cfi_endproc + .size __kernel_clock_gettime,.-__kernel_clock_gettime diff --git a/arch/s390/kernel/vdso64/gettimeofday.S b/arch/s390/kernel/vdso64/gettimeofday.S new file mode 100644 index 000000000000..f873e75634e1 --- /dev/null +++ b/arch/s390/kernel/vdso64/gettimeofday.S @@ -0,0 +1,56 @@ +/* + * Userland implementation of gettimeofday() for 64 bits processes in a + * s390 kernel for use in the vDSO + * + * Copyright IBM Corp. 2008 + * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License (version 2 only) + * as published by the Free Software Foundation. + */ +#include <asm/vdso.h> +#include <asm/asm-offsets.h> +#include <asm/unistd.h> + + .text + .align 4 + .globl __kernel_gettimeofday + .type __kernel_gettimeofday,@function +__kernel_gettimeofday: + .cfi_startproc + larl %r5,_vdso_data +0: ltgr %r3,%r3 /* check if tz is NULL */ + je 1f + mvc 0(8,%r3),__VDSO_TIMEZONE(%r5) +1: ltgr %r2,%r2 /* check if tv is NULL */ + je 4f + lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */ + tmll %r4,0x0001 /* pending update ? 
loop */ + jnz 0b + stck 48(%r15) /* Store TOD clock */ + lg %r1,48(%r15) + sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ + mghi %r1,1000 + srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */ + alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime.tv_nsec */ + lg %r0,__VDSO_XTIME_SEC(%r5) /* xtime.tv_sec */ + clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */ + jne 0b + larl %r5,5f +2: clg %r1,0(%r5) + jl 3f + slg %r1,0(%r5) + aghi %r0,1 + j 2b +3: stg %r0,0(%r2) /* store tv->tv_sec */ + slgr %r0,%r0 /* tv_nsec -> tv_usec */ + ml %r0,8(%r5) + srlg %r0,%r0,6 + stg %r0,8(%r2) /* store tv->tv_usec */ +4: lghi %r2,0 + br %r14 +5: .quad 1000000000 + .long 274877907 + .cfi_endproc + .size __kernel_gettimeofday,.-__kernel_gettimeofday diff --git a/arch/s390/kernel/vdso64/note.S b/arch/s390/kernel/vdso64/note.S new file mode 100644 index 000000000000..79a071e4357e --- /dev/null +++ b/arch/s390/kernel/vdso64/note.S @@ -0,0 +1,12 @@ +/* + * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text. + * Here we can supply some information useful to userland. + */ + +#include <linux/uts.h> +#include <linux/version.h> +#include <linux/elfnote.h> + +ELFNOTE_START(Linux, 0, "a") + .long LINUX_VERSION_CODE +ELFNOTE_END diff --git a/arch/s390/kernel/vdso64/vdso64.lds.S b/arch/s390/kernel/vdso64/vdso64.lds.S new file mode 100644 index 000000000000..9f5979d102a9 --- /dev/null +++ b/arch/s390/kernel/vdso64/vdso64.lds.S @@ -0,0 +1,138 @@ +/* + * This is the infamous ld script for the 64 bits vdso + * library + */ +#include <asm/vdso.h> + +OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390") +OUTPUT_ARCH(s390:64-bit) +ENTRY(_start) + +SECTIONS +{ + . = VDSO64_LBASE + SIZEOF_HEADERS; + + .hash : { *(.hash) } :text + .gnu.hash : { *(.gnu.hash) } + .dynsym : { *(.dynsym) } + .dynstr : { *(.dynstr) } + .gnu.version : { *(.gnu.version) } + .gnu.version_d : { *(.gnu.version_d) } + .gnu.version_r : { *(.gnu.version_r) } + + .note : { *(.note.*) } :text :note + + . = ALIGN(16); + .text : { + *(.text .stub .text.* .gnu.linkonce.t.*) + } :text + PROVIDE(__etext = .); + PROVIDE(_etext = .); + PROVIDE(etext = .); + + /* + * Other stuff is appended to the text segment: + */ + .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) } + .rodata1 : { *(.rodata1) } + + .dynamic : { *(.dynamic) } :text :dynamic + + .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr + .eh_frame : { KEEP (*(.eh_frame)) } :text + .gcc_except_table : { *(.gcc_except_table .gcc_except_table.*) } + + .rela.dyn ALIGN(8) : { *(.rela.dyn) } + .got ALIGN(8) : { *(.got .toc) } + + _end = .; + PROVIDE(end = .); + + /* + * Stabs debugging sections are here too. + */ + .stab 0 : { *(.stab) } + .stabstr 0 : { *(.stabstr) } + .stab.excl 0 : { *(.stab.excl) } + .stab.exclstr 0 : { *(.stab.exclstr) } + .stab.index 0 : { *(.stab.index) } + .stab.indexstr 0 : { *(.stab.indexstr) } + .comment 0 : { *(.comment) } + + /* + * DWARF debug sections. + * Symbols in the DWARF debugging sections are relative to the + * beginning of the section so we begin them at 0. 
+ */ + /* DWARF 1 */ + .debug 0 : { *(.debug) } + .line 0 : { *(.line) } + /* GNU DWARF 1 extensions */ + .debug_srcinfo 0 : { *(.debug_srcinfo) } + .debug_sfnames 0 : { *(.debug_sfnames) } + /* DWARF 1.1 and DWARF 2 */ + .debug_aranges 0 : { *(.debug_aranges) } + .debug_pubnames 0 : { *(.debug_pubnames) } + /* DWARF 2 */ + .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) } + .debug_abbrev 0 : { *(.debug_abbrev) } + .debug_line 0 : { *(.debug_line) } + .debug_frame 0 : { *(.debug_frame) } + .debug_str 0 : { *(.debug_str) } + .debug_loc 0 : { *(.debug_loc) } + .debug_macinfo 0 : { *(.debug_macinfo) } + /* SGI/MIPS DWARF 2 extensions */ + .debug_weaknames 0 : { *(.debug_weaknames) } + .debug_funcnames 0 : { *(.debug_funcnames) } + .debug_typenames 0 : { *(.debug_typenames) } + .debug_varnames 0 : { *(.debug_varnames) } + /* DWARF 3 */ + .debug_pubtypes 0 : { *(.debug_pubtypes) } + .debug_ranges 0 : { *(.debug_ranges) } + .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) } + + . = ALIGN(4096); + PROVIDE(_vdso_data = .); + + /DISCARD/ : { + *(.note.GNU-stack) + *(.branch_lt) + *(.data .data.* .gnu.linkonce.d.* .sdata*) + *(.bss .sbss .dynbss .dynsbss) + } +} + +/* + * Very old versions of ld do not recognize this name token; use the constant. + */ +#define PT_GNU_EH_FRAME 0x6474e550 + +/* + * We must supply the ELF program headers explicitly to get just one + * PT_LOAD segment, and set the flags explicitly to make segments read-only. + */ +PHDRS +{ + text PT_LOAD FILEHDR PHDRS FLAGS(5); /* PF_R|PF_X */ + dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ + note PT_NOTE FLAGS(4); /* PF_R */ + eh_frame_hdr PT_GNU_EH_FRAME; +} + +/* + * This controls what symbols we export from the DSO. + */ +VERSION +{ + VDSO_VERSION_STRING { + global: + /* + * Has to be there for the kernel to find + */ + __kernel_gettimeofday; + __kernel_clock_gettime; + __kernel_clock_getres; + + local: *; + }; +} diff --git a/arch/s390/kernel/vdso64/vdso64_wrapper.S b/arch/s390/kernel/vdso64/vdso64_wrapper.S new file mode 100644 index 000000000000..d8e2ac14d564 --- /dev/null +++ b/arch/s390/kernel/vdso64/vdso64_wrapper.S @@ -0,0 +1,13 @@ +#include <linux/init.h> +#include <asm/page.h> + + .section ".data.page_aligned" + + .globl vdso64_start, vdso64_end + .balign PAGE_SIZE +vdso64_start: + .incbin "arch/s390/kernel/vdso64/vdso64.so" + .balign PAGE_SIZE +vdso64_end: + + .previous diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index 0fa5dc5d68e1..75a6e62ea973 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c @@ -27,7 +27,6 @@ static ext_int_info_t ext_int_info_timer; static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer); -#ifdef CONFIG_VIRT_CPU_ACCOUNTING /* * Update process times based on virtual cpu times stored by entry.S * to the lowcore fields user_timer, system_timer & steal_clock. 
@@ -125,16 +124,6 @@ static inline void set_vtimer(__u64 expires) /* store expire time for this CPU timer */ __get_cpu_var(virt_cpu_timer).to_expire = expires; } -#else -static inline void set_vtimer(__u64 expires) -{ - S390_lowcore.last_update_timer = expires; - asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer)); - - /* store expire time for this CPU timer */ - __get_cpu_var(virt_cpu_timer).to_expire = expires; -} -#endif void vtime_start_cpu_timer(void) { diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c index 580fc64cc735..5c8457129603 100644 --- a/arch/s390/mm/extmem.c +++ b/arch/s390/mm/extmem.c @@ -7,6 +7,9 @@ * (C) IBM Corporation 2002-2004 */ +#define KMSG_COMPONENT "extmem" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include <linux/kernel.h> #include <linux/string.h> #include <linux/spinlock.h> @@ -24,19 +27,6 @@ #include <asm/cpcmd.h> #include <asm/setup.h> -#define DCSS_DEBUG /* Debug messages on/off */ - -#define DCSS_NAME "extmem" -#ifdef DCSS_DEBUG -#define PRINT_DEBUG(x...) printk(KERN_DEBUG DCSS_NAME " debug:" x) -#else -#define PRINT_DEBUG(x...) do {} while (0) -#endif -#define PRINT_INFO(x...) printk(KERN_INFO DCSS_NAME " info:" x) -#define PRINT_WARN(x...) printk(KERN_WARNING DCSS_NAME " warning:" x) -#define PRINT_ERR(x...) printk(KERN_ERR DCSS_NAME " error:" x) - - #define DCSS_LOADSHR 0x00 #define DCSS_LOADNSR 0x04 #define DCSS_PURGESEG 0x08 @@ -286,7 +276,7 @@ query_segment_type (struct dcss_segment *seg) goto out_free; } if (diag_cc > 1) { - PRINT_WARN ("segment_type: diag returned error %ld\n", vmrc); + pr_warning("Querying a DCSS type failed with rc=%ld\n", vmrc); rc = dcss_diag_translate_rc (vmrc); goto out_free; } @@ -368,7 +358,6 @@ query_segment_type (struct dcss_segment *seg) * -EIO : could not perform query diagnose * -ENOENT : no such segment * -ENOTSUPP: multi-part segment cannot be used with linux - * -ENOSPC : segment cannot be used (overlaps with storage) * -ENOMEM : out of memory * 0 .. 6 : type of segment as defined in include/asm-s390/extmem.h */ @@ -480,9 +469,8 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long goto out_resource; } if (diag_cc > 1) { - PRINT_WARN ("segment_load: could not load segment %s - " - "diag returned error (%ld)\n", - name, end_addr); + pr_warning("Loading DCSS %s failed with rc=%ld\n", name, + end_addr); rc = dcss_diag_translate_rc(end_addr); dcss_diag(&purgeseg_scode, seg->dcss_name, &dummy, &dummy); @@ -496,15 +484,13 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long *addr = seg->start_addr; *end = seg->end; if (do_nonshared) - PRINT_INFO ("segment_load: loaded segment %s range %p .. %p " - "type %s in non-shared mode\n", name, - (void*)seg->start_addr, (void*)seg->end, - segtype_string[seg->vm_segtype]); + pr_info("DCSS %s of range %p to %p and type %s loaded as " + "exclusive-writable\n", name, (void*) seg->start_addr, + (void*) seg->end, segtype_string[seg->vm_segtype]); else { - PRINT_INFO ("segment_load: loaded segment %s range %p .. 
%p " - "type %s in shared mode\n", name, - (void*)seg->start_addr, (void*)seg->end, - segtype_string[seg->vm_segtype]); + pr_info("DCSS %s of range %p to %p and type %s loaded in " + "shared access mode\n", name, (void*) seg->start_addr, + (void*) seg->end, segtype_string[seg->vm_segtype]); } goto out; out_resource: @@ -593,14 +579,14 @@ segment_modify_shared (char *name, int do_nonshared) goto out_unlock; } if (do_nonshared == seg->do_nonshared) { - PRINT_INFO ("segment_modify_shared: not reloading segment %s" - " - already in requested mode\n",name); + pr_info("DCSS %s is already in the requested access " + "mode\n", name); rc = 0; goto out_unlock; } if (atomic_read (&seg->ref_count) != 1) { - PRINT_WARN ("segment_modify_shared: not reloading segment %s - " - "segment is in use by other driver(s)\n",name); + pr_warning("DCSS %s is in use and cannot be reloaded\n", + name); rc = -EAGAIN; goto out_unlock; } @@ -613,8 +599,8 @@ segment_modify_shared (char *name, int do_nonshared) seg->res->flags |= IORESOURCE_READONLY; if (request_resource(&iomem_resource, seg->res)) { - PRINT_WARN("segment_modify_shared: could not reload segment %s" - " - overlapping resources\n", name); + pr_warning("DCSS %s overlaps with used memory resources " + "and cannot be reloaded\n", name); rc = -EBUSY; kfree(seg->res); goto out_del_mem; @@ -632,9 +618,8 @@ segment_modify_shared (char *name, int do_nonshared) goto out_del_res; } if (diag_cc > 1) { - PRINT_WARN ("segment_modify_shared: could not reload segment %s" - " - diag returned error (%ld)\n", - name, end_addr); + pr_warning("Reloading DCSS %s failed with rc=%ld\n", name, + end_addr); rc = dcss_diag_translate_rc(end_addr); goto out_del_res; } @@ -673,8 +658,7 @@ segment_unload(char *name) mutex_lock(&dcss_lock); seg = segment_by_name (name); if (seg == NULL) { - PRINT_ERR ("could not find segment %s in segment_unload, " - "please report to linux390@de.ibm.com\n",name); + pr_err("Unloading unknown DCSS %s failed\n", name); goto out_unlock; } if (atomic_dec_return(&seg->ref_count) != 0) @@ -709,8 +693,7 @@ segment_save(char *name) seg = segment_by_name (name); if (seg == NULL) { - PRINT_ERR("could not find segment %s in segment_save, please " - "report to linux390@de.ibm.com\n", name); + pr_err("Saving unknown DCSS %s failed\n", name); goto out; } @@ -727,14 +710,14 @@ segment_save(char *name) response = 0; cpcmd(cmd1, NULL, 0, &response); if (response) { - PRINT_ERR("segment_save: DEFSEG failed with response code %i\n", - response); + pr_err("Saving a DCSS failed with DEFSEG response code " + "%i\n", response); goto out; } cpcmd(cmd2, NULL, 0, &response); if (response) { - PRINT_ERR("segment_save: SAVESEG failed with response code %i\n", - response); + pr_err("Saving a DCSS failed with SAVESEG response code " + "%i\n", response); goto out; } out: @@ -749,44 +732,41 @@ void segment_warning(int rc, char *seg_name) { switch (rc) { case -ENOENT: - PRINT_WARN("cannot load/query segment %s, " - "does not exist\n", seg_name); + pr_err("DCSS %s cannot be loaded or queried\n", seg_name); break; case -ENOSYS: - PRINT_WARN("cannot load/query segment %s, " - "not running on VM\n", seg_name); + pr_err("DCSS %s cannot be loaded or queried without " + "z/VM\n", seg_name); break; case -EIO: - PRINT_WARN("cannot load/query segment %s, " - "hardware error\n", seg_name); + pr_err("Loading or querying DCSS %s resulted in a " + "hardware error\n", seg_name); break; case -ENOTSUPP: - PRINT_WARN("cannot load/query segment %s, " - "is a multi-part segment\n", seg_name); + 
pr_err("DCSS %s has multiple page ranges and cannot be " + "loaded or queried\n", seg_name); break; case -ENOSPC: - PRINT_WARN("cannot load/query segment %s, " - "overlaps with storage\n", seg_name); + pr_err("DCSS %s overlaps with used storage and cannot " + "be loaded\n", seg_name); break; case -EBUSY: - PRINT_WARN("cannot load/query segment %s, " - "overlaps with already loaded dcss\n", seg_name); + pr_err("%s needs used memory resources and cannot be " + "loaded or queried\n", seg_name); break; case -EPERM: - PRINT_WARN("cannot load/query segment %s, " - "already loaded in incompatible mode\n", seg_name); + pr_err("DCSS %s is already loaded in a different access " + "mode\n", seg_name); break; case -ENOMEM: - PRINT_WARN("cannot load/query segment %s, " - "out of memory\n", seg_name); + pr_err("There is not enough memory to load or query " + "DCSS %s\n", seg_name); break; case -ERANGE: - PRINT_WARN("cannot load/query segment %s, " - "exceeds kernel mapping range\n", seg_name); + pr_err("DCSS %s exceeds the kernel mapping range (%lu) " + "and cannot be loaded\n", seg_name, VMEM_MAX_PHYS); break; default: - PRINT_WARN("cannot load/query segment %s, " - "return value %i\n", seg_name, rc); break; } } diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 5c9cbfc14c4d..f32a5197128d 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -13,6 +13,7 @@ config SUPERH select HAVE_OPROFILE select HAVE_GENERIC_DMA_COHERENT select HAVE_IOREMAP_PROT if MMU + select HAVE_ARCH_TRACEHOOK help The SuperH is a RISC processor targeted for use in embedded systems and consumer electronics; it was also used in the Sega Dreamcast @@ -23,8 +24,10 @@ config SUPERH32 def_bool !SUPERH64 select HAVE_KPROBES select HAVE_KRETPROBES - select HAVE_ARCH_TRACEHOOK select HAVE_FUNCTION_TRACER + select HAVE_FTRACE_MCOUNT_RECORD + select HAVE_DYNAMIC_FTRACE + select HAVE_ARCH_KGDB config SUPERH64 def_bool y if CPU_SH5 @@ -55,8 +58,6 @@ config GENERIC_HARDIRQS config GENERIC_HARDIRQS_NO__DO_IRQ def_bool y - depends on SUPERH32 && (!SH_DREAMCAST && !SH_SH4202_MICRODEV && \ - !SH_7751_SYSTEMH && !HD64461) config GENERIC_IRQ_PROBE def_bool y @@ -85,10 +86,17 @@ config GENERIC_LOCKBREAK config SYS_SUPPORTS_PM bool + depends on !SMP + +config ARCH_SUSPEND_POSSIBLE + def_bool n + +config ARCH_HIBERNATION_POSSIBLE + def_bool n config SYS_SUPPORTS_APM_EMULATION bool - select SYS_SUPPORTS_PM + select ARCH_SUSPEND_POSSIBLE config SYS_SUPPORTS_SMP bool @@ -183,6 +191,11 @@ config CPU_SUBTYPE_SH7619 # SH-2A Processor Support +config CPU_SUBTYPE_SH7201 + bool "Support SH7201 processor" + select CPU_SH2A + select CPU_HAS_FPU + config CPU_SUBTYPE_SH7203 bool "Support SH7203 processor" select CPU_SH2A @@ -456,8 +469,12 @@ config SH_CPU_FREQ depends on CPU_FREQ select CPU_FREQ_TABLE help - This adds the cpufreq driver for SuperH. At present, only - the SH-4 is supported. + This adds the cpufreq driver for SuperH. Any CPU that supports + clock rate rounding through the clock framework can use this + driver. While it will make the kernel slightly larger, this is + harmless for CPUs that don't support rate rounding. The driver + will also generate a notice in the boot log before disabling + itself if the CPU in question is not capable of rate rounding. For details, take a look at <file:Documentation/cpu-freq>. 
@@ -469,9 +486,6 @@ source "arch/sh/drivers/Kconfig" endmenu -config ISA_DMA_API - bool - menu "Kernel features" source kernel/Kconfig.hz @@ -688,49 +702,6 @@ config MAPLE Dreamcast with a serial line terminal or a remote network connection. -config CF_ENABLER - bool "Compact Flash Enabler support" - depends on SOLUTION_ENGINE || SH_SH03 - ---help--- - Compact Flash is a small, removable mass storage device introduced - in 1994 originally as a PCMCIA device. If you say `Y' here, you - compile in support for Compact Flash devices directly connected to - a SuperH processor. A Compact Flash FAQ is available at - <http://www.compactflash.org/faqs/faq.htm>. - - If your board has "Directly Connected" CompactFlash at area 5 or 6, - you may want to enable this option. Then, you can use CF as - primary IDE drive (only tested for SanDisk). - - If in doubt, select 'N'. - -choice - prompt "Compact Flash Connection Area" - depends on CF_ENABLER - default CF_AREA6 - -config CF_AREA5 - bool "Area5" - help - If your board has "Directly Connected" CompactFlash, You should - select the area where your CF is connected to. - - - "Area5" if CompactFlash is connected to Area 5 (0x14000000) - - "Area6" if it is connected to Area 6 (0x18000000) - - "Area6" will work for most boards. - -config CF_AREA6 - bool "Area6" - -endchoice - -config CF_BASE_ADDR - hex - depends on CF_ENABLER - default "0xb8000000" if CF_AREA6 - default "0xb4000000" if CF_AREA5 - source "arch/sh/drivers/pci/Kconfig" source "drivers/pci/Kconfig" @@ -748,13 +719,11 @@ source "fs/Kconfig.binfmt" endmenu menu "Power management options (EXPERIMENTAL)" -depends on EXPERIMENTAL && SYS_SUPPORTS_PM +depends on EXPERIMENTAL -config ARCH_SUSPEND_POSSIBLE - def_bool y - depends on !SMP +source "kernel/power/Kconfig" -source kernel/power/Kconfig +source "drivers/cpuidle/Kconfig" endmenu diff --git a/arch/sh/Kconfig.debug b/arch/sh/Kconfig.debug index e6d2c8b11abd..0d62681f72a0 100644 --- a/arch/sh/Kconfig.debug +++ b/arch/sh/Kconfig.debug @@ -98,18 +98,29 @@ config IRQSTACKS for handling hard and soft interrupts. This can help avoid overflowing the process kernel stacks. -config SH_KGDB - bool "Include KGDB kernel debugger" - select FRAME_POINTER - select DEBUG_INFO - depends on CPU_SH3 || CPU_SH4 +config DUMP_CODE + bool "Show disassembly of nearby code in register dumps" + depends on DEBUG_KERNEL && SUPERH32 + default y if DEBUG_BUGVERBOSE + default n + help + This prints out a code trace of the instructions leading up to + the faulting instruction as a debugging aid. As this does grow + the kernel in size a bit, most users will want to say N here. + + Those looking for more verbose debugging output should say Y. + +config SH_NO_BSS_INIT + bool "Avoid zeroing BSS (to speed-up startup on suitable platforms)" + depends on DEBUG_KERNEL + default n help - Include in-kernel hooks for kgdb, the Linux kernel source level - debugger. See <http://kgdb.sourceforge.net/> for more information. - Unless you are intending to debug the kernel, say N here. + If running in painfully slow environments, such as an RTL + simulation or from remote memory via SHdebug, where the memory + can already be gauranteed to ber zeroed on boot, say Y. -menu "KGDB configuration options" - depends on SH_KGDB + For all other cases, say N. If this option seems perplexing, or + you aren't sure, say N. 
config MORE_COMPILE_OPTIONS bool "Add any additional compile options" @@ -122,85 +133,16 @@ config COMPILE_OPTIONS string "Additional compile arguments" depends on MORE_COMPILE_OPTIONS -config KGDB_NMI - def_bool n - prompt "Enter KGDB on NMI" - -config SH_KGDB_CONSOLE - def_bool n - prompt "Console messages through GDB" - depends on !SERIAL_SH_SCI_CONSOLE && SERIAL_SH_SCI=y - select SERIAL_CORE_CONSOLE - -config KGDB_SYSRQ - def_bool y - prompt "Allow SysRq 'G' to enter KGDB" - depends on MAGIC_SYSRQ - -comment "Serial port setup" - -config KGDB_DEFPORT - int "Port number (ttySCn)" - default "1" - -config KGDB_DEFBAUD - int "Baud rate" - default "115200" - -choice - prompt "Parity" - depends on SH_KGDB - default KGDB_DEFPARITY_N - -config KGDB_DEFPARITY_N - bool "None" - -config KGDB_DEFPARITY_E - bool "Even" - -config KGDB_DEFPARITY_O - bool "Odd" - -endchoice - -choice - prompt "Data bits" - depends on SH_KGDB - default KGDB_DEFBITS_8 - -config KGDB_DEFBITS_8 - bool "8" - -config KGDB_DEFBITS_7 - bool "7" - -endchoice - -endmenu - -if SUPERH64 - -config SH64_PROC_ASIDS - bool "Debug: report ASIDs through /proc/asids" - depends on PROC_FS && MMU - config SH64_SR_WATCH bool "Debug: set SR.WATCH to enable hardware watchpoints and trace" + depends on SUPERH64 config POOR_MANS_STRACE bool "Debug: enable rudimentary strace facility" + depends on SUPERH64 help This option allows system calls to be traced to the console. It also aids in detecting kernel stack underflow. It is useful for debugging early-userland problems (e.g. init incurring fatal exceptions.) -config SH_ALPHANUMERIC - bool "Enable debug outputs to on-board alphanumeric display" - depends on SH_CAYMAN - -config SH_NO_BSS_INIT - bool "Avoid zeroing BSS (to speed-up startup on suitable platforms)" - -endif - endmenu diff --git a/arch/sh/Makefile b/arch/sh/Makefile index c43eb0d7fa3b..4067b0d9287b 100644 --- a/arch/sh/Makefile +++ b/arch/sh/Makefile @@ -32,6 +32,7 @@ cflags-$(CONFIG_CPU_SH4) := $(call cc-option,-m4,) \ $(call cc-option,-mno-implicit-fp,-m4-nofpu) cflags-$(CONFIG_CPU_SH4A) += $(call cc-option,-m4a,) \ $(call cc-option,-m4a-nofpu,) +cflags-$(CONFIG_CPU_SH4AL_DSP) += $(call cc-option,-m4al,) cflags-$(CONFIG_CPU_SH5) := $(call cc-option,-m5-32media-nofpu,) ifeq ($(cflags-y),) @@ -39,22 +40,16 @@ ifeq ($(cflags-y),) # In the case where we are stuck with a compiler that has been uselessly # restricted to a particular ISA, a favourite default of newer GCCs when # extensive multilib targets are not provided, ensure we get the best fit -# regarding FP generation. This is necessary to avoid references to FP -# variants in libgcc where integer variants exist, which otherwise result -# in link errors. This is intentionally stupid (albeit many orders of -# magnitude less than GCC's default behaviour), as anything with a large -# number of multilib targets better have been built correctly for -# the target in mind. +# regarding FP generation. This is intentionally stupid (albeit many +# orders of magnitude less than GCC's default behaviour), as anything +# with a large number of multilib targets better have been built +# correctly for the target in mind. # cflags-y += $(shell $(CC) $(KBUILD_CFLAGS) -print-multi-lib | \ grep nofpu | sed q | sed -e 's/^/-/;s/;.*$$//') -endif - -cflags-$(CONFIG_CPU_BIG_ENDIAN) += -mb -cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -ml - -cflags-y += $(call cc-option,-mno-fdpic) - +# At this point, anything goes. 
+isaflags-y := $(call as-option,-Wa$(comma)-isa=any,) +else # # -Wa,-isa= tuning implies -Wa,-dsp for the versions of binutils that # support it, while -Wa,-dsp by itself limits the range of usable opcodes @@ -67,7 +62,12 @@ isaflags-y := $(call as-option,-Wa$(comma)-isa=$(isa-y),) isaflags-$(CONFIG_SH_DSP) := \ $(call as-option,-Wa$(comma)-isa=$(isa-y),-Wa$(comma)-dsp) +endif +cflags-$(CONFIG_CPU_BIG_ENDIAN) += -mb +cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -ml + +cflags-y += $(call cc-option,-mno-fdpic) cflags-y += $(isaflags-y) -ffreestanding cflags-$(CONFIG_MORE_COMPILE_OPTIONS) += \ @@ -79,6 +79,9 @@ OBJCOPYFLAGS := -O binary -R .note -R .note.gnu.build-id -R .comment \ # Give the various platforms the opportunity to set default image types defaultimage-$(CONFIG_SUPERH32) := zImage defaultimage-$(CONFIG_SH_SH7785LCR) := uImage +defaultimage-$(CONFIG_SH_RSK) := uImage +defaultimage-$(CONFIG_SH_7206_SOLUTION_ENGINE) := vmlinux +defaultimage-$(CONFIG_SH_7619_SOLUTION_ENGINE) := vmlinux # Set some sensible Kbuild defaults KBUILD_DEFCONFIG := shx3_defconfig @@ -132,6 +135,7 @@ machdir-$(CONFIG_SH_LANDISK) += mach-landisk machdir-$(CONFIG_SH_TITAN) += mach-titan machdir-$(CONFIG_SH_LBOX_RE2) += mach-lboxre2 machdir-$(CONFIG_SH_CAYMAN) += mach-cayman +machdir-$(CONFIG_SH_RSK) += mach-rsk ifneq ($(machdir-y),) core-y += $(addprefix arch/sh/boards/, \ @@ -173,11 +177,8 @@ KBUILD_CFLAGS += -pipe $(cflags-y) KBUILD_CPPFLAGS += $(cflags-y) KBUILD_AFLAGS += $(cflags-y) -LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name) - libs-$(CONFIG_SUPERH32) := arch/sh/lib/ $(libs-y) libs-$(CONFIG_SUPERH64) := arch/sh/lib64/ $(libs-y) -libs-y += $(LIBGCC) PHONY += maketools FORCE diff --git a/arch/sh/boards/Kconfig b/arch/sh/boards/Kconfig index 50467f9d0d0b..861914747e4e 100644 --- a/arch/sh/boards/Kconfig +++ b/arch/sh/boards/Kconfig @@ -126,10 +126,12 @@ config SH_RTS7751R2D Select RTS7751R2D if configuring for a Renesas Technology Sales SH-Graphics board. -config SH_RSK7203 - bool "RSK7203" - select GENERIC_GPIO - depends on CPU_SUBTYPE_SH7203 +config SH_RSK + bool "Renesas Starter Kit" + depends on CPU_SUBTYPE_SH7201 || CPU_SUBTYPE_SH7203 + help + Select this option if configuring for any of the RSK+ MCU + evaluation platforms. 
config SH_SDK7780 bool "SDK7780R3" @@ -253,6 +255,7 @@ source "arch/sh/boards/mach-r2d/Kconfig" source "arch/sh/boards/mach-highlander/Kconfig" source "arch/sh/boards/mach-sdk7780/Kconfig" source "arch/sh/boards/mach-migor/Kconfig" +source "arch/sh/boards/mach-rsk/Kconfig" if SH_MAGIC_PANEL_R2 diff --git a/arch/sh/boards/Makefile b/arch/sh/boards/Makefile index d9efa3923721..269ae2be49ef 100644 --- a/arch/sh/boards/Makefile +++ b/arch/sh/boards/Makefile @@ -3,7 +3,6 @@ # obj-$(CONFIG_SH_AP325RXA) += board-ap325rxa.o obj-$(CONFIG_SH_MAGIC_PANEL_R2) += board-magicpanelr2.o -obj-$(CONFIG_SH_RSK7203) += board-rsk7203.o obj-$(CONFIG_SH_SH7785LCR) += board-sh7785lcr.o obj-$(CONFIG_SH_SHMIN) += board-shmin.o obj-$(CONFIG_SH_EDOSK7760) += board-edosk7760.o diff --git a/arch/sh/boards/board-ap325rxa.c b/arch/sh/boards/board-ap325rxa.c index 8881a643ac32..1c67cba6e34f 100644 --- a/arch/sh/boards/board-ap325rxa.c +++ b/arch/sh/boards/board-ap325rxa.c @@ -197,6 +197,10 @@ static struct resource lcdc_resources[] = { .end = 0xfe941fff, .flags = IORESOURCE_MEM, }, + [1] = { + .start = 28, + .flags = IORESOURCE_IRQ, + }, }; static struct platform_device lcdc_device = { @@ -303,6 +307,7 @@ static struct resource ceu_resources[] = { static struct platform_device ceu_device = { .name = "sh_mobile_ceu", + .id = 0, /* "ceu0" clock */ .num_resources = ARRAY_SIZE(ceu_resources), .resource = ceu_resources, .dev = { @@ -344,7 +349,6 @@ static int __init ap325rxa_devices_setup(void) gpio_export(GPIO_PTF7, 0); /* LCDC */ - clk_always_enable("mstp200"); gpio_request(GPIO_FN_LCDD15, NULL); gpio_request(GPIO_FN_LCDD14, NULL); gpio_request(GPIO_FN_LCDD13, NULL); @@ -375,7 +379,6 @@ static int __init ap325rxa_devices_setup(void) gpio_direction_output(GPIO_PTS3, 1); /* CEU */ - clk_always_enable("mstp203"); gpio_request(GPIO_FN_VIO_CLK2, NULL); gpio_request(GPIO_FN_VIO_VD2, NULL); gpio_request(GPIO_FN_VIO_HD2, NULL); diff --git a/arch/sh/boards/board-shmin.c b/arch/sh/boards/board-shmin.c index 5cc0867de5ab..b1dcbbc89188 100644 --- a/arch/sh/boards/board-shmin.c +++ b/arch/sh/boards/board-shmin.c @@ -22,21 +22,13 @@ static void __init init_shmin_irq(void) plat_irq_setup_pins(IRQ_MODE_IRQ); } -static void __iomem *shmin_ioport_map(unsigned long port, unsigned int size) +static void __init shmin_setup(char **cmdline_p) { - static int dummy; - - if ((port & ~0x1f) == SHMIN_NE_BASE) - return (void __iomem *)(SHMIN_IO_BASE + port); - - dummy = 0; - - return &dummy; - + __set_io_port_base(SHMIN_IO_BASE); } static struct sh_machine_vector mv_shmin __initmv = { .mv_name = "SHMIN", + .mv_setup = shmin_setup, .mv_init_irq = init_shmin_irq, - .mv_ioport_map = shmin_ioport_map, }; diff --git a/arch/sh/boards/mach-cayman/Makefile b/arch/sh/boards/mach-cayman/Makefile index 489a8f867368..cafe1ac3b29c 100644 --- a/arch/sh/boards/mach-cayman/Makefile +++ b/arch/sh/boards/mach-cayman/Makefile @@ -2,4 +2,3 @@ # Makefile for the Hitachi Cayman specific parts of the kernel # obj-y := setup.o irq.o -obj-$(CONFIG_HEARTBEAT) += led.o diff --git a/arch/sh/boards/mach-cayman/irq.c b/arch/sh/boards/mach-cayman/irq.c index ceb37ae92c70..da62ad516994 100644 --- a/arch/sh/boards/mach-cayman/irq.c +++ b/arch/sh/boards/mach-cayman/irq.c @@ -94,31 +94,11 @@ static void ack_cayman_irq(unsigned int irq) disable_cayman_irq(irq); } -static void end_cayman_irq(unsigned int irq) -{ - if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) - enable_cayman_irq(irq); -} - -static unsigned int startup_cayman_irq(unsigned int irq) -{ - enable_cayman_irq(irq); 
- return 0; /* never anything pending */ -} - -static void shutdown_cayman_irq(unsigned int irq) -{ - disable_cayman_irq(irq); -} - -struct hw_interrupt_type cayman_irq_type = { - .typename = "Cayman-IRQ", - .startup = startup_cayman_irq, - .shutdown = shutdown_cayman_irq, - .enable = enable_cayman_irq, - .disable = disable_cayman_irq, - .ack = ack_cayman_irq, - .end = end_cayman_irq, +struct irq_chip cayman_irq_type = { + .name = "Cayman-IRQ", + .unmask = enable_cayman_irq, + .mask = disable_cayman_irq, + .mask_ack = ack_cayman_irq, }; int cayman_irq_demux(int evt) @@ -187,8 +167,9 @@ void init_cayman_irq(void) return; } - for (i=0; i<NR_EXT_IRQS; i++) { - irq_desc[START_EXT_IRQS + i].chip = &cayman_irq_type; + for (i = 0; i < NR_EXT_IRQS; i++) { + set_irq_chip_and_handler(START_EXT_IRQS + i, &cayman_irq_type, + handle_level_irq); } /* Setup the SMSC interrupt */ diff --git a/arch/sh/boards/mach-cayman/led.c b/arch/sh/boards/mach-cayman/led.c deleted file mode 100644 index a808eac4ecd6..000000000000 --- a/arch/sh/boards/mach-cayman/led.c +++ /dev/null @@ -1,51 +0,0 @@ -/* - * arch/sh/boards/cayman/led.c - * - * Copyright (C) 2002 Stuart Menefy <stuart.menefy@st.com> - * - * May be copied or modified under the terms of the GNU General Public - * License. See linux/COPYING for more information. - * - * Flash the LEDs - */ -#include <asm/io.h> - -/* -** It is supposed these functions to be used for a low level -** debugging (via Cayman LEDs), hence to be available as soon -** as possible. -** Unfortunately Cayman LEDs relies on Cayman EPLD to be mapped -** (this happen when IRQ are initialized... quite late). -** These triky dependencies should be removed. Temporary, it -** may be enough to NOP until EPLD is mapped. -*/ - -extern unsigned long epld_virt; - -#define LED_ADDR (epld_virt + 0x008) -#define HDSP2534_ADDR (epld_virt + 0x100) - -void mach_led(int position, int value) -{ - if (!epld_virt) - return; - - if (value) - ctrl_outl(0, LED_ADDR); - else - ctrl_outl(1, LED_ADDR); - -} - -void mach_alphanum(int position, unsigned char value) -{ - if (!epld_virt) - return; - - ctrl_outb(value, HDSP2534_ADDR + 0xe0 + (position << 2)); -} - -void mach_alphanum_brightness(int setting) -{ - ctrl_outb(setting & 7, HDSP2534_ADDR + 0xc0); -} diff --git a/arch/sh/boards/mach-dreamcast/irq.c b/arch/sh/boards/mach-dreamcast/irq.c index 67bdc33dd411..f55fc8e795e9 100644 --- a/arch/sh/boards/mach-dreamcast/irq.c +++ b/arch/sh/boards/mach-dreamcast/irq.c @@ -10,106 +10,90 @@ */ #include <linux/irq.h> -#include <asm/io.h> +#include <linux/io.h> #include <asm/irq.h> #include <mach/sysasic.h> -/* Dreamcast System ASIC Hardware Events - - - The Dreamcast's System ASIC (a.k.a. Holly) is responsible for receiving - hardware events from system peripherals and triggering an SH7750 IRQ. - Hardware events can trigger IRQs 13, 11, or 9 depending on which bits are - set in the Event Mask Registers (EMRs). When a hardware event is - triggered, it's corresponding bit in the Event Status Registers (ESRs) - is set, and that bit should be rewritten to the ESR to acknowledge that - event. - - There are three 32-bit ESRs located at 0xa05f8900 - 0xa05f6908. Event - types can be found in include/asm-sh/dreamcast/sysasic.h. There are three - groups of EMRs that parallel the ESRs. Each EMR group corresponds to an - IRQ, so 0xa05f6910 - 0xa05f6918 triggers IRQ 13, 0xa05f6920 - 0xa05f6928 - triggers IRQ 11, and 0xa05f6930 - 0xa05f6938 triggers IRQ 9. 
- - In the kernel, these events are mapped to virtual IRQs so that drivers can - respond to them as they would a normal interrupt. In order to keep this - mapping simple, the events are mapped as: - - 6900/6910 - Events 0-31, IRQ 13 - 6904/6924 - Events 32-63, IRQ 11 - 6908/6938 - Events 64-95, IRQ 9 - -*/ +/* + * Dreamcast System ASIC Hardware Events - + * + * The Dreamcast's System ASIC (a.k.a. Holly) is responsible for receiving + * hardware events from system peripherals and triggering an SH7750 IRQ. + * Hardware events can trigger IRQs 13, 11, or 9 depending on which bits are + * set in the Event Mask Registers (EMRs). When a hardware event is + * triggered, its corresponding bit in the Event Status Registers (ESRs) + * is set, and that bit should be rewritten to the ESR to acknowledge that + * event. + * + * There are three 32-bit ESRs located at 0xa05f6900 - 0xa05f6908. Event + * types can be found in arch/sh/include/mach-dreamcast/mach/sysasic.h. + * There are three groups of EMRs that parallel the ESRs. Each EMR group + * corresponds to an IRQ, so 0xa05f6910 - 0xa05f6918 triggers IRQ 13, + * 0xa05f6920 - 0xa05f6928 triggers IRQ 11, and 0xa05f6930 - 0xa05f6938 + * triggers IRQ 9. + * + * In the kernel, these events are mapped to virtual IRQs so that drivers can + * respond to them as they would a normal interrupt. In order to keep this + * mapping simple, the events are mapped as: + * + * 6900/6910 - Events 0-31, IRQ 13 + * 6904/6924 - Events 32-63, IRQ 11 + * 6908/6938 - Events 64-95, IRQ 9 + * + */ #define ESR_BASE 0x005f6900 /* Base event status register */ #define EMR_BASE 0x005f6910 /* Base event mask register */ -/* Helps us determine the EMR group that this event belongs to: 0 = 0x6910, - 1 = 0x6920, 2 = 0x6930; also determine the event offset */ +/* + * Helps us determine the EMR group that this event belongs to: 0 = 0x6910, + * 1 = 0x6920, 2 = 0x6930; also determine the event offset. + */ #define LEVEL(event) (((event) - HW_EVENT_IRQ_BASE) / 32) /* Return the hardware event's bit positon within the EMR/ESR */ #define EVENT_BIT(event) (((event) - HW_EVENT_IRQ_BASE) & 31) -/* For each of these *_irq routines, the IRQ passed in is the virtual IRQ - (logically mapped to the corresponding bit for the hardware event). */ +/* + * For each of these *_irq routines, the IRQ passed in is the virtual IRQ + * (logically mapped to the corresponding bit for the hardware event). 
+ */ /* Disable the hardware event by masking its bit in its EMR */ static inline void disable_systemasic_irq(unsigned int irq) { - __u32 emr = EMR_BASE + (LEVEL(irq) << 4) + (LEVEL(irq) << 2); - __u32 mask; + __u32 emr = EMR_BASE + (LEVEL(irq) << 4) + (LEVEL(irq) << 2); + __u32 mask; - mask = inl(emr); - mask &= ~(1 << EVENT_BIT(irq)); - outl(mask, emr); + mask = inl(emr); + mask &= ~(1 << EVENT_BIT(irq)); + outl(mask, emr); } /* Enable the hardware event by setting its bit in its EMR */ static inline void enable_systemasic_irq(unsigned int irq) { - __u32 emr = EMR_BASE + (LEVEL(irq) << 4) + (LEVEL(irq) << 2); - __u32 mask; + __u32 emr = EMR_BASE + (LEVEL(irq) << 4) + (LEVEL(irq) << 2); + __u32 mask; - mask = inl(emr); - mask |= (1 << EVENT_BIT(irq)); - outl(mask, emr); + mask = inl(emr); + mask |= (1 << EVENT_BIT(irq)); + outl(mask, emr); } /* Acknowledge a hardware event by writing its bit back to its ESR */ -static void ack_systemasic_irq(unsigned int irq) -{ - __u32 esr = ESR_BASE + (LEVEL(irq) << 2); - disable_systemasic_irq(irq); - outl((1 << EVENT_BIT(irq)), esr); -} - -/* After a IRQ has been ack'd and responded to, it needs to be renabled */ -static void end_systemasic_irq(unsigned int irq) -{ - if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) - enable_systemasic_irq(irq); -} - -static unsigned int startup_systemasic_irq(unsigned int irq) -{ - enable_systemasic_irq(irq); - - return 0; -} - -static void shutdown_systemasic_irq(unsigned int irq) +static void mask_ack_systemasic_irq(unsigned int irq) { - disable_systemasic_irq(irq); + __u32 esr = ESR_BASE + (LEVEL(irq) << 2); + disable_systemasic_irq(irq); + outl((1 << EVENT_BIT(irq)), esr); } -struct hw_interrupt_type systemasic_int = { - .typename = "System ASIC", - .startup = startup_systemasic_irq, - .shutdown = shutdown_systemasic_irq, - .enable = enable_systemasic_irq, - .disable = disable_systemasic_irq, - .ack = ack_systemasic_irq, - .end = end_systemasic_irq, +struct irq_chip systemasic_int = { + .name = "System ASIC", + .mask = disable_systemasic_irq, + .mask_ack = mask_ack_systemasic_irq, + .unmask = enable_systemasic_irq, }; /* @@ -117,37 +101,37 @@ struct hw_interrupt_type systemasic_int = { */ int systemasic_irq_demux(int irq) { - __u32 emr, esr, status, level; - __u32 j, bit; - - switch (irq) { - case 13: - level = 0; - break; - case 11: - level = 1; - break; - case 9: - level = 2; - break; - default: - return irq; - } - emr = EMR_BASE + (level << 4) + (level << 2); - esr = ESR_BASE + (level << 2); - - /* Mask the ESR to filter any spurious, unwanted interrupts */ - status = inl(esr); - status &= inl(emr); - - /* Now scan and find the first set bit as the event to map */ - for (bit = 1, j = 0; j < 32; bit <<= 1, j++) { - if (status & bit) { - irq = HW_EVENT_IRQ_BASE + j + (level << 5); - return irq; - } - } - - /* Not reached */ - return irq; + __u32 emr, esr, status, level; + __u32 j, bit; + + switch (irq) { + case 13: + level = 0; + break; + case 11: + level = 1; + break; + case 9: + level = 2; + break; + default: + return irq; + } + emr = EMR_BASE + (level << 4) + (level << 2); + esr = ESR_BASE + (level << 2); + + /* Mask the ESR to filter any spurious, unwanted interrupts */ + status = inl(esr); + status &= inl(emr); + + /* Now scan and find the first set bit as the event to map */ + for (bit = 1, j = 0; j < 32; bit <<= 1, j++) { + if (status & bit) { + irq = HW_EVENT_IRQ_BASE + j + (level << 5); + return irq; + } + } + + /* Not reached */ + return irq; } diff --git 
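The rewritten comment above corrects the ESR base (0xa05f6900, not 0xa05f8900) and spells out how a hardware event selects an ESR/EMR pair and a bit within it. As a standalone worked example of that arithmetic, using the LEVEL()/EVENT_BIT() macros from this hunk and an assumed HW_EVENT_IRQ_BASE of 48 purely for illustration (the real value lives in the sysasic header; the computed level, addresses and bit do not depend on it):

/* Standalone illustration only -- not part of the patch. */
#include <stdio.h>

#define HW_EVENT_IRQ_BASE	48		/* assumed for the example */
#define ESR_BASE		0x005f6900
#define EMR_BASE		0x005f6910

#define LEVEL(event)		(((event) - HW_EVENT_IRQ_BASE) / 32)
#define EVENT_BIT(event)	(((event) - HW_EVENT_IRQ_BASE) & 31)

int main(void)
{
	/* hardware event 40 sits in the second group (events 32-63, IRQ 11) */
	unsigned int irq = HW_EVENT_IRQ_BASE + 40;
	unsigned int level = LEVEL(irq);				/* 1 */
	unsigned long esr = ESR_BASE + (level << 2);			/* 0x005f6904 */
	unsigned long emr = EMR_BASE + (level << 4) + (level << 2);	/* 0x005f6924 */

	printf("level=%u esr=%#lx emr=%#lx bit=%u\n",
	       level, esr, emr, EVENT_BIT(irq));			/* bit 8 */
	return 0;
}

Event 40 therefore lands in the second group, ESR 0x005f6904 / EMR 0x005f6924, bit 8, matching the "6904/6924 - Events 32-63, IRQ 11" line in the table above.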
a/arch/sh/boards/mach-dreamcast/setup.c b/arch/sh/boards/mach-dreamcast/setup.c index 7d944fc75e93..d1bee4884cd6 100644 --- a/arch/sh/boards/mach-dreamcast/setup.c +++ b/arch/sh/boards/mach-dreamcast/setup.c @@ -28,7 +28,7 @@ #include <asm/machvec.h> #include <mach/sysasic.h> -extern struct hw_interrupt_type systemasic_int; +extern struct irq_chip systemasic_int; extern void aica_time_init(void); extern int gapspci_init(void); extern int systemasic_irq_demux(int); @@ -47,7 +47,8 @@ static void __init dreamcast_setup(char **cmdline_p) /* Assign all virtual IRQs to the System ASIC int. handler */ for (i = HW_EVENT_IRQ_BASE; i < HW_EVENT_IRQ_MAX; i++) - irq_desc[i].chip = &systemasic_int; + set_irq_chip_and_handler(i, &systemasic_int, + handle_level_irq); board_time_init = aica_time_init; diff --git a/arch/sh/boards/mach-edosk7705/Makefile b/arch/sh/boards/mach-edosk7705/Makefile index 14bdd531f116..cd54acb51499 100644 --- a/arch/sh/boards/mach-edosk7705/Makefile +++ b/arch/sh/boards/mach-edosk7705/Makefile @@ -3,4 +3,3 @@ # obj-y := setup.o io.o - diff --git a/arch/sh/boards/mach-edosk7705/io.c b/arch/sh/boards/mach-edosk7705/io.c index 7d153e50a01b..5b9c57c43241 100644 --- a/arch/sh/boards/mach-edosk7705/io.c +++ b/arch/sh/boards/mach-edosk7705/io.c @@ -10,28 +10,24 @@ #include <linux/kernel.h> #include <linux/types.h> -#include <asm/io.h> +#include <linux/io.h> #include <mach/edosk7705.h> #include <asm/addrspace.h> #define SMC_IOADDR 0xA2000000 -#define maybebadio(name,port) \ - printk("bad PC-like io %s for port 0x%lx at 0x%08x\n", \ - #name, (port), (__u32) __builtin_return_address(0)) - /* Map the Ethernet addresses as if it is at 0x300 - 0x320 */ -unsigned long sh_edosk7705_isa_port2addr(unsigned long port) +static unsigned long sh_edosk7705_isa_port2addr(unsigned long port) { - if (port >= 0x300 && port < 0x320) { - /* SMC91C96 registers are 4 byte aligned rather than the - * usual 2 byte! - */ - return SMC_IOADDR + ( (port - 0x300) * 2); - } + /* + * SMC91C96 registers are 4 byte aligned rather than the + * usual 2 byte! 
+ */ + if (port >= 0x300 && port < 0x320) + return SMC_IOADDR + ((port - 0x300) * 2); - maybebadio(sh_edosk7705_isa_port2addr, port); - return port; + maybebadio(port); + return port; } /* Trying to read / write bytes on odd-byte boundaries to the Ethernet @@ -42,53 +38,34 @@ unsigned long sh_edosk7705_isa_port2addr(unsigned long port) */ unsigned char sh_edosk7705_inb(unsigned long port) { - if (port >= 0x300 && port < 0x320 && port & 0x01) { - return (volatile unsigned char)(generic_inw(port -1) >> 8); - } - return *(volatile unsigned char *)sh_edosk7705_isa_port2addr(port); -} + if (port >= 0x300 && port < 0x320 && port & 0x01) + return __raw_readw(port - 1) >> 8; -unsigned int sh_edosk7705_inl(unsigned long port) -{ - return *(volatile unsigned long *)port; + return __raw_readb(sh_edosk7705_isa_port2addr(port)); } void sh_edosk7705_outb(unsigned char value, unsigned long port) { if (port >= 0x300 && port < 0x320 && port & 0x01) { - generic_outw(((unsigned short)value << 8), port -1); + __raw_writew(((unsigned short)value << 8), port - 1); return; } - *(volatile unsigned char *)sh_edosk7705_isa_port2addr(port) = value; -} -void sh_edosk7705_outl(unsigned int value, unsigned long port) -{ - *(volatile unsigned long *)port = value; + __raw_writeb(value, sh_edosk7705_isa_port2addr(port)); } void sh_edosk7705_insb(unsigned long port, void *addr, unsigned long count) { unsigned char *p = addr; - while (count--) *p++ = sh_edosk7705_inb(port); -} -void sh_edosk7705_insl(unsigned long port, void *addr, unsigned long count) -{ - unsigned long *p = (unsigned long*)addr; while (count--) - *p++ = *(volatile unsigned long *)port; + *p++ = sh_edosk7705_inb(port); } void sh_edosk7705_outsb(unsigned long port, const void *addr, unsigned long count) { - unsigned char *p = (unsigned char*)addr; - while (count--) sh_edosk7705_outb(*p++, port); -} + unsigned char *p = (unsigned char *)addr; -void sh_edosk7705_outsl(unsigned long port, const void *addr, unsigned long count) -{ - unsigned long *p = (unsigned long*)addr; - while (count--) sh_edosk7705_outl(*p++, port); + while (count--) + sh_edosk7705_outb(*p++, port); } - diff --git a/arch/sh/boards/mach-edosk7705/setup.c b/arch/sh/boards/mach-edosk7705/setup.c index ab3f47bffdf3..d59225e26fb9 100644 --- a/arch/sh/boards/mach-edosk7705/setup.c +++ b/arch/sh/boards/mach-edosk7705/setup.c @@ -9,6 +9,7 @@ * board by S. Dunn, 2003. 
*/ #include <linux/init.h> +#include <linux/irq.h> #include <asm/machvec.h> #include <mach/edosk7705.h> @@ -26,18 +27,10 @@ static struct sh_machine_vector mv_edosk7705 __initmv = { .mv_nr_irqs = 80, .mv_inb = sh_edosk7705_inb, - .mv_inl = sh_edosk7705_inl, .mv_outb = sh_edosk7705_outb, - .mv_outl = sh_edosk7705_outl, - - .mv_inl_p = sh_edosk7705_inl, - .mv_outl_p = sh_edosk7705_outl, .mv_insb = sh_edosk7705_insb, - .mv_insl = sh_edosk7705_insl, .mv_outsb = sh_edosk7705_outsb, - .mv_outsl = sh_edosk7705_outsl, - .mv_isa_port2addr = sh_edosk7705_isa_port2addr, .mv_init_irq = sh_edosk7705_init_irq, }; diff --git a/arch/sh/boards/mach-hp6xx/pm.c b/arch/sh/boards/mach-hp6xx/pm.c index 64af1f2eef05..d936c1af7620 100644 --- a/arch/sh/boards/mach-hp6xx/pm.c +++ b/arch/sh/boards/mach-hp6xx/pm.c @@ -10,15 +10,91 @@ #include <linux/suspend.h> #include <linux/errno.h> #include <linux/time.h> +#include <linux/delay.h> +#include <linux/gfp.h> #include <asm/io.h> #include <asm/hd64461.h> #include <mach/hp6xx.h> #include <cpu/dac.h> -#include <asm/pm.h> +#include <asm/freq.h> +#include <asm/watchdog.h> + +#define INTR_OFFSET 0x600 #define STBCR 0xffffff82 #define STBCR2 0xffffff88 +#define STBCR_STBY 0x80 +#define STBCR_MSTP2 0x04 + +#define MCR 0xffffff68 +#define RTCNT 0xffffff70 + +#define MCR_RMODE 2 +#define MCR_RFSH 4 + +extern u8 wakeup_start; +extern u8 wakeup_end; + +static void pm_enter(void) +{ + u8 stbcr, csr; + u16 frqcr, mcr; + u32 vbr_new, vbr_old; + + set_bl_bit(); + + /* set wdt */ + csr = sh_wdt_read_csr(); + csr &= ~WTCSR_TME; + csr |= WTCSR_CKS_4096; + sh_wdt_write_csr(csr); + csr = sh_wdt_read_csr(); + sh_wdt_write_cnt(0); + + /* disable PLL1 */ + frqcr = ctrl_inw(FRQCR); + frqcr &= ~(FRQCR_PLLEN | FRQCR_PSTBY); + ctrl_outw(frqcr, FRQCR); + + /* enable standby */ + stbcr = ctrl_inb(STBCR); + ctrl_outb(stbcr | STBCR_STBY | STBCR_MSTP2, STBCR); + + /* set self-refresh */ + mcr = ctrl_inw(MCR); + ctrl_outw(mcr & ~MCR_RFSH, MCR); + + /* set interrupt handler */ + asm volatile("stc vbr, %0" : "=r" (vbr_old)); + vbr_new = get_zeroed_page(GFP_ATOMIC); + udelay(50); + memcpy((void*)(vbr_new + INTR_OFFSET), + &wakeup_start, &wakeup_end - &wakeup_start); + asm volatile("ldc %0, vbr" : : "r" (vbr_new)); + + ctrl_outw(0, RTCNT); + ctrl_outw(mcr | MCR_RFSH | MCR_RMODE, MCR); + + cpu_sleep(); + + asm volatile("ldc %0, vbr" : : "r" (vbr_old)); + + free_page(vbr_new); + + /* enable PLL1 */ + frqcr = ctrl_inw(FRQCR); + frqcr |= FRQCR_PSTBY; + ctrl_outw(frqcr, FRQCR); + udelay(50); + frqcr |= FRQCR_PLLEN; + ctrl_outw(frqcr, FRQCR); + + ctrl_outb(stbcr, STBCR); + + clear_bl_bit(); +} + static int hp6x0_pm_enter(suspend_state_t state) { u8 stbcr, stbcr2; diff --git a/arch/sh/boards/mach-microdev/Makefile b/arch/sh/boards/mach-microdev/Makefile index 1387dd6c85eb..4e3588e8806b 100644 --- a/arch/sh/boards/mach-microdev/Makefile +++ b/arch/sh/boards/mach-microdev/Makefile @@ -2,7 +2,4 @@ # Makefile for the SuperH MicroDev specific parts of the kernel # -obj-y := setup.o irq.o io.o - -obj-$(CONFIG_HEARTBEAT) += led.o - +obj-y := setup.o irq.o io.o fdc37c93xapm.o diff --git a/arch/sh/boards/mach-microdev/fdc37c93xapm.c b/arch/sh/boards/mach-microdev/fdc37c93xapm.c new file mode 100644 index 000000000000..458a7cf5fb46 --- /dev/null +++ b/arch/sh/boards/mach-microdev/fdc37c93xapm.c @@ -0,0 +1,160 @@ +/* + * + * Setup for the SMSC FDC37C93xAPM + * + * Copyright (C) 2003 Sean McGoogan (Sean.McGoogan@superh.com) + * Copyright (C) 2003, 2004 SuperH, Inc. 
+ * Copyright (C) 2004, 2005 Paul Mundt + * + * SuperH SH4-202 MicroDev board support. + * + * May be copied or modified under the terms of the GNU General Public + * License. See linux/COPYING for more information. + */ +#include <linux/init.h> +#include <linux/ioport.h> +#include <linux/io.h> +#include <linux/err.h> +#include <mach/microdev.h> + +#define SMSC_CONFIG_PORT_ADDR (0x3F0) +#define SMSC_INDEX_PORT_ADDR SMSC_CONFIG_PORT_ADDR +#define SMSC_DATA_PORT_ADDR (SMSC_INDEX_PORT_ADDR + 1) + +#define SMSC_ENTER_CONFIG_KEY 0x55 +#define SMSC_EXIT_CONFIG_KEY 0xaa + +#define SMCS_LOGICAL_DEV_INDEX 0x07 /* Logical Device Number */ +#define SMSC_DEVICE_ID_INDEX 0x20 /* Device ID */ +#define SMSC_DEVICE_REV_INDEX 0x21 /* Device Revision */ +#define SMSC_ACTIVATE_INDEX 0x30 /* Activate */ +#define SMSC_PRIMARY_BASE_INDEX 0x60 /* Primary Base Address */ +#define SMSC_SECONDARY_BASE_INDEX 0x62 /* Secondary Base Address */ +#define SMSC_PRIMARY_INT_INDEX 0x70 /* Primary Interrupt Select */ +#define SMSC_SECONDARY_INT_INDEX 0x72 /* Secondary Interrupt Select */ +#define SMSC_HDCS0_INDEX 0xf0 /* HDCS0 Address Decoder */ +#define SMSC_HDCS1_INDEX 0xf1 /* HDCS1 Address Decoder */ + +#define SMSC_IDE1_DEVICE 1 /* IDE #1 logical device */ +#define SMSC_IDE2_DEVICE 2 /* IDE #2 logical device */ +#define SMSC_PARALLEL_DEVICE 3 /* Parallel Port logical device */ +#define SMSC_SERIAL1_DEVICE 4 /* Serial #1 logical device */ +#define SMSC_SERIAL2_DEVICE 5 /* Serial #2 logical device */ +#define SMSC_KEYBOARD_DEVICE 7 /* Keyboard logical device */ +#define SMSC_CONFIG_REGISTERS 8 /* Configuration Registers (Aux I/O) */ + +#define SMSC_READ_INDEXED(index) ({ \ + outb((index), SMSC_INDEX_PORT_ADDR); \ + inb(SMSC_DATA_PORT_ADDR); }) +#define SMSC_WRITE_INDEXED(val, index) ({ \ + outb((index), SMSC_INDEX_PORT_ADDR); \ + outb((val), SMSC_DATA_PORT_ADDR); }) + +#define IDE1_PRIMARY_BASE 0x01f0 /* Task File Registe base for IDE #1 */ +#define IDE1_SECONDARY_BASE 0x03f6 /* Miscellaneous AT registers for IDE #1 */ +#define IDE2_PRIMARY_BASE 0x0170 /* Task File Registe base for IDE #2 */ +#define IDE2_SECONDARY_BASE 0x0376 /* Miscellaneous AT registers for IDE #2 */ + +#define SERIAL1_PRIMARY_BASE 0x03f8 +#define SERIAL2_PRIMARY_BASE 0x02f8 + +#define MSB(x) ( (x) >> 8 ) +#define LSB(x) ( (x) & 0xff ) + + /* General-Purpose base address on CPU-board FPGA */ +#define MICRODEV_FPGA_GP_BASE 0xa6100000ul + +static int __init smsc_superio_setup(void) +{ + + unsigned char devid, devrev; + + /* Initially the chip is in run state */ + /* Put it into configuration state */ + outb(SMSC_ENTER_CONFIG_KEY, SMSC_CONFIG_PORT_ADDR); + + /* Read device ID info */ + devid = SMSC_READ_INDEXED(SMSC_DEVICE_ID_INDEX); + devrev = SMSC_READ_INDEXED(SMSC_DEVICE_REV_INDEX); + + if ((devid == 0x30) && (devrev == 0x01)) + printk("SMSC FDC37C93xAPM SuperIO device detected\n"); + else + return -ENODEV; + + /* Select the keyboard device */ + SMSC_WRITE_INDEXED(SMSC_KEYBOARD_DEVICE, SMCS_LOGICAL_DEV_INDEX); + /* enable it */ + SMSC_WRITE_INDEXED(1, SMSC_ACTIVATE_INDEX); + /* enable the interrupts */ + SMSC_WRITE_INDEXED(MICRODEV_FPGA_IRQ_KEYBOARD, SMSC_PRIMARY_INT_INDEX); + SMSC_WRITE_INDEXED(MICRODEV_FPGA_IRQ_MOUSE, SMSC_SECONDARY_INT_INDEX); + + /* Select the Serial #1 device */ + SMSC_WRITE_INDEXED(SMSC_SERIAL1_DEVICE, SMCS_LOGICAL_DEV_INDEX); + /* enable it */ + SMSC_WRITE_INDEXED(1, SMSC_ACTIVATE_INDEX); + /* program with port addresses */ + SMSC_WRITE_INDEXED(MSB(SERIAL1_PRIMARY_BASE), SMSC_PRIMARY_BASE_INDEX+0); + 
SMSC_WRITE_INDEXED(LSB(SERIAL1_PRIMARY_BASE), SMSC_PRIMARY_BASE_INDEX+1); + SMSC_WRITE_INDEXED(0x00, SMSC_HDCS0_INDEX); + /* enable the interrupts */ + SMSC_WRITE_INDEXED(MICRODEV_FPGA_IRQ_SERIAL1, SMSC_PRIMARY_INT_INDEX); + + /* Select the Serial #2 device */ + SMSC_WRITE_INDEXED(SMSC_SERIAL2_DEVICE, SMCS_LOGICAL_DEV_INDEX); + /* enable it */ + SMSC_WRITE_INDEXED(1, SMSC_ACTIVATE_INDEX); + /* program with port addresses */ + SMSC_WRITE_INDEXED(MSB(SERIAL2_PRIMARY_BASE), SMSC_PRIMARY_BASE_INDEX+0); + SMSC_WRITE_INDEXED(LSB(SERIAL2_PRIMARY_BASE), SMSC_PRIMARY_BASE_INDEX+1); + SMSC_WRITE_INDEXED(0x00, SMSC_HDCS0_INDEX); + /* enable the interrupts */ + SMSC_WRITE_INDEXED(MICRODEV_FPGA_IRQ_SERIAL2, SMSC_PRIMARY_INT_INDEX); + + /* Select the IDE#1 device */ + SMSC_WRITE_INDEXED(SMSC_IDE1_DEVICE, SMCS_LOGICAL_DEV_INDEX); + /* enable it */ + SMSC_WRITE_INDEXED(1, SMSC_ACTIVATE_INDEX); + /* program with port addresses */ + SMSC_WRITE_INDEXED(MSB(IDE1_PRIMARY_BASE), SMSC_PRIMARY_BASE_INDEX+0); + SMSC_WRITE_INDEXED(LSB(IDE1_PRIMARY_BASE), SMSC_PRIMARY_BASE_INDEX+1); + SMSC_WRITE_INDEXED(MSB(IDE1_SECONDARY_BASE), SMSC_SECONDARY_BASE_INDEX+0); + SMSC_WRITE_INDEXED(LSB(IDE1_SECONDARY_BASE), SMSC_SECONDARY_BASE_INDEX+1); + SMSC_WRITE_INDEXED(0x0c, SMSC_HDCS0_INDEX); + SMSC_WRITE_INDEXED(0x00, SMSC_HDCS1_INDEX); + /* select the interrupt */ + SMSC_WRITE_INDEXED(MICRODEV_FPGA_IRQ_IDE1, SMSC_PRIMARY_INT_INDEX); + + /* Select the IDE#2 device */ + SMSC_WRITE_INDEXED(SMSC_IDE2_DEVICE, SMCS_LOGICAL_DEV_INDEX); + /* enable it */ + SMSC_WRITE_INDEXED(1, SMSC_ACTIVATE_INDEX); + /* program with port addresses */ + SMSC_WRITE_INDEXED(MSB(IDE2_PRIMARY_BASE), SMSC_PRIMARY_BASE_INDEX+0); + SMSC_WRITE_INDEXED(LSB(IDE2_PRIMARY_BASE), SMSC_PRIMARY_BASE_INDEX+1); + SMSC_WRITE_INDEXED(MSB(IDE2_SECONDARY_BASE), SMSC_SECONDARY_BASE_INDEX+0); + SMSC_WRITE_INDEXED(LSB(IDE2_SECONDARY_BASE), SMSC_SECONDARY_BASE_INDEX+1); + /* select the interrupt */ + SMSC_WRITE_INDEXED(MICRODEV_FPGA_IRQ_IDE2, SMSC_PRIMARY_INT_INDEX); + + /* Select the configuration registers */ + SMSC_WRITE_INDEXED(SMSC_CONFIG_REGISTERS, SMCS_LOGICAL_DEV_INDEX); + /* enable the appropriate GPIO pins for IDE functionality: + * bit[0] In/Out 1==input; 0==output + * bit[1] Polarity 1==invert; 0==no invert + * bit[2] Int Enb #1 1==Enable Combined IRQ #1; 0==disable + * bit[3:4] Function Select 00==original; 01==Alternate Function #1 + */ + SMSC_WRITE_INDEXED(0x00, 0xc2); /* GP42 = nIDE1_OE */ + SMSC_WRITE_INDEXED(0x01, 0xc5); /* GP45 = IDE1_IRQ */ + SMSC_WRITE_INDEXED(0x00, 0xc6); /* GP46 = nIOROP */ + SMSC_WRITE_INDEXED(0x00, 0xc7); /* GP47 = nIOWOP */ + SMSC_WRITE_INDEXED(0x08, 0xe8); /* GP20 = nIDE2_OE */ + + /* Exit the configuration state */ + outb(SMSC_EXIT_CONFIG_KEY, SMSC_CONFIG_PORT_ADDR); + + return 0; +} +device_initcall(smsc_superio_setup); diff --git a/arch/sh/boards/mach-microdev/irq.c b/arch/sh/boards/mach-microdev/irq.c index 702753cbd28f..b551963579c1 100644 --- a/arch/sh/boards/mach-microdev/irq.c +++ b/arch/sh/boards/mach-microdev/irq.c @@ -67,27 +67,13 @@ static const struct { static void enable_microdev_irq(unsigned int irq); static void disable_microdev_irq(unsigned int irq); - - /* shutdown is same as "disable" */ -#define shutdown_microdev_irq disable_microdev_irq - static void mask_and_ack_microdev(unsigned int); -static void end_microdev_irq(unsigned int irq); - -static unsigned int startup_microdev_irq(unsigned int irq) -{ - enable_microdev_irq(irq); - return 0; /* never anything pending */ -} -static struct hw_interrupt_type 
microdev_irq_type = { - .typename = "MicroDev-IRQ", - .startup = startup_microdev_irq, - .shutdown = shutdown_microdev_irq, - .enable = enable_microdev_irq, - .disable = disable_microdev_irq, +static struct irq_chip microdev_irq_type = { + .name = "MicroDev-IRQ", + .unmask = enable_microdev_irq, + .mask = disable_microdev_irq, .ack = mask_and_ack_microdev, - .end = end_microdev_irq }; static void disable_microdev_irq(unsigned int irq) @@ -130,11 +116,11 @@ static void enable_microdev_irq(unsigned int irq) ctrl_outl(MICRODEV_FPGA_INTC_MASK(fpgaIrq), MICRODEV_FPGA_INTENB_REG); } - /* This functions sets the desired irq handler to be a MicroDev type */ +/* This function sets the desired irq handler to be a MicroDev type */ static void __init make_microdev_irq(unsigned int irq) { disable_irq_nosync(irq); - irq_desc[irq].chip = µdev_irq_type; + set_irq_chip_and_handler(irq, µdev_irq_type, handle_level_irq); disable_microdev_irq(irq); } @@ -143,17 +129,11 @@ static void mask_and_ack_microdev(unsigned int irq) disable_microdev_irq(irq); } -static void end_microdev_irq(unsigned int irq) -{ - if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) - enable_microdev_irq(irq); -} - extern void __init init_microdev_irq(void) { int i; - /* disable interrupts on the FPGA INTC register */ + /* disable interrupts on the FPGA INTC register */ ctrl_outl(~0ul, MICRODEV_FPGA_INTDSB_REG); for (i = 0; i < NUM_EXTERNAL_IRQS; i++) @@ -179,5 +159,3 @@ extern void microdev_print_fpga_intc_status(void) printk("FPGA_INTPRI[3..0] = %08x:%08x:%08x:%08x\n", *intprid, *intpric, *intprib, *intpria); printk("-------------------------------------------------------------------------------\n"); } - - diff --git a/arch/sh/boards/mach-microdev/led.c b/arch/sh/boards/mach-microdev/led.c deleted file mode 100644 index 36e54b47a752..000000000000 --- a/arch/sh/boards/mach-microdev/led.c +++ /dev/null @@ -1,101 +0,0 @@ -/* - * linux/arch/sh/boards/superh/microdev/led.c - * - * Copyright (C) 2002 Stuart Menefy <stuart.menefy@st.com> - * Copyright (C) 2003 Richard Curnow (Richard.Curnow@superh.com) - * - * May be copied or modified under the terms of the GNU General Public - * License. See linux/COPYING for more information. 
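The Cayman, Dreamcast, MicroDev and (later in this series) SystemH hunks all perform the same mechanical conversion: the old hw_interrupt_type with startup/shutdown/enable/disable/ack/end callbacks becomes a genirq struct irq_chip registered through set_irq_chip_and_handler() with handle_level_irq. A minimal sketch of the resulting shape; the myboard_* names are placeholders, not functions from the patch:

/* Sketch of the irq_chip form these board conversions end up with. */
#include <linux/init.h>
#include <linux/irq.h>

static void myboard_mask(unsigned int irq)	{ /* mask in board register */ }
static void myboard_unmask(unsigned int irq)	{ /* unmask in board register */ }
static void myboard_mask_ack(unsigned int irq)	{ myboard_mask(irq); /* + ack */ }

static struct irq_chip myboard_irq_chip = {
	.name		= "MyBoard-IRQ",
	.mask		= myboard_mask,
	.unmask		= myboard_unmask,
	.mask_ack	= myboard_mask_ack,
};

static void __init myboard_init_irq(void)
{
	int i;

	for (i = 0; i < 16; i++)	/* board-specific IRQ range */
		set_irq_chip_and_handler(i, &myboard_irq_chip,
					 handle_level_irq);
}

handle_level_irq() does the mask_ack/unmask sequencing itself, which is why the .startup/.shutdown/.end hooks simply disappear in these conversions.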
- * - */ - -#include <asm/io.h> - -#define LED_REGISTER 0xa6104d20 - -static void mach_led_d9(int value) -{ - unsigned long reg; - reg = ctrl_inl(LED_REGISTER); - reg &= ~1; - reg |= (value & 1); - ctrl_outl(reg, LED_REGISTER); - return; -} - -static void mach_led_d10(int value) -{ - unsigned long reg; - reg = ctrl_inl(LED_REGISTER); - reg &= ~2; - reg |= ((value & 1) << 1); - ctrl_outl(reg, LED_REGISTER); - return; -} - - -#ifdef CONFIG_HEARTBEAT -#include <linux/sched.h> - -static unsigned char banner_table[] = { - 0x11, 0x01, 0x11, 0x01, 0x11, 0x03, - 0x11, 0x01, 0x11, 0x01, 0x13, 0x03, - 0x11, 0x01, 0x13, 0x01, 0x13, 0x01, 0x11, 0x03, - 0x11, 0x03, - 0x11, 0x01, 0x13, 0x01, 0x11, 0x03, - 0x11, 0x01, 0x11, 0x01, 0x11, 0x01, 0x11, 0x07, - 0x13, 0x01, 0x13, 0x03, - 0x11, 0x01, 0x11, 0x03, - 0x13, 0x01, 0x11, 0x01, 0x13, 0x01, 0x11, 0x03, - 0x11, 0x01, 0x13, 0x01, 0x11, 0x03, - 0x13, 0x01, 0x13, 0x01, 0x13, 0x03, - 0x13, 0x01, 0x11, 0x01, 0x11, 0x03, - 0x11, 0x03, - 0x11, 0x01, 0x11, 0x01, 0x11, 0x01, 0x13, 0x07, - 0xff -}; - -static void banner(void) -{ - static int pos = 0; - static int count = 0; - - if (count) { - count--; - } else { - int val = banner_table[pos]; - if (val == 0xff) { - pos = 0; - val = banner_table[pos]; - } - pos++; - mach_led_d10((val >> 4) & 1); - count = 10 * (val & 0xf); - } -} - -/* From heartbeat_harp in the stboards directory */ -/* acts like an actual heart beat -- ie thump-thump-pause... */ -void microdev_heartbeat(void) -{ - static unsigned cnt = 0, period = 0, dist = 0; - - if (cnt == 0 || cnt == dist) - mach_led_d9(1); - else if (cnt == 7 || cnt == dist+7) - mach_led_d9(0); - - if (++cnt > period) { - cnt = 0; - /* The hyperbolic function below modifies the heartbeat period - * length in dependency of the current (5min) load. It goes - * through the points f(0)=126, f(1)=86, f(5)=51, - * f(inf)->30. 
*/ - period = ((672<<FSHIFT)/(5*avenrun[0]+(7<<FSHIFT))) + 30; - dist = period / 4; - } - - banner(); -} - -#endif diff --git a/arch/sh/boards/mach-microdev/setup.c b/arch/sh/boards/mach-microdev/setup.c index a9202fe3cb59..d1df2a4fb9b8 100644 --- a/arch/sh/boards/mach-microdev/setup.c +++ b/arch/sh/boards/mach-microdev/setup.c @@ -17,70 +17,12 @@ #include <mach/microdev.h> #include <asm/io.h> #include <asm/machvec.h> - -extern void microdev_heartbeat(void); - - -/****************************************************************************/ - - - /* - * Setup for the SMSC FDC37C93xAPM - */ -#define SMSC_CONFIG_PORT_ADDR (0x3F0) -#define SMSC_INDEX_PORT_ADDR SMSC_CONFIG_PORT_ADDR -#define SMSC_DATA_PORT_ADDR (SMSC_INDEX_PORT_ADDR + 1) - -#define SMSC_ENTER_CONFIG_KEY 0x55 -#define SMSC_EXIT_CONFIG_KEY 0xaa - -#define SMCS_LOGICAL_DEV_INDEX 0x07 /* Logical Device Number */ -#define SMSC_DEVICE_ID_INDEX 0x20 /* Device ID */ -#define SMSC_DEVICE_REV_INDEX 0x21 /* Device Revision */ -#define SMSC_ACTIVATE_INDEX 0x30 /* Activate */ -#define SMSC_PRIMARY_BASE_INDEX 0x60 /* Primary Base Address */ -#define SMSC_SECONDARY_BASE_INDEX 0x62 /* Secondary Base Address */ -#define SMSC_PRIMARY_INT_INDEX 0x70 /* Primary Interrupt Select */ -#define SMSC_SECONDARY_INT_INDEX 0x72 /* Secondary Interrupt Select */ -#define SMSC_HDCS0_INDEX 0xf0 /* HDCS0 Address Decoder */ -#define SMSC_HDCS1_INDEX 0xf1 /* HDCS1 Address Decoder */ - -#define SMSC_IDE1_DEVICE 1 /* IDE #1 logical device */ -#define SMSC_IDE2_DEVICE 2 /* IDE #2 logical device */ -#define SMSC_PARALLEL_DEVICE 3 /* Parallel Port logical device */ -#define SMSC_SERIAL1_DEVICE 4 /* Serial #1 logical device */ -#define SMSC_SERIAL2_DEVICE 5 /* Serial #2 logical device */ -#define SMSC_KEYBOARD_DEVICE 7 /* Keyboard logical device */ -#define SMSC_CONFIG_REGISTERS 8 /* Configuration Registers (Aux I/O) */ - -#define SMSC_READ_INDEXED(index) ({ \ - outb((index), SMSC_INDEX_PORT_ADDR); \ - inb(SMSC_DATA_PORT_ADDR); }) -#define SMSC_WRITE_INDEXED(val, index) ({ \ - outb((index), SMSC_INDEX_PORT_ADDR); \ - outb((val), SMSC_DATA_PORT_ADDR); }) - -#define IDE1_PRIMARY_BASE 0x01f0 /* Task File Registe base for IDE #1 */ -#define IDE1_SECONDARY_BASE 0x03f6 /* Miscellaneous AT registers for IDE #1 */ -#define IDE2_PRIMARY_BASE 0x0170 /* Task File Registe base for IDE #2 */ -#define IDE2_SECONDARY_BASE 0x0376 /* Miscellaneous AT registers for IDE #2 */ - -#define SERIAL1_PRIMARY_BASE 0x03f8 -#define SERIAL2_PRIMARY_BASE 0x02f8 - -#define MSB(x) ( (x) >> 8 ) -#define LSB(x) ( (x) & 0xff ) - - /* General-Purpose base address on CPU-board FPGA */ -#define MICRODEV_FPGA_GP_BASE 0xa6100000ul - - /* assume a Keyboard Controller is present */ -int microdev_kbd_controller_present = 1; +#include <asm/sizes.h> static struct resource smc91x_resources[] = { [0] = { .start = 0x300, - .end = 0x300 + 0x0001000 - 1, + .end = 0x300 + SZ_4K - 1, .flags = IORESOURCE_MEM, }, [1] = { @@ -97,7 +39,6 @@ static struct platform_device smc91x_device = { .resource = smc91x_resources, }; -#ifdef CONFIG_FB_S1D13XXX static struct s1d13xxxfb_regval s1d13806_initregs[] = { { S1DREG_MISC, 0x00 }, { S1DREG_COM_DISP_MODE, 0x00 }, @@ -216,12 +157,12 @@ static struct s1d13xxxfb_pdata s1d13806_platform_data = { static struct resource s1d13806_resources[] = { [0] = { .start = 0x07200000, - .end = 0x07200000 + 0x00200000 - 1, + .end = 0x07200000 + SZ_2M - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = 0x07000000, - .end = 0x07000000 + 0x00200000 - 1, + .end = 0x07000000 + SZ_2M - 1, .flags = 
IORESOURCE_MEM, }, }; @@ -236,145 +177,24 @@ static struct platform_device s1d13806_device = { .platform_data = &s1d13806_platform_data, }, }; -#endif static struct platform_device *microdev_devices[] __initdata = { &smc91x_device, -#ifdef CONFIG_FB_S1D13XXX &s1d13806_device, -#endif }; static int __init microdev_devices_setup(void) { return platform_add_devices(microdev_devices, ARRAY_SIZE(microdev_devices)); } - -/* - * Setup for the SMSC FDC37C93xAPM - */ -static int __init smsc_superio_setup(void) -{ - - unsigned char devid, devrev; - - /* Initially the chip is in run state */ - /* Put it into configuration state */ - outb(SMSC_ENTER_CONFIG_KEY, SMSC_CONFIG_PORT_ADDR); - - /* Read device ID info */ - devid = SMSC_READ_INDEXED(SMSC_DEVICE_ID_INDEX); - devrev = SMSC_READ_INDEXED(SMSC_DEVICE_REV_INDEX); - if ( (devid==0x30) && (devrev==0x01) ) - { - printk("SMSC FDC37C93xAPM SuperIO device detected\n"); - } - else - { /* not the device identity we expected */ - printk("Not detected a SMSC FDC37C93xAPM SuperIO device (devid=0x%02x, rev=0x%02x)\n", - devid, devrev); - /* inform the keyboard driver that we have no keyboard controller */ - microdev_kbd_controller_present = 0; - /* little point in doing anything else in this functon */ - return 0; - } - - /* Select the keyboard device */ - SMSC_WRITE_INDEXED(SMSC_KEYBOARD_DEVICE, SMCS_LOGICAL_DEV_INDEX); - /* enable it */ - SMSC_WRITE_INDEXED(1, SMSC_ACTIVATE_INDEX); - /* enable the interrupts */ - SMSC_WRITE_INDEXED(MICRODEV_FPGA_IRQ_KEYBOARD, SMSC_PRIMARY_INT_INDEX); - SMSC_WRITE_INDEXED(MICRODEV_FPGA_IRQ_MOUSE, SMSC_SECONDARY_INT_INDEX); - - /* Select the Serial #1 device */ - SMSC_WRITE_INDEXED(SMSC_SERIAL1_DEVICE, SMCS_LOGICAL_DEV_INDEX); - /* enable it */ - SMSC_WRITE_INDEXED(1, SMSC_ACTIVATE_INDEX); - /* program with port addresses */ - SMSC_WRITE_INDEXED(MSB(SERIAL1_PRIMARY_BASE), SMSC_PRIMARY_BASE_INDEX+0); - SMSC_WRITE_INDEXED(LSB(SERIAL1_PRIMARY_BASE), SMSC_PRIMARY_BASE_INDEX+1); - SMSC_WRITE_INDEXED(0x00, SMSC_HDCS0_INDEX); - /* enable the interrupts */ - SMSC_WRITE_INDEXED(MICRODEV_FPGA_IRQ_SERIAL1, SMSC_PRIMARY_INT_INDEX); - - /* Select the Serial #2 device */ - SMSC_WRITE_INDEXED(SMSC_SERIAL2_DEVICE, SMCS_LOGICAL_DEV_INDEX); - /* enable it */ - SMSC_WRITE_INDEXED(1, SMSC_ACTIVATE_INDEX); - /* program with port addresses */ - SMSC_WRITE_INDEXED(MSB(SERIAL2_PRIMARY_BASE), SMSC_PRIMARY_BASE_INDEX+0); - SMSC_WRITE_INDEXED(LSB(SERIAL2_PRIMARY_BASE), SMSC_PRIMARY_BASE_INDEX+1); - SMSC_WRITE_INDEXED(0x00, SMSC_HDCS0_INDEX); - /* enable the interrupts */ - SMSC_WRITE_INDEXED(MICRODEV_FPGA_IRQ_SERIAL2, SMSC_PRIMARY_INT_INDEX); - - /* Select the IDE#1 device */ - SMSC_WRITE_INDEXED(SMSC_IDE1_DEVICE, SMCS_LOGICAL_DEV_INDEX); - /* enable it */ - SMSC_WRITE_INDEXED(1, SMSC_ACTIVATE_INDEX); - /* program with port addresses */ - SMSC_WRITE_INDEXED(MSB(IDE1_PRIMARY_BASE), SMSC_PRIMARY_BASE_INDEX+0); - SMSC_WRITE_INDEXED(LSB(IDE1_PRIMARY_BASE), SMSC_PRIMARY_BASE_INDEX+1); - SMSC_WRITE_INDEXED(MSB(IDE1_SECONDARY_BASE), SMSC_SECONDARY_BASE_INDEX+0); - SMSC_WRITE_INDEXED(LSB(IDE1_SECONDARY_BASE), SMSC_SECONDARY_BASE_INDEX+1); - SMSC_WRITE_INDEXED(0x0c, SMSC_HDCS0_INDEX); - SMSC_WRITE_INDEXED(0x00, SMSC_HDCS1_INDEX); - /* select the interrupt */ - SMSC_WRITE_INDEXED(MICRODEV_FPGA_IRQ_IDE1, SMSC_PRIMARY_INT_INDEX); - - /* Select the IDE#2 device */ - SMSC_WRITE_INDEXED(SMSC_IDE2_DEVICE, SMCS_LOGICAL_DEV_INDEX); - /* enable it */ - SMSC_WRITE_INDEXED(1, SMSC_ACTIVATE_INDEX); - /* program with port addresses */ - 
SMSC_WRITE_INDEXED(MSB(IDE2_PRIMARY_BASE), SMSC_PRIMARY_BASE_INDEX+0); - SMSC_WRITE_INDEXED(LSB(IDE2_PRIMARY_BASE), SMSC_PRIMARY_BASE_INDEX+1); - SMSC_WRITE_INDEXED(MSB(IDE2_SECONDARY_BASE), SMSC_SECONDARY_BASE_INDEX+0); - SMSC_WRITE_INDEXED(LSB(IDE2_SECONDARY_BASE), SMSC_SECONDARY_BASE_INDEX+1); - /* select the interrupt */ - SMSC_WRITE_INDEXED(MICRODEV_FPGA_IRQ_IDE2, SMSC_PRIMARY_INT_INDEX); - - /* Select the configuration registers */ - SMSC_WRITE_INDEXED(SMSC_CONFIG_REGISTERS, SMCS_LOGICAL_DEV_INDEX); - /* enable the appropriate GPIO pins for IDE functionality: - * bit[0] In/Out 1==input; 0==output - * bit[1] Polarity 1==invert; 0==no invert - * bit[2] Int Enb #1 1==Enable Combined IRQ #1; 0==disable - * bit[3:4] Function Select 00==original; 01==Alternate Function #1 - */ - SMSC_WRITE_INDEXED(0x00, 0xc2); /* GP42 = nIDE1_OE */ - SMSC_WRITE_INDEXED(0x01, 0xc5); /* GP45 = IDE1_IRQ */ - SMSC_WRITE_INDEXED(0x00, 0xc6); /* GP46 = nIOROP */ - SMSC_WRITE_INDEXED(0x00, 0xc7); /* GP47 = nIOWOP */ - SMSC_WRITE_INDEXED(0x08, 0xe8); /* GP20 = nIDE2_OE */ - - /* Exit the configuration state */ - outb(SMSC_EXIT_CONFIG_KEY, SMSC_CONFIG_PORT_ADDR); - - return 0; -} - -static void __init microdev_setup(char **cmdline_p) -{ - int * const fpgaRevisionRegister = (int*)(MICRODEV_FPGA_GP_BASE + 0x8ul); - const int fpgaRevision = *fpgaRevisionRegister; - int * const CacheControlRegister = (int*)CCR; - - device_initcall(microdev_devices_setup); - device_initcall(smsc_superio_setup); - - printk("SuperH %s board (FPGA rev: 0x%0x, CCR: 0x%0x)\n", - get_system_type(), fpgaRevision, *CacheControlRegister); -} +device_initcall(microdev_devices_setup); /* * The Machine Vector */ static struct sh_machine_vector mv_sh4202_microdev __initmv = { .mv_name = "SH4-202 MicroDev", - .mv_setup = microdev_setup, - .mv_nr_irqs = 72, /* QQQ need to check this - use the MACRO */ + .mv_nr_irqs = 72, .mv_inb = microdev_inb, .mv_inw = microdev_inw, @@ -398,8 +218,4 @@ static struct sh_machine_vector mv_sh4202_microdev __initmv = { .mv_outsl = microdev_outsl, .mv_init_irq = init_microdev_irq, - -#ifdef CONFIG_HEARTBEAT - .mv_heartbeat = microdev_heartbeat, -#endif }; diff --git a/arch/sh/boards/mach-migor/setup.c b/arch/sh/boards/mach-migor/setup.c index 975281980299..cc1408119c24 100644 --- a/arch/sh/boards/mach-migor/setup.c +++ b/arch/sh/boards/mach-migor/setup.c @@ -89,6 +89,7 @@ static struct resource sh_keysc_resources[] = { static struct platform_device sh_keysc_device = { .name = "sh_keysc", + .id = 0, /* "keysc0" clock */ .num_resources = ARRAY_SIZE(sh_keysc_resources), .resource = sh_keysc_resources, .dev = { @@ -261,6 +262,8 @@ static struct sh_mobile_lcdc_info sh_mobile_lcdc_info = { .sys_bus_cfg = { .ldmt2r = 0x06000a09, .ldmt3r = 0x180e3418, + /* set 1s delay to encourage fsync() */ + .deferred_io_msec = 1000, }, } #endif @@ -273,6 +276,10 @@ static struct resource migor_lcdc_resources[] = { .end = 0xfe941fff, .flags = IORESOURCE_MEM, }, + [1] = { + .start = 28, + .flags = IORESOURCE_IRQ, + }, }; static struct platform_device migor_lcdc_device = { @@ -300,6 +307,7 @@ static void camera_power_on(void) gpio_set_value(GPIO_PTT3, 0); mdelay(10); gpio_set_value(GPIO_PTT3, 1); + mdelay(10); /* wait to let chip come out of reset */ } static void camera_power_off(void) @@ -432,6 +440,7 @@ static struct resource migor_ceu_resources[] = { static struct platform_device migor_ceu_device = { .name = "sh_mobile_ceu", + .id = 0, /* "ceu0" clock */ .num_resources = ARRAY_SIZE(migor_ceu_resources), .resource = migor_ceu_resources, .dev 
= { @@ -479,7 +488,6 @@ static int __init migor_devices_setup(void) ctrl_outl(0x00110080, BSC_CS4WCR); /* KEYSC */ - clk_always_enable("mstp214"); /* KEYSC */ gpio_request(GPIO_FN_KEYOUT0, NULL); gpio_request(GPIO_FN_KEYOUT1, NULL); gpio_request(GPIO_FN_KEYOUT2, NULL); @@ -501,7 +509,6 @@ static int __init migor_devices_setup(void) gpio_request(GPIO_FN_IRQ6, NULL); /* LCD Panel */ - clk_always_enable("mstp200"); /* LCDC */ #ifdef CONFIG_SH_MIGOR_QVGA /* LCDC - QVGA - Enable SYS Interface signals */ gpio_request(GPIO_FN_LCDD17, NULL); gpio_request(GPIO_FN_LCDD16, NULL); @@ -554,7 +561,6 @@ static int __init migor_devices_setup(void) #endif /* CEU */ - clk_always_enable("mstp203"); /* CEU */ gpio_request(GPIO_FN_VIO_CLK2, NULL); gpio_request(GPIO_FN_VIO_VD2, NULL); gpio_request(GPIO_FN_VIO_HD2, NULL); @@ -589,12 +595,3 @@ static int __init migor_devices_setup(void) return platform_add_devices(migor_devices, ARRAY_SIZE(migor_devices)); } __initcall(migor_devices_setup); - -static void __init migor_setup(char **cmdline_p) -{ -} - -static struct sh_machine_vector mv_migor __initmv = { - .mv_name = "Migo-R", - .mv_setup = migor_setup, -}; diff --git a/arch/sh/boards/mach-rsk/Kconfig b/arch/sh/boards/mach-rsk/Kconfig new file mode 100644 index 000000000000..bff095dffc02 --- /dev/null +++ b/arch/sh/boards/mach-rsk/Kconfig @@ -0,0 +1,18 @@ +if SH_RSK + +choice + prompt "RSK+ options" + default SH_RSK7203 + +config SH_RSK7201 + bool "RSK7201" + depends on CPU_SUBTYPE_SH7201 + +config SH_RSK7203 + bool "RSK7203" + select GENERIC_GPIO + depends on CPU_SUBTYPE_SH7203 + +endchoice + +endif diff --git a/arch/sh/boards/mach-rsk/Makefile b/arch/sh/boards/mach-rsk/Makefile new file mode 100644 index 000000000000..498da75ce38b --- /dev/null +++ b/arch/sh/boards/mach-rsk/Makefile @@ -0,0 +1,2 @@ +obj-y := setup.o +obj-$(CONFIG_SH_RSK7203) += devices-rsk7203.o diff --git a/arch/sh/boards/board-rsk7203.c b/arch/sh/boards/mach-rsk/devices-rsk7203.c index 58266f06134a..73f743b9be8d 100644 --- a/arch/sh/boards/board-rsk7203.c +++ b/arch/sh/boards/mach-rsk/devices-rsk7203.c @@ -50,73 +50,6 @@ static struct platform_device smc911x_device = { }, }; -static const char *probes[] = { "cmdlinepart", NULL }; - -static struct mtd_partition *parsed_partitions; - -static struct mtd_partition rsk7203_partitions[] = { - { - .name = "Bootloader", - .offset = 0x00000000, - .size = 0x00040000, - .mask_flags = MTD_WRITEABLE, - }, { - .name = "Kernel", - .offset = MTDPART_OFS_NXTBLK, - .size = 0x001c0000, - }, { - .name = "Flash_FS", - .offset = MTDPART_OFS_NXTBLK, - .size = MTDPART_SIZ_FULL, - } -}; - -static struct physmap_flash_data flash_data = { - .width = 2, -}; - -static struct resource flash_resource = { - .start = 0x20000000, - .end = 0x20400000, - .flags = IORESOURCE_MEM, -}; - -static struct platform_device flash_device = { - .name = "physmap-flash", - .id = -1, - .resource = &flash_resource, - .num_resources = 1, - .dev = { - .platform_data = &flash_data, - }, -}; - -static struct mtd_info *flash_mtd; - -static struct map_info rsk7203_flash_map = { - .name = "RSK+ Flash", - .size = 0x400000, - .bankwidth = 2, -}; - -static void __init set_mtd_partitions(void) -{ - int nr_parts = 0; - - simple_map_init(&rsk7203_flash_map); - flash_mtd = do_map_probe("cfi_probe", &rsk7203_flash_map); - nr_parts = parse_mtd_partitions(flash_mtd, probes, - &parsed_partitions, 0); - /* If there is no partition table, used the hard coded table */ - if (nr_parts <= 0) { - flash_data.parts = rsk7203_partitions; - flash_data.nr_parts = 
ARRAY_SIZE(rsk7203_partitions); - } else { - flash_data.nr_parts = nr_parts; - flash_data.parts = parsed_partitions; - } -} - static struct gpio_led rsk7203_gpio_leds[] = { { .name = "green", @@ -155,7 +88,6 @@ static struct platform_device led_device = { static struct platform_device *rsk7203_devices[] __initdata = { &smc911x_device, - &flash_device, &led_device, }; @@ -165,15 +97,7 @@ static int __init rsk7203_devices_setup(void) gpio_request(GPIO_FN_TXD0, NULL); gpio_request(GPIO_FN_RXD0, NULL); - set_mtd_partitions(); return platform_add_devices(rsk7203_devices, ARRAY_SIZE(rsk7203_devices)); } device_initcall(rsk7203_devices_setup); - -/* - * The Machine Vector - */ -static struct sh_machine_vector mv_rsk7203 __initmv = { - .mv_name = "RSK+7203", -}; diff --git a/arch/sh/boards/mach-rsk/setup.c b/arch/sh/boards/mach-rsk/setup.c new file mode 100644 index 000000000000..af64d030a5c7 --- /dev/null +++ b/arch/sh/boards/mach-rsk/setup.c @@ -0,0 +1,106 @@ +/* + * Renesas Technology Europe RSK+ Support. + * + * Copyright (C) 2008 Paul Mundt + * Copyright (C) 2008 Peter Griffin <pgriffin@mpc-data.co.uk> + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#include <linux/init.h> +#include <linux/types.h> +#include <linux/platform_device.h> +#include <linux/interrupt.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/partitions.h> +#include <linux/mtd/physmap.h> +#include <linux/mtd/map.h> +#include <asm/machvec.h> +#include <asm/io.h> + +static const char *probes[] = { "cmdlinepart", NULL }; + +static struct mtd_partition *parsed_partitions; + +static struct mtd_partition rsk_partitions[] = { + { + .name = "Bootloader", + .offset = 0x00000000, + .size = 0x00040000, + .mask_flags = MTD_WRITEABLE, + }, { + .name = "Kernel", + .offset = MTDPART_OFS_NXTBLK, + .size = 0x001c0000, + }, { + .name = "Flash_FS", + .offset = MTDPART_OFS_NXTBLK, + .size = MTDPART_SIZ_FULL, + } +}; + +static struct physmap_flash_data flash_data = { + .width = 2, +}; + +static struct resource flash_resource = { + .start = 0x20000000, + .end = 0x20400000, + .flags = IORESOURCE_MEM, +}; + +static struct platform_device flash_device = { + .name = "physmap-flash", + .id = -1, + .resource = &flash_resource, + .num_resources = 1, + .dev = { + .platform_data = &flash_data, + }, +}; + +static struct mtd_info *flash_mtd; + +static struct map_info rsk_flash_map = { + .name = "RSK+ Flash", + .size = 0x400000, + .bankwidth = 2, +}; + +static void __init set_mtd_partitions(void) +{ + int nr_parts = 0; + + simple_map_init(&rsk_flash_map); + flash_mtd = do_map_probe("cfi_probe", &rsk_flash_map); + nr_parts = parse_mtd_partitions(flash_mtd, probes, + &parsed_partitions, 0); + /* If there is no partition table, used the hard coded table */ + if (nr_parts <= 0) { + flash_data.parts = rsk_partitions; + flash_data.nr_parts = ARRAY_SIZE(rsk_partitions); + } else { + flash_data.nr_parts = nr_parts; + flash_data.parts = parsed_partitions; + } +} + +static struct platform_device *rsk_devices[] __initdata = { + &flash_device, +}; + +static int __init rsk_devices_setup(void) +{ + set_mtd_partitions(); + return platform_add_devices(rsk_devices, + ARRAY_SIZE(rsk_devices)); +} +device_initcall(rsk_devices_setup); + +/* + * The Machine Vector + */ +static struct sh_machine_vector mv_rsk __initmv = { + .mv_name = "RSK+", +}; diff --git a/arch/sh/boards/mach-se/7343/Makefile b/arch/sh/boards/mach-se/7343/Makefile index 
3024796c6203..4c3666a93790 100644 --- a/arch/sh/boards/mach-se/7343/Makefile +++ b/arch/sh/boards/mach-se/7343/Makefile @@ -2,4 +2,4 @@ # Makefile for the 7343 SolutionEngine specific parts of the kernel # -obj-y := setup.o io.o irq.o +obj-y := setup.o irq.o diff --git a/arch/sh/boards/mach-se/7343/io.c b/arch/sh/boards/mach-se/7343/io.c deleted file mode 100644 index 8741abc1da7b..000000000000 --- a/arch/sh/boards/mach-se/7343/io.c +++ /dev/null @@ -1,273 +0,0 @@ -/* - * arch/sh/boards/se/7343/io.c - * - * I/O routine for SH-Mobile3AS 7343 SolutionEngine. - * - */ -#include <linux/kernel.h> -#include <asm/io.h> -#include <mach-se/mach/se7343.h> - -#define badio(fn, a) panic("bad i/o operation %s for %08lx.", #fn, a) - -struct iop { - unsigned long start, end; - unsigned long base; - struct iop *(*check) (struct iop * p, unsigned long port); - unsigned char (*inb) (struct iop * p, unsigned long port); - unsigned short (*inw) (struct iop * p, unsigned long port); - void (*outb) (struct iop * p, unsigned char value, unsigned long port); - void (*outw) (struct iop * p, unsigned short value, unsigned long port); -}; - -struct iop * -simple_check(struct iop *p, unsigned long port) -{ - static int count; - - if (count < 100) - count++; - - port &= 0xFFFF; - - if ((p->start <= port) && (port <= p->end)) - return p; - else - badio(check, port); -} - -struct iop * -ide_check(struct iop *p, unsigned long port) -{ - if (((0x1f0 <= port) && (port <= 0x1f7)) || (port == 0x3f7)) - return p; - return NULL; -} - -unsigned char -simple_inb(struct iop *p, unsigned long port) -{ - return *(unsigned char *) (p->base + port); -} - -unsigned short -simple_inw(struct iop *p, unsigned long port) -{ - return *(unsigned short *) (p->base + port); -} - -void -simple_outb(struct iop *p, unsigned char value, unsigned long port) -{ - *(unsigned char *) (p->base + port) = value; -} - -void -simple_outw(struct iop *p, unsigned short value, unsigned long port) -{ - *(unsigned short *) (p->base + port) = value; -} - -unsigned char -pcc_inb(struct iop *p, unsigned long port) -{ - unsigned long addr = p->base + port + 0x40000; - unsigned long v; - - if (port & 1) - addr += 0x00400000; - v = *(volatile unsigned char *) addr; - return v; -} - -void -pcc_outb(struct iop *p, unsigned char value, unsigned long port) -{ - unsigned long addr = p->base + port + 0x40000; - - if (port & 1) - addr += 0x00400000; - *(volatile unsigned char *) addr = value; -} - -unsigned char -bad_inb(struct iop *p, unsigned long port) -{ - badio(inb, port); -} - -void -bad_outb(struct iop *p, unsigned char value, unsigned long port) -{ - badio(inw, port); -} - -#ifdef CONFIG_SMC91X -/* MSTLANEX01 LAN at 0xb400:0000 */ -static struct iop laniop = { - .start = 0x00, - .end = 0x0F, - .base = 0x04000000, - .check = simple_check, - .inb = simple_inb, - .inw = simple_inw, - .outb = simple_outb, - .outw = simple_outw, -}; -#endif - -#ifdef CONFIG_NE2000 -/* NE2000 pc card NIC */ -static struct iop neiop = { - .start = 0x280, - .end = 0x29f, - .base = 0xb0600000 + 0x80, /* soft 0x280 -> hard 0x300 */ - .check = simple_check, - .inb = pcc_inb, - .inw = simple_inw, - .outb = pcc_outb, - .outw = simple_outw, -}; -#endif - -#ifdef CONFIG_IDE -/* CF in CF slot */ -static struct iop cfiop = { - .base = 0xb0600000, - .check = ide_check, - .inb = pcc_inb, - .inw = simple_inw, - .outb = pcc_outb, - .outw = simple_outw, -}; -#endif - -static __inline__ struct iop * -port2iop(unsigned long port) -{ - if (0) ; -#if defined(CONFIG_SMC91X) - else if (laniop.check(&laniop, 
port)) - return &laniop; -#endif -#if defined(CONFIG_NE2000) - else if (neiop.check(&neiop, port)) - return &neiop; -#endif -#if defined(CONFIG_IDE) - else if (cfiop.check(&cfiop, port)) - return &cfiop; -#endif - else - return NULL; -} - -static inline void -delay(void) -{ - ctrl_inw(0xac000000); - ctrl_inw(0xac000000); -} - -unsigned char -sh7343se_inb(unsigned long port) -{ - struct iop *p = port2iop(port); - return (p->inb) (p, port); -} - -unsigned char -sh7343se_inb_p(unsigned long port) -{ - unsigned char v = sh7343se_inb(port); - delay(); - return v; -} - -unsigned short -sh7343se_inw(unsigned long port) -{ - struct iop *p = port2iop(port); - return (p->inw) (p, port); -} - -unsigned int -sh7343se_inl(unsigned long port) -{ - badio(inl, port); -} - -void -sh7343se_outb(unsigned char value, unsigned long port) -{ - struct iop *p = port2iop(port); - (p->outb) (p, value, port); -} - -void -sh7343se_outb_p(unsigned char value, unsigned long port) -{ - sh7343se_outb(value, port); - delay(); -} - -void -sh7343se_outw(unsigned short value, unsigned long port) -{ - struct iop *p = port2iop(port); - (p->outw) (p, value, port); -} - -void -sh7343se_outl(unsigned int value, unsigned long port) -{ - badio(outl, port); -} - -void -sh7343se_insb(unsigned long port, void *addr, unsigned long count) -{ - unsigned char *a = addr; - struct iop *p = port2iop(port); - while (count--) - *a++ = (p->inb) (p, port); -} - -void -sh7343se_insw(unsigned long port, void *addr, unsigned long count) -{ - unsigned short *a = addr; - struct iop *p = port2iop(port); - while (count--) - *a++ = (p->inw) (p, port); -} - -void -sh7343se_insl(unsigned long port, void *addr, unsigned long count) -{ - badio(insl, port); -} - -void -sh7343se_outsb(unsigned long port, const void *addr, unsigned long count) -{ - unsigned char *a = (unsigned char *) addr; - struct iop *p = port2iop(port); - while (count--) - (p->outb) (p, *a++, port); -} - -void -sh7343se_outsw(unsigned long port, const void *addr, unsigned long count) -{ - unsigned short *a = (unsigned short *) addr; - struct iop *p = port2iop(port); - while (count--) - (p->outw) (p, *a++, port); -} - -void -sh7343se_outsl(unsigned long port, const void *addr, unsigned long count) -{ - badio(outsw, port); -} diff --git a/arch/sh/boards/mach-se/7343/setup.c b/arch/sh/boards/mach-se/7343/setup.c index 486f40bf9274..4de56f35f419 100644 --- a/arch/sh/boards/mach-se/7343/setup.c +++ b/arch/sh/boards/mach-se/7343/setup.c @@ -1,36 +1,16 @@ #include <linux/init.h> #include <linux/platform_device.h> #include <linux/mtd/physmap.h> +#include <linux/serial_8250.h> +#include <linux/serial_reg.h> +#include <linux/usb/isp116x.h> +#include <linux/delay.h> #include <asm/machvec.h> #include <mach-se/mach/se7343.h> #include <asm/heartbeat.h> #include <asm/irq.h> #include <asm/io.h> -static struct resource smc91x_resources[] = { - [0] = { - .start = 0x10000000, - .end = 0x1000000F, - .flags = IORESOURCE_MEM, - }, - [1] = { - /* - * shared with other devices via externel - * interrupt controller in FPGA... 
- */ - .start = SMC_IRQ, - .end = SMC_IRQ, - .flags = IORESOURCE_IRQ, - }, -}; - -static struct platform_device smc91x_device = { - .name = "smc91x", - .id = 0, - .num_resources = ARRAY_SIZE(smc91x_resources), - .resource = smc91x_resources, -}; - static struct resource heartbeat_resources[] = { [0] = { .start = PA_LED, @@ -94,10 +74,83 @@ static struct platform_device nor_flash_device = { .resource = nor_flash_resources, }; +#define ST16C2550C_FLAGS (UPF_BOOT_AUTOCONF | UPF_IOREMAP) + +static struct plat_serial8250_port serial_platform_data[] = { + [0] = { + .iotype = UPIO_MEM, + .mapbase = 0x16000000, + .regshift = 1, + .flags = ST16C2550C_FLAGS, + .irq = UARTA_IRQ, + .uartclk = 7372800, + }, + [1] = { + .iotype = UPIO_MEM, + .mapbase = 0x17000000, + .regshift = 1, + .flags = ST16C2550C_FLAGS, + .irq = UARTB_IRQ, + .uartclk = 7372800, + }, + { }, +}; + +static struct platform_device uart_device = { + .name = "serial8250", + .id = PLAT8250_DEV_PLATFORM, + .dev = { + .platform_data = serial_platform_data, + }, +}; + +static void isp116x_delay(struct device *dev, int delay) +{ + ndelay(delay); +} + +static struct resource usb_resources[] = { + [0] = { + .start = 0x11800000, + .end = 0x11800001, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = 0x11800002, + .end = 0x11800003, + .flags = IORESOURCE_MEM, + }, + [2] = { + .start = USB_IRQ, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct isp116x_platform_data usb_platform_data = { + .sel15Kres = 1, + .oc_enable = 1, + .int_act_high = 0, + .int_edge_triggered = 0, + .remote_wakeup_enable = 0, + .delay = isp116x_delay, +}; + +static struct platform_device usb_device = { + .name = "isp116x-hcd", + .id = -1, + .num_resources = ARRAY_SIZE(usb_resources), + .resource = usb_resources, + .dev = { + .platform_data = &usb_platform_data, + }, + +}; + static struct platform_device *sh7343se_platform_devices[] __initdata = { - &smc91x_device, &heartbeat_device, &nor_flash_device, + &uart_device, + &usb_device, }; static int __init sh7343se_devices_setup(void) @@ -126,27 +179,6 @@ static void __init sh7343se_setup(char **cmdline_p) static struct sh_machine_vector mv_7343se __initmv = { .mv_name = "SolutionEngine 7343", .mv_setup = sh7343se_setup, - .mv_nr_irqs = 108, - .mv_inb = sh7343se_inb, - .mv_inw = sh7343se_inw, - .mv_inl = sh7343se_inl, - .mv_outb = sh7343se_outb, - .mv_outw = sh7343se_outw, - .mv_outl = sh7343se_outl, - - .mv_inb_p = sh7343se_inb_p, - .mv_inw_p = sh7343se_inw, - .mv_inl_p = sh7343se_inl, - .mv_outb_p = sh7343se_outb_p, - .mv_outw_p = sh7343se_outw, - .mv_outl_p = sh7343se_outl, - - .mv_insb = sh7343se_insb, - .mv_insw = sh7343se_insw, - .mv_insl = sh7343se_insl, - .mv_outsb = sh7343se_outsb, - .mv_outsw = sh7343se_outsw, - .mv_outsl = sh7343se_outsl, - + .mv_nr_irqs = SE7343_FPGA_IRQ_BASE + SE7343_FPGA_IRQ_NR, .mv_init_irq = init_7343se_IRQ, }; diff --git a/arch/sh/boards/mach-se/770x/setup.c b/arch/sh/boards/mach-se/770x/setup.c index 9123d9687bf7..527eb6b12610 100644 --- a/arch/sh/boards/mach-se/770x/setup.c +++ b/arch/sh/boards/mach-se/770x/setup.c @@ -8,8 +8,9 @@ */ #include <linux/init.h> #include <linux/platform_device.h> -#include <asm/machvec.h> #include <mach-se/mach/se.h> +#include <mach-se/mach/mrshpc.h> +#include <asm/machvec.h> #include <asm/io.h> #include <asm/smc37c93x.h> #include <asm/heartbeat.h> @@ -175,6 +176,7 @@ static struct platform_device *se_devices[] __initdata = { static int __init se_devices_setup(void) { + mrshpc_setup_windows(); return platform_add_devices(se_devices, ARRAY_SIZE(se_devices)); } 
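The board files touched in this series (MicroDev, Migo-R, the new mach-rsk, SE7343, SH03, and the 770x hunk just above) converge on one registration idiom: static resources, a platform_device wrapping them, and a device_initcall() that hands an array of devices to platform_add_devices(). A stripped-down sketch of that idiom with hypothetical names (mydev/myboard), not code from any of these boards:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static struct resource mydev_resources[] = {
	[0] = {
		.start	= 0x10000000,		/* made-up MMIO window */
		.end	= 0x10000000 + 0xff,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= 42,			/* made-up IRQ number */
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device mydev_device = {
	.name		= "mydev",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(mydev_resources),
	.resource	= mydev_resources,
};

static struct platform_device *myboard_devices[] __initdata = {
	&mydev_device,
};

static int __init myboard_devices_setup(void)
{
	return platform_add_devices(myboard_devices,
				    ARRAY_SIZE(myboard_devices));
}
device_initcall(myboard_devices_setup);

Drivers bind by .name; the explicit .id values added elsewhere in the series (the "ceu0"/"keysc0" comments) are there so per-instance resources such as clocks can be matched to a specific device.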
device_initcall(se_devices_setup); diff --git a/arch/sh/boards/mach-se/7721/setup.c b/arch/sh/boards/mach-se/7721/setup.c index d3fc80ff4d83..55af4c36b43a 100644 --- a/arch/sh/boards/mach-se/7721/setup.c +++ b/arch/sh/boards/mach-se/7721/setup.c @@ -12,8 +12,9 @@ */ #include <linux/init.h> #include <linux/platform_device.h> -#include <asm/machvec.h> #include <mach-se/mach/se7721.h> +#include <mach-se/mach/mrshpc.h> +#include <asm/machvec.h> #include <asm/io.h> #include <asm/heartbeat.h> @@ -74,8 +75,8 @@ static struct platform_device *se7721_devices[] __initdata = { static int __init se7721_devices_setup(void) { - return platform_add_devices(se7721_devices, - ARRAY_SIZE(se7721_devices)); + mrshpc_setup_windows(); + return platform_add_devices(se7721_devices, ARRAY_SIZE(se7721_devices)); } device_initcall(se7721_devices_setup); diff --git a/arch/sh/boards/mach-se/7722/setup.c b/arch/sh/boards/mach-se/7722/setup.c index fe6f96517e12..af84904ed86f 100644 --- a/arch/sh/boards/mach-se/7722/setup.c +++ b/arch/sh/boards/mach-se/7722/setup.c @@ -15,9 +15,10 @@ #include <linux/ata_platform.h> #include <linux/input.h> #include <linux/smc91x.h> +#include <mach-se/mach/se7722.h> +#include <mach-se/mach/mrshpc.h> #include <asm/machvec.h> #include <asm/clock.h> -#include <mach-se/mach/se7722.h> #include <asm/io.h> #include <asm/heartbeat.h> #include <asm/sh_keysc.h> @@ -130,6 +131,7 @@ static struct resource sh_keysc_resources[] = { static struct platform_device sh_keysc_device = { .name = "sh_keysc", + .id = 0, /* "keysc0" clock */ .num_resources = ARRAY_SIZE(sh_keysc_resources), .resource = sh_keysc_resources, .dev = { @@ -146,10 +148,8 @@ static struct platform_device *se7722_devices[] __initdata = { static int __init se7722_devices_setup(void) { - clk_always_enable("mstp214"); /* KEYSC */ - - return platform_add_devices(se7722_devices, - ARRAY_SIZE(se7722_devices)); + mrshpc_setup_windows(); + return platform_add_devices(se7722_devices, ARRAY_SIZE(se7722_devices)); } device_initcall(se7722_devices_setup); diff --git a/arch/sh/boards/mach-sh03/setup.c b/arch/sh/boards/mach-sh03/setup.c index 5771219be3fd..74cfb4b8b03d 100644 --- a/arch/sh/boards/mach-sh03/setup.c +++ b/arch/sh/boards/mach-sh03/setup.c @@ -9,6 +9,7 @@ #include <linux/irq.h> #include <linux/pci.h> #include <linux/platform_device.h> +#include <linux/ata_platform.h> #include <asm/io.h> #include <asm/rtc.h> #include <mach-sh03/mach/io.h> @@ -20,19 +21,6 @@ static void __init init_sh03_IRQ(void) plat_irq_setup_pins(IRQ_MODE_IRQ); } -extern void *cf_io_base; - -static void __iomem *sh03_ioport_map(unsigned long port, unsigned int size) -{ - if (PXSEG(port)) - return (void __iomem *)port; - /* CompactFlash (IDE) */ - if (((port >= 0x1f0) && (port <= 0x1f7)) || (port == 0x3f6)) - return (void __iomem *)((unsigned long)cf_io_base + port); - - return (void __iomem *)(port + PCI_IO_BASE); -} - /* arch/sh/boards/sh03/rtc.c */ void sh03_time_init(void); @@ -41,6 +29,30 @@ static void __init sh03_setup(char **cmdline_p) board_time_init = sh03_time_init; } +static struct resource cf_ide_resources[] = { + [0] = { + .start = 0x1f0, + .end = 0x1f0 + 8, + .flags = IORESOURCE_IO, + }, + [1] = { + .start = 0x1f0 + 0x206, + .end = 0x1f0 +8 + 0x206 + 8, + .flags = IORESOURCE_IO, + }, + [2] = { + .start = IRL2_IRQ, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device cf_ide_device = { + .name = "pata_platform", + .id = -1, + .num_resources = ARRAY_SIZE(cf_ide_resources), + .resource = cf_ide_resources, +}; + static struct resource 
heartbeat_resources[] = { [0] = { .start = 0xa0800000, @@ -58,10 +70,30 @@ static struct platform_device heartbeat_device = { static struct platform_device *sh03_devices[] __initdata = { &heartbeat_device, + &cf_ide_device, }; static int __init sh03_devices_setup(void) { + pgprot_t prot; + unsigned long paddrbase; + void *cf_ide_base; + + /* open I/O area window */ + paddrbase = virt_to_phys((void *)PA_AREA5_IO); + prot = PAGE_KERNEL_PCC(1, _PAGE_PCC_IO16); + cf_ide_base = p3_ioremap(paddrbase, PAGE_SIZE, prot.pgprot); + if (!cf_ide_base) { + printk("allocate_cf_area : can't open CF I/O window!\n"); + return -ENOMEM; + } + + /* IDE cmd address : 0x1f0-0x1f7 and 0x3f6 */ + cf_ide_resources[0].start += (unsigned long)cf_ide_base; + cf_ide_resources[0].end += (unsigned long)cf_ide_base; + cf_ide_resources[1].start += (unsigned long)cf_ide_base; + cf_ide_resources[1].end += (unsigned long)cf_ide_base; + return platform_add_devices(sh03_devices, ARRAY_SIZE(sh03_devices)); } __initcall(sh03_devices_setup); @@ -70,6 +102,5 @@ static struct sh_machine_vector mv_sh03 __initmv = { .mv_name = "Interface (CTP/PCI-SH03)", .mv_setup = sh03_setup, .mv_nr_irqs = 48, - .mv_ioport_map = sh03_ioport_map, .mv_init_irq = init_sh03_IRQ, }; diff --git a/arch/sh/boards/mach-systemh/irq.c b/arch/sh/boards/mach-systemh/irq.c index 538406872a89..986a0e71d220 100644 --- a/arch/sh/boards/mach-systemh/irq.c +++ b/arch/sh/boards/mach-systemh/irq.c @@ -12,8 +12,8 @@ #include <linux/init.h> #include <linux/irq.h> #include <linux/interrupt.h> +#include <linux/io.h> -#include <asm/io.h> #include <mach/systemh7751.h> #include <asm/smc37c93x.h> @@ -24,35 +24,17 @@ static unsigned long *systemh_irq_mask_register = (unsigned long *)0xB3F10004; static unsigned long *systemh_irq_request_register = (unsigned long *)0xB3F10000; /* forward declaration */ -static unsigned int startup_systemh_irq(unsigned int irq); -static void shutdown_systemh_irq(unsigned int irq); static void enable_systemh_irq(unsigned int irq); static void disable_systemh_irq(unsigned int irq); static void mask_and_ack_systemh(unsigned int); -static void end_systemh_irq(unsigned int irq); -/* hw_interrupt_type */ -static struct hw_interrupt_type systemh_irq_type = { - .typename = " SystemH Register", - .startup = startup_systemh_irq, - .shutdown = shutdown_systemh_irq, - .enable = enable_systemh_irq, - .disable = disable_systemh_irq, +static struct irq_chip systemh_irq_type = { + .name = " SystemH Register", + .unmask = enable_systemh_irq, + .mask = disable_systemh_irq, .ack = mask_and_ack_systemh, - .end = end_systemh_irq }; -static unsigned int startup_systemh_irq(unsigned int irq) -{ - enable_systemh_irq(irq); - return 0; /* never anything pending */ -} - -static void shutdown_systemh_irq(unsigned int irq) -{ - disable_systemh_irq(irq); -} - static void disable_systemh_irq(unsigned int irq) { if (systemh_irq_mask_register) { @@ -86,16 +68,9 @@ static void mask_and_ack_systemh(unsigned int irq) disable_systemh_irq(irq); } -static void end_systemh_irq(unsigned int irq) -{ - if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) - enable_systemh_irq(irq); -} - void make_systemh_irq(unsigned int irq) { disable_irq_nosync(irq); - irq_desc[irq].chip = &systemh_irq_type; + set_irq_chip_and_handler(irq, &systemh_irq_type, handle_level_irq); disable_systemh_irq(irq); } - diff --git a/arch/sh/cchips/hd6446x/hd64461.c b/arch/sh/cchips/hd6446x/hd64461.c index f1a4a0763c59..27ceeb948bb1 100644 --- a/arch/sh/cchips/hd6446x/hd64461.c +++ 
b/arch/sh/cchips/hd6446x/hd64461.c @@ -10,99 +10,49 @@ #include <linux/interrupt.h> #include <linux/init.h> #include <linux/irq.h> -#include <asm/io.h> +#include <linux/io.h> #include <asm/irq.h> #include <asm/hd64461.h> /* This belongs in cpu specific */ #define INTC_ICR1 0xA4140010UL -static void disable_hd64461_irq(unsigned int irq) +static void hd64461_mask_irq(unsigned int irq) { unsigned short nimr; unsigned short mask = 1 << (irq - HD64461_IRQBASE); - nimr = inw(HD64461_NIMR); + nimr = __raw_readw(HD64461_NIMR); nimr |= mask; - outw(nimr, HD64461_NIMR); + __raw_writew(nimr, HD64461_NIMR); } -static void enable_hd64461_irq(unsigned int irq) +static void hd64461_unmask_irq(unsigned int irq) { unsigned short nimr; unsigned short mask = 1 << (irq - HD64461_IRQBASE); - nimr = inw(HD64461_NIMR); + nimr = __raw_readw(HD64461_NIMR); nimr &= ~mask; - outw(nimr, HD64461_NIMR); + __raw_writew(nimr, HD64461_NIMR); } -static void mask_and_ack_hd64461(unsigned int irq) +static void hd64461_mask_and_ack_irq(unsigned int irq) { - disable_hd64461_irq(irq); + hd64461_mask_irq(irq); #ifdef CONFIG_HD64461_ENABLER if (irq == HD64461_IRQBASE + 13) - outb(0x00, HD64461_PCC1CSCR); + __raw_writeb(0x00, HD64461_PCC1CSCR); #endif } -static void end_hd64461_irq(unsigned int irq) -{ - if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) - enable_hd64461_irq(irq); -} - -static unsigned int startup_hd64461_irq(unsigned int irq) -{ - enable_hd64461_irq(irq); - return 0; -} - -static void shutdown_hd64461_irq(unsigned int irq) -{ - disable_hd64461_irq(irq); -} - -static struct hw_interrupt_type hd64461_irq_type = { - .typename = "HD64461-IRQ", - .startup = startup_hd64461_irq, - .shutdown = shutdown_hd64461_irq, - .enable = enable_hd64461_irq, - .disable = disable_hd64461_irq, - .ack = mask_and_ack_hd64461, - .end = end_hd64461_irq, +static struct irq_chip hd64461_irq_chip = { + .name = "HD64461-IRQ", + .mask = hd64461_mask_irq, + .mask_ack = hd64461_mask_and_ack_irq, + .unmask = hd64461_unmask_irq, }; -static irqreturn_t hd64461_interrupt(int irq, void *dev_id) -{ - printk(KERN_INFO - "HD64461: spurious interrupt, nirr: 0x%x nimr: 0x%x\n", - inw(HD64461_NIRR), inw(HD64461_NIMR)); - - return IRQ_NONE; -} - -static struct { - int (*func) (int, void *); - void *dev; -} hd64461_demux[HD64461_IRQ_NUM]; - -void hd64461_register_irq_demux(int irq, - int (*demux) (int irq, void *dev), void *dev) -{ - hd64461_demux[irq - HD64461_IRQBASE].func = demux; - hd64461_demux[irq - HD64461_IRQBASE].dev = dev; -} - -EXPORT_SYMBOL(hd64461_register_irq_demux); - -void hd64461_unregister_irq_demux(int irq) -{ - hd64461_demux[irq - HD64461_IRQBASE].func = 0; -} - -EXPORT_SYMBOL(hd64461_unregister_irq_demux); - int hd64461_irq_demux(int irq) { if (irq == CONFIG_HD64461_IRQ) { @@ -115,25 +65,11 @@ int hd64461_irq_demux(int irq) for (bit = 1, i = 0; i < 16; bit <<= 1, i++) if (nirr & bit) break; - if (i == 16) - irq = CONFIG_HD64461_IRQ; - else { - irq = HD64461_IRQBASE + i; - if (hd64461_demux[i].func != 0) { - irq = hd64461_demux[i].func(irq, hd64461_demux[i].dev); - } - } + irq = HD64461_IRQBASE + i; } return irq; } -static struct irqaction irq0 = { - .handler = hd64461_interrupt, - .flags = IRQF_DISABLED, - .mask = CPU_MASK_NONE, - .name = "HD64461", -}; - int __init setup_hd64461(void) { int i; @@ -146,22 +82,21 @@ int __init setup_hd64461(void) CONFIG_HD64461_IOBASE, CONFIG_HD64461_IRQ, HD64461_IRQBASE, HD64461_IRQBASE + 15); -#if defined(CONFIG_CPU_SUBTYPE_SH7709) /* Should be at processor specific part.. 
*/ - outw(0x2240, INTC_ICR1); +/* Should be at processor specific part.. */ +#if defined(CONFIG_CPU_SUBTYPE_SH7709) + __raw_writew(0x2240, INTC_ICR1); #endif - outw(0xffff, HD64461_NIMR); + __raw_writew(0xffff, HD64461_NIMR); /* IRQ 80 -> 95 belongs to HD64461 */ - for (i = HD64461_IRQBASE; i < HD64461_IRQBASE + 16; i++) { - irq_desc[i].chip = &hd64461_irq_type; - } - - setup_irq(CONFIG_HD64461_IRQ, &irq0); + for (i = HD64461_IRQBASE; i < HD64461_IRQBASE + 16; i++) + set_irq_chip_and_handler(i, &hd64461_irq_chip, + handle_level_irq); #ifdef CONFIG_HD64461_ENABLER printk(KERN_INFO "HD64461: enabling PCMCIA devices\n"); - outb(0x4c, HD64461_PCC1CSCIER); - outb(0x00, HD64461_PCC1CSCR); + __raw_writeb(0x4c, HD64461_PCC1CSCIER); + __raw_writeb(0x00, HD64461_PCC1CSCR); #endif return 0; diff --git a/arch/sh/configs/edosk7705_defconfig b/arch/sh/configs/edosk7705_defconfig new file mode 100644 index 000000000000..8f4329fbbd39 --- /dev/null +++ b/arch/sh/configs/edosk7705_defconfig @@ -0,0 +1,438 @@ +# +# Automatically generated make config: don't edit +# Linux kernel version: 2.6.28-rc6 +# Wed Dec 17 13:53:02 2008 +# +CONFIG_SUPERH=y +CONFIG_SUPERH32=y +CONFIG_ARCH_DEFCONFIG="arch/sh/configs/shx3_defconfig" +CONFIG_RWSEM_GENERIC_SPINLOCK=y +CONFIG_GENERIC_FIND_NEXT_BIT=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_GENERIC_HARDIRQS=y +CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y +CONFIG_GENERIC_IRQ_PROBE=y +# CONFIG_GENERIC_GPIO is not set +CONFIG_GENERIC_TIME=y +CONFIG_GENERIC_CLOCKEVENTS=y +# CONFIG_ARCH_SUSPEND_POSSIBLE is not set +# CONFIG_ARCH_HIBERNATION_POSSIBLE is not set +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_HAVE_LATENCYTOP_SUPPORT=y +# CONFIG_ARCH_HAS_ILOG2_U32 is not set +# CONFIG_ARCH_HAS_ILOG2_U64 is not set +CONFIG_ARCH_NO_VIRT_TO_BUS=y +CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" + +# +# General setup +# +# CONFIG_EXPERIMENTAL is not set +CONFIG_BROKEN_ON_SMP=y +CONFIG_INIT_ENV_ARG_LIMIT=32 +CONFIG_LOCALVERSION="" +# CONFIG_LOCALVERSION_AUTO is not set +# CONFIG_SYSVIPC is not set +# CONFIG_BSD_PROCESS_ACCT is not set +# CONFIG_IKCONFIG is not set +CONFIG_LOG_BUF_SHIFT=17 +# CONFIG_CGROUPS is not set +# CONFIG_RELAY is not set +# CONFIG_NAMESPACES is not set +# CONFIG_BLK_DEV_INITRD is not set +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_EMBEDDED=y +# CONFIG_UID16 is not set +# CONFIG_SYSCTL_SYSCALL is not set +# CONFIG_KALLSYMS is not set +# CONFIG_HOTPLUG is not set +# CONFIG_PRINTK is not set +# CONFIG_BUG is not set +# CONFIG_ELF_CORE is not set +# CONFIG_COMPAT_BRK is not set +# CONFIG_BASE_FULL is not set +# CONFIG_FUTEX is not set +# CONFIG_EPOLL is not set +# CONFIG_SIGNALFD is not set +# CONFIG_TIMERFD is not set +# CONFIG_EVENTFD is not set +CONFIG_SHMEM=y +# CONFIG_AIO is not set +# CONFIG_VM_EVENT_COUNTERS is not set +# CONFIG_SLAB is not set +CONFIG_SLUB=y +# CONFIG_SLOB is not set +# CONFIG_PROFILING is not set +# CONFIG_MARKERS is not set +CONFIG_HAVE_OPROFILE=y +CONFIG_HAVE_IOREMAP_PROT=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +CONFIG_HAVE_CLK=y +CONFIG_HAVE_GENERIC_DMA_COHERENT=y +# CONFIG_TINY_SHMEM is not set +CONFIG_BASE_SMALL=1 +# CONFIG_MODULES is not set +# CONFIG_BLOCK is not set +CONFIG_CLASSIC_RCU=y +# CONFIG_FREEZER is not set + +# +# System type +# +CONFIG_CPU_SH3=y +# CONFIG_CPU_SUBTYPE_SH7619 is not set +# CONFIG_CPU_SUBTYPE_SH7201 is not set +# CONFIG_CPU_SUBTYPE_SH7203 is not set +# CONFIG_CPU_SUBTYPE_SH7206 is not set +# CONFIG_CPU_SUBTYPE_SH7263 is not set +# CONFIG_CPU_SUBTYPE_MXG 
is not set +CONFIG_CPU_SUBTYPE_SH7705=y +# CONFIG_CPU_SUBTYPE_SH7706 is not set +# CONFIG_CPU_SUBTYPE_SH7707 is not set +# CONFIG_CPU_SUBTYPE_SH7708 is not set +# CONFIG_CPU_SUBTYPE_SH7709 is not set +# CONFIG_CPU_SUBTYPE_SH7710 is not set +# CONFIG_CPU_SUBTYPE_SH7712 is not set +# CONFIG_CPU_SUBTYPE_SH7720 is not set +# CONFIG_CPU_SUBTYPE_SH7721 is not set +# CONFIG_CPU_SUBTYPE_SH7750 is not set +# CONFIG_CPU_SUBTYPE_SH7091 is not set +# CONFIG_CPU_SUBTYPE_SH7750R is not set +# CONFIG_CPU_SUBTYPE_SH7750S is not set +# CONFIG_CPU_SUBTYPE_SH7751 is not set +# CONFIG_CPU_SUBTYPE_SH7751R is not set +# CONFIG_CPU_SUBTYPE_SH7760 is not set +# CONFIG_CPU_SUBTYPE_SH4_202 is not set +# CONFIG_CPU_SUBTYPE_SH7723 is not set +# CONFIG_CPU_SUBTYPE_SH7763 is not set +# CONFIG_CPU_SUBTYPE_SH7770 is not set +# CONFIG_CPU_SUBTYPE_SH7780 is not set +# CONFIG_CPU_SUBTYPE_SH7785 is not set +# CONFIG_CPU_SUBTYPE_SHX3 is not set +# CONFIG_CPU_SUBTYPE_SH7343 is not set +# CONFIG_CPU_SUBTYPE_SH7722 is not set +# CONFIG_CPU_SUBTYPE_SH7366 is not set +# CONFIG_CPU_SUBTYPE_SH5_101 is not set +# CONFIG_CPU_SUBTYPE_SH5_103 is not set + +# +# Memory management options +# +CONFIG_QUICKLIST=y +CONFIG_MMU=y +CONFIG_PAGE_OFFSET=0x80000000 +CONFIG_MEMORY_START=0x08000000 +CONFIG_MEMORY_SIZE=0x04000000 +CONFIG_29BIT=y +CONFIG_VSYSCALL=y +CONFIG_ARCH_FLATMEM_ENABLE=y +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_ARCH_SPARSEMEM_DEFAULT=y +CONFIG_MAX_ACTIVE_REGIONS=1 +CONFIG_ARCH_POPULATES_NODE_MAP=y +CONFIG_ARCH_SELECT_MEMORY_MODEL=y +CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y +CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y +CONFIG_PAGE_SIZE_4KB=y +# CONFIG_PAGE_SIZE_8KB is not set +# CONFIG_PAGE_SIZE_16KB is not set +# CONFIG_PAGE_SIZE_64KB is not set +CONFIG_ENTRY_OFFSET=0x00001000 +CONFIG_SELECT_MEMORY_MODEL=y +# CONFIG_FLATMEM_MANUAL is not set +# CONFIG_DISCONTIGMEM_MANUAL is not set +CONFIG_SPARSEMEM_MANUAL=y +CONFIG_SPARSEMEM=y +CONFIG_HAVE_MEMORY_PRESENT=y +CONFIG_SPARSEMEM_STATIC=y +CONFIG_PAGEFLAGS_EXTENDED=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_MIGRATION=y +# CONFIG_RESOURCES_64BIT is not set +# CONFIG_PHYS_ADDR_T_64BIT is not set +CONFIG_ZONE_DMA_FLAG=0 +CONFIG_NR_QUICK=2 +CONFIG_UNEVICTABLE_LRU=y + +# +# Cache configuration +# +CONFIG_SH7705_CACHE_32KB=y +# CONFIG_SH_DIRECT_MAPPED is not set +CONFIG_CACHE_WRITEBACK=y +# CONFIG_CACHE_WRITETHROUGH is not set +# CONFIG_CACHE_OFF is not set + +# +# Processor features +# +CONFIG_CPU_LITTLE_ENDIAN=y +# CONFIG_CPU_BIG_ENDIAN is not set +CONFIG_SH_ADC=y +CONFIG_CPU_HAS_INTEVT=y +CONFIG_CPU_HAS_SR_RB=y + +# +# Board support +# +# CONFIG_SH_SOLUTION_ENGINE is not set +CONFIG_SH_EDOSK7705=y + +# +# Timer and clock configuration +# +CONFIG_SH_TMU=y +CONFIG_SH_TIMER_IRQ=16 +CONFIG_SH_PCLK_FREQ=31250000 +# CONFIG_NO_HZ is not set +# CONFIG_HIGH_RES_TIMERS is not set +CONFIG_GENERIC_CLOCKEVENTS_BUILD=y + +# +# CPU Frequency scaling +# +# CONFIG_CPU_FREQ is not set + +# +# DMA support +# +# CONFIG_SH_DMA is not set + +# +# Companion Chips +# + +# +# Additional SuperH Device Drivers +# +# CONFIG_HEARTBEAT is not set +# CONFIG_PUSH_SWITCH is not set + +# +# Kernel features +# +# CONFIG_HZ_100 is not set +CONFIG_HZ_250=y +# CONFIG_HZ_300 is not set +# CONFIG_HZ_1000 is not set +CONFIG_HZ=250 +# CONFIG_SCHED_HRTICK is not set +CONFIG_PREEMPT_NONE=y +# CONFIG_PREEMPT_VOLUNTARY is not set +# CONFIG_PREEMPT is not set +CONFIG_GUSA=y +# CONFIG_GUSA_RB is not set + +# +# Boot options +# +CONFIG_ZERO_PAGE_OFFSET=0x00001000 +CONFIG_BOOT_LINK_OFFSET=0x00800000 +# CONFIG_CMDLINE_BOOL is not set + +# +# Bus options 
+# +# CONFIG_ARCH_SUPPORTS_MSI is not set + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +# CONFIG_HAVE_AOUT is not set +# CONFIG_BINFMT_MISC is not set +# CONFIG_NET is not set + +# +# Device Drivers +# + +# +# Generic Driver Options +# +CONFIG_STANDALONE=y +# CONFIG_PREVENT_FIRMWARE_BUILD is not set +# CONFIG_SYS_HYPERVISOR is not set +# CONFIG_MTD is not set +# CONFIG_PARPORT is not set +# CONFIG_MISC_DEVICES is not set +CONFIG_HAVE_IDE=y + +# +# SCSI device support +# +# CONFIG_SCSI_DMA is not set +# CONFIG_SCSI_NETLINK is not set +# CONFIG_PHONE is not set + +# +# Input device support +# +# CONFIG_INPUT is not set + +# +# Hardware I/O ports +# +# CONFIG_SERIO is not set +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +# CONFIG_VT is not set +# CONFIG_DEVKMEM is not set +# CONFIG_SERIAL_NONSTANDARD is not set + +# +# Serial drivers +# +# CONFIG_SERIAL_8250 is not set + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_SH_SCI is not set +# CONFIG_UNIX98_PTYS is not set +# CONFIG_LEGACY_PTYS is not set +# CONFIG_IPMI_HANDLER is not set +# CONFIG_HW_RANDOM is not set +# CONFIG_R3964 is not set +# CONFIG_I2C is not set +# CONFIG_SPI is not set +# CONFIG_W1 is not set +# CONFIG_POWER_SUPPLY is not set +# CONFIG_HWMON is not set +# CONFIG_THERMAL is not set +# CONFIG_THERMAL_HWMON is not set +# CONFIG_WATCHDOG is not set +CONFIG_SSB_POSSIBLE=y + +# +# Sonics Silicon Backplane +# +# CONFIG_SSB is not set + +# +# Multifunction device drivers +# +# CONFIG_MFD_CORE is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_MFD_TMIO is not set +# CONFIG_REGULATOR is not set + +# +# Multimedia devices +# + +# +# Multimedia core support +# +# CONFIG_VIDEO_DEV is not set +# CONFIG_VIDEO_MEDIA is not set + +# +# Multimedia drivers +# +# CONFIG_DAB is not set + +# +# Graphics support +# +# CONFIG_VGASTATE is not set +# CONFIG_VIDEO_OUTPUT_CONTROL is not set +# CONFIG_FB is not set +# CONFIG_BACKLIGHT_LCD_SUPPORT is not set + +# +# Display device support +# +# CONFIG_DISPLAY_SUPPORT is not set +# CONFIG_SOUND is not set +# CONFIG_USB_SUPPORT is not set +# CONFIG_MMC is not set +# CONFIG_MEMSTICK is not set +# CONFIG_NEW_LEDS is not set +# CONFIG_ACCESSIBILITY is not set +# CONFIG_RTC_CLASS is not set +# CONFIG_DMADEVICES is not set +# CONFIG_UIO is not set +# CONFIG_STAGING is not set +CONFIG_STAGING_EXCLUDE_BUILD=y + +# +# File systems +# +# CONFIG_DNOTIFY is not set +# CONFIG_INOTIFY is not set +# CONFIG_QUOTA is not set +# CONFIG_AUTOFS_FS is not set +# CONFIG_AUTOFS4_FS is not set +# CONFIG_FUSE_FS is not set + +# +# Pseudo filesystems +# +# CONFIG_PROC_FS is not set +# CONFIG_SYSFS is not set +# CONFIG_TMPFS is not set +# CONFIG_HUGETLBFS is not set +# CONFIG_HUGETLB_PAGE is not set + +# +# Miscellaneous filesystems +# +# CONFIG_NLS is not set + +# +# Kernel hacking +# +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +# CONFIG_ENABLE_WARN_DEPRECATED is not set +# CONFIG_ENABLE_MUST_CHECK is not set +CONFIG_FRAME_WARN=1024 +# CONFIG_MAGIC_SYSRQ is not set +# CONFIG_UNUSED_SYMBOLS is not set +# CONFIG_HEADERS_CHECK is not set +# CONFIG_DEBUG_KERNEL is not set +# CONFIG_DEBUG_MEMORY_INIT is not set +# CONFIG_RCU_CPU_STALL_DETECTOR is not set +# CONFIG_LATENCYTOP is not set +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y + +# +# Tracers +# +# CONFIG_SAMPLES is not set +CONFIG_HAVE_ARCH_KGDB=y +# CONFIG_SH_STANDARD_BIOS is not set +# CONFIG_EARLY_SCIF_CONSOLE is not set 
+# CONFIG_MORE_COMPILE_OPTIONS is not set + +# +# Security options +# +# CONFIG_KEYS is not set +# CONFIG_SECURITYFS is not set +# CONFIG_SECURITY_FILE_CAPABILITIES is not set +# CONFIG_CRYPTO is not set + +# +# Library routines +# +# CONFIG_CRC_CCITT is not set +# CONFIG_CRC16 is not set +# CONFIG_CRC_T10DIF is not set +# CONFIG_CRC_ITU_T is not set +# CONFIG_CRC32 is not set +# CONFIG_CRC7 is not set +# CONFIG_LIBCRC32C is not set +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT=y +CONFIG_HAS_DMA=y diff --git a/arch/sh/configs/rsk7201_defconfig b/arch/sh/configs/rsk7201_defconfig new file mode 100644 index 000000000000..014c18cbf46a --- /dev/null +++ b/arch/sh/configs/rsk7201_defconfig @@ -0,0 +1,703 @@ +# +# Automatically generated make config: don't edit +# Linux kernel version: 2.6.28-rc6 +# Mon Dec 8 14:48:02 2008 +# +CONFIG_SUPERH=y +CONFIG_SUPERH32=y +CONFIG_ARCH_DEFCONFIG="arch/sh/configs/shx3_defconfig" +CONFIG_RWSEM_GENERIC_SPINLOCK=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_FIND_NEXT_BIT=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_GENERIC_HARDIRQS=y +CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y +CONFIG_GENERIC_IRQ_PROBE=y +# CONFIG_GENERIC_GPIO is not set +# CONFIG_GENERIC_TIME is not set +# CONFIG_GENERIC_CLOCKEVENTS is not set +# CONFIG_ARCH_SUSPEND_POSSIBLE is not set +# CONFIG_ARCH_HIBERNATION_POSSIBLE is not set +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_HAVE_LATENCYTOP_SUPPORT=y +# CONFIG_ARCH_HAS_ILOG2_U32 is not set +# CONFIG_ARCH_HAS_ILOG2_U64 is not set +CONFIG_ARCH_NO_VIRT_TO_BUS=y +CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" + +# +# General setup +# +CONFIG_EXPERIMENTAL=y +CONFIG_BROKEN_ON_SMP=y +CONFIG_INIT_ENV_ARG_LIMIT=32 +CONFIG_LOCALVERSION="" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_BSD_PROCESS_ACCT=y +# CONFIG_BSD_PROCESS_ACCT_V3 is not set +CONFIG_IKCONFIG=y +# CONFIG_IKCONFIG_PROC is not set +CONFIG_LOG_BUF_SHIFT=14 +# CONFIG_CGROUPS is not set +# CONFIG_GROUP_SCHED is not set +# CONFIG_SYSFS_DEPRECATED_V2 is not set +# CONFIG_RELAY is not set +CONFIG_NAMESPACES=y +CONFIG_UTS_NS=y +CONFIG_IPC_NS=y +CONFIG_USER_NS=y +CONFIG_PID_NS=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +CONFIG_SYSCTL=y +CONFIG_EMBEDDED=y +CONFIG_UID16=y +CONFIG_SYSCTL_SYSCALL=y +CONFIG_KALLSYMS=y +# CONFIG_KALLSYMS_EXTRA_PASS is not set +CONFIG_HOTPLUG=y +CONFIG_PRINTK=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_COMPAT_BRK=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_ANON_INODES=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +# CONFIG_AIO is not set +CONFIG_VM_EVENT_COUNTERS=y +# CONFIG_SLAB is not set +# CONFIG_SLUB is not set +CONFIG_SLOB=y +CONFIG_PROFILING=y +# CONFIG_MARKERS is not set +CONFIG_OPROFILE=y +CONFIG_HAVE_OPROFILE=y +# CONFIG_KPROBES is not set +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +CONFIG_HAVE_CLK=y +CONFIG_HAVE_GENERIC_DMA_COHERENT=y +CONFIG_RT_MUTEXES=y +CONFIG_TINY_SHMEM=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULES=y +# CONFIG_MODULE_FORCE_LOAD is not set +# CONFIG_MODULE_UNLOAD is not set +# CONFIG_MODVERSIONS is not set +# CONFIG_MODULE_SRCVERSION_ALL is not set +CONFIG_KMOD=y +CONFIG_BLOCK=y +# CONFIG_LBD is not set +# CONFIG_BLK_DEV_IO_TRACE is not set +# CONFIG_LSF is not set +# CONFIG_BLK_DEV_BSG is not set +# CONFIG_BLK_DEV_INTEGRITY is not set + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +# CONFIG_IOSCHED_AS is not set +# CONFIG_IOSCHED_DEADLINE is not set +# CONFIG_IOSCHED_CFQ is not set +# 
CONFIG_DEFAULT_AS is not set +# CONFIG_DEFAULT_DEADLINE is not set +# CONFIG_DEFAULT_CFQ is not set +CONFIG_DEFAULT_NOOP=y +CONFIG_DEFAULT_IOSCHED="noop" +CONFIG_CLASSIC_RCU=y +# CONFIG_FREEZER is not set + +# +# System type +# +CONFIG_CPU_SH2=y +CONFIG_CPU_SH2A=y +# CONFIG_CPU_SUBTYPE_SH7619 is not set +CONFIG_CPU_SUBTYPE_SH7201=y +# CONFIG_CPU_SUBTYPE_SH7203 is not set +# CONFIG_CPU_SUBTYPE_SH7206 is not set +# CONFIG_CPU_SUBTYPE_SH7263 is not set +# CONFIG_CPU_SUBTYPE_MXG is not set +# CONFIG_CPU_SUBTYPE_SH7705 is not set +# CONFIG_CPU_SUBTYPE_SH7706 is not set +# CONFIG_CPU_SUBTYPE_SH7707 is not set +# CONFIG_CPU_SUBTYPE_SH7708 is not set +# CONFIG_CPU_SUBTYPE_SH7709 is not set +# CONFIG_CPU_SUBTYPE_SH7710 is not set +# CONFIG_CPU_SUBTYPE_SH7712 is not set +# CONFIG_CPU_SUBTYPE_SH7720 is not set +# CONFIG_CPU_SUBTYPE_SH7721 is not set +# CONFIG_CPU_SUBTYPE_SH7750 is not set +# CONFIG_CPU_SUBTYPE_SH7091 is not set +# CONFIG_CPU_SUBTYPE_SH7750R is not set +# CONFIG_CPU_SUBTYPE_SH7750S is not set +# CONFIG_CPU_SUBTYPE_SH7751 is not set +# CONFIG_CPU_SUBTYPE_SH7751R is not set +# CONFIG_CPU_SUBTYPE_SH7760 is not set +# CONFIG_CPU_SUBTYPE_SH4_202 is not set +# CONFIG_CPU_SUBTYPE_SH7723 is not set +# CONFIG_CPU_SUBTYPE_SH7763 is not set +# CONFIG_CPU_SUBTYPE_SH7770 is not set +# CONFIG_CPU_SUBTYPE_SH7780 is not set +# CONFIG_CPU_SUBTYPE_SH7785 is not set +# CONFIG_CPU_SUBTYPE_SHX3 is not set +# CONFIG_CPU_SUBTYPE_SH7343 is not set +# CONFIG_CPU_SUBTYPE_SH7722 is not set +# CONFIG_CPU_SUBTYPE_SH7366 is not set +# CONFIG_CPU_SUBTYPE_SH5_101 is not set +# CONFIG_CPU_SUBTYPE_SH5_103 is not set + +# +# Memory management options +# +CONFIG_QUICKLIST=y +CONFIG_PAGE_OFFSET=0x00000000 +CONFIG_MEMORY_START=0x08000000 +CONFIG_MEMORY_SIZE=0x01000000 +CONFIG_29BIT=y +CONFIG_ARCH_FLATMEM_ENABLE=y +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_ARCH_SPARSEMEM_DEFAULT=y +CONFIG_MAX_ACTIVE_REGIONS=1 +CONFIG_ARCH_POPULATES_NODE_MAP=y +CONFIG_ARCH_SELECT_MEMORY_MODEL=y +CONFIG_PAGE_SIZE_4KB=y +# CONFIG_PAGE_SIZE_8KB is not set +# CONFIG_PAGE_SIZE_16KB is not set +# CONFIG_PAGE_SIZE_64KB is not set +CONFIG_ENTRY_OFFSET=0x00001000 +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_FLATMEM_MANUAL=y +# CONFIG_DISCONTIGMEM_MANUAL is not set +# CONFIG_SPARSEMEM_MANUAL is not set +CONFIG_FLATMEM=y +CONFIG_FLAT_NODE_MEM_MAP=y +CONFIG_SPARSEMEM_STATIC=y +CONFIG_PAGEFLAGS_EXTENDED=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +# CONFIG_RESOURCES_64BIT is not set +# CONFIG_PHYS_ADDR_T_64BIT is not set +CONFIG_ZONE_DMA_FLAG=0 +CONFIG_NR_QUICK=2 + +# +# Cache configuration +# +# CONFIG_SH_DIRECT_MAPPED is not set +CONFIG_CACHE_WRITEBACK=y +# CONFIG_CACHE_WRITETHROUGH is not set +# CONFIG_CACHE_OFF is not set + +# +# Processor features +# +# CONFIG_CPU_LITTLE_ENDIAN is not set +CONFIG_CPU_BIG_ENDIAN=y +CONFIG_SH_FPU=y +CONFIG_CPU_HAS_FPU=y + +# +# Board support +# +CONFIG_SH_RSK=y +CONFIG_SH_RSK7201=y +# CONFIG_SH_RSK7203 is not set + +# +# Timer and clock configuration +# +# CONFIG_SH_CMT is not set +CONFIG_SH_MTU2=y +CONFIG_SH_TIMER_IRQ=16 +CONFIG_SH_PCLK_FREQ=40000000 +CONFIG_SH_CLK_MD=0 + +# +# CPU Frequency scaling +# +# CONFIG_CPU_FREQ is not set + +# +# DMA support +# + +# +# Companion Chips +# + +# +# Additional SuperH Device Drivers +# +# CONFIG_HEARTBEAT is not set +# CONFIG_PUSH_SWITCH is not set + +# +# Kernel features +# +# CONFIG_HZ_100 is not set +# CONFIG_HZ_250 is not set +# CONFIG_HZ_300 is not set +CONFIG_HZ_1000=y +CONFIG_HZ=1000 +# CONFIG_SCHED_HRTICK is not set +# CONFIG_KEXEC is not set +# CONFIG_CRASH_DUMP is not set +# CONFIG_SECCOMP 
is not set +CONFIG_PREEMPT_NONE=y +# CONFIG_PREEMPT_VOLUNTARY is not set +# CONFIG_PREEMPT is not set +CONFIG_GUSA=y + +# +# Boot options +# +CONFIG_ZERO_PAGE_OFFSET=0x00001000 +CONFIG_BOOT_LINK_OFFSET=0x00800000 +CONFIG_CMDLINE_BOOL=y +CONFIG_CMDLINE="console=ttySC0,115200 earlyprintk=serial ignore_loglevel" + +# +# Bus options +# +# CONFIG_ARCH_SUPPORTS_MSI is not set +# CONFIG_PCCARD is not set + +# +# Executable file formats +# +CONFIG_BINFMT_ELF_FDPIC=y +CONFIG_BINFMT_FLAT=y +CONFIG_BINFMT_ZFLAT=y +CONFIG_BINFMT_SHARED_FLAT=y +# CONFIG_HAVE_AOUT is not set +# CONFIG_BINFMT_MISC is not set + +# +# Power management options (EXPERIMENTAL) +# +CONFIG_PM=y +# CONFIG_PM_DEBUG is not set +CONFIG_CPU_IDLE=y +CONFIG_CPU_IDLE_GOV_LADDER=y +# CONFIG_NET is not set + +# +# Device Drivers +# + +# +# Generic Driver Options +# +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +# CONFIG_STANDALONE is not set +# CONFIG_PREVENT_FIRMWARE_BUILD is not set +# CONFIG_FW_LOADER is not set +# CONFIG_SYS_HYPERVISOR is not set +CONFIG_MTD=y +# CONFIG_MTD_DEBUG is not set +CONFIG_MTD_CONCAT=y +CONFIG_MTD_PARTITIONS=y +CONFIG_MTD_REDBOOT_PARTS=y +CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1 +# CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED is not set +# CONFIG_MTD_REDBOOT_PARTS_READONLY is not set +# CONFIG_MTD_CMDLINE_PARTS is not set +# CONFIG_MTD_AR7_PARTS is not set + +# +# User Modules And Translation Layers +# +CONFIG_MTD_CHAR=y +CONFIG_MTD_BLKDEVS=y +CONFIG_MTD_BLOCK=y +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_MTD_OOPS is not set + +# +# RAM/ROM/Flash chip drivers +# +CONFIG_MTD_CFI=y +# CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_GEN_PROBE=y +# CONFIG_MTD_CFI_ADV_OPTIONS is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +# CONFIG_MTD_CFI_I4 is not set +# CONFIG_MTD_CFI_I8 is not set +# CONFIG_MTD_CFI_INTELEXT is not set +CONFIG_MTD_CFI_AMDSTD=y +# CONFIG_MTD_CFI_STAA is not set +CONFIG_MTD_CFI_UTIL=y +# CONFIG_MTD_RAM is not set +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +CONFIG_MTD_PHYSMAP=y +CONFIG_MTD_PHYSMAP_START=0x0 +CONFIG_MTD_PHYSMAP_LEN=0x0 +CONFIG_MTD_PHYSMAP_BANKWIDTH=4 +# CONFIG_MTD_PLATRAM is not set + +# +# Self-contained MTD device drivers +# +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOC2000 is not set +# CONFIG_MTD_DOC2001 is not set +# CONFIG_MTD_DOC2001PLUS is not set +# CONFIG_MTD_NAND is not set +# CONFIG_MTD_ONENAND is not set + +# +# UBI - Unsorted block images +# +# CONFIG_MTD_UBI is not set +# CONFIG_PARPORT is not set +CONFIG_BLK_DEV=y +# CONFIG_BLK_DEV_COW_COMMON is not set +# CONFIG_BLK_DEV_LOOP is not set +# CONFIG_BLK_DEV_RAM is not set +# CONFIG_CDROM_PKTCDVD is not set +# CONFIG_BLK_DEV_HD is not set +CONFIG_MISC_DEVICES=y +# CONFIG_EEPROM_93CX6 is not set +# CONFIG_ENCLOSURE_SERVICES is not set +# CONFIG_C2PORT is not set +CONFIG_HAVE_IDE=y +# CONFIG_IDE is not set + +# +# SCSI device support +# +# CONFIG_RAID_ATTRS is not set +# CONFIG_SCSI is not set +# CONFIG_SCSI_DMA is not set +# CONFIG_SCSI_NETLINK is not set +# CONFIG_ATA is not 
set +# CONFIG_MD is not set +# CONFIG_PHONE is not set + +# +# Input device support +# +CONFIG_INPUT=y +# CONFIG_INPUT_FF_MEMLESS is not set +# CONFIG_INPUT_POLLDEV is not set + +# +# Userland interfaces +# +# CONFIG_INPUT_MOUSEDEV is not set +# CONFIG_INPUT_JOYDEV is not set +# CONFIG_INPUT_EVDEV is not set +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +# CONFIG_INPUT_MISC is not set + +# +# Hardware I/O ports +# +# CONFIG_SERIO is not set +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +# CONFIG_VT is not set +CONFIG_DEVKMEM=y +# CONFIG_SERIAL_NONSTANDARD is not set + +# +# Serial drivers +# +# CONFIG_SERIAL_8250 is not set + +# +# Non-8250 serial port support +# +CONFIG_SERIAL_SH_SCI=y +CONFIG_SERIAL_SH_SCI_NR_UARTS=8 +CONFIG_SERIAL_SH_SCI_CONSOLE=y +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +# CONFIG_UNIX98_PTYS is not set +# CONFIG_LEGACY_PTYS is not set +# CONFIG_IPMI_HANDLER is not set +# CONFIG_HW_RANDOM is not set +# CONFIG_R3964 is not set +# CONFIG_RAW_DRIVER is not set +# CONFIG_TCG_TPM is not set +# CONFIG_I2C is not set +# CONFIG_SPI is not set +# CONFIG_W1 is not set +# CONFIG_POWER_SUPPLY is not set +# CONFIG_HWMON is not set +CONFIG_THERMAL=y +# CONFIG_WATCHDOG is not set +CONFIG_SSB_POSSIBLE=y + +# +# Sonics Silicon Backplane +# +# CONFIG_SSB is not set + +# +# Multifunction device drivers +# +# CONFIG_MFD_CORE is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_MFD_TMIO is not set +# CONFIG_REGULATOR is not set + +# +# Multimedia devices +# + +# +# Multimedia core support +# +# CONFIG_VIDEO_DEV is not set +# CONFIG_VIDEO_MEDIA is not set + +# +# Multimedia drivers +# +CONFIG_DAB=y + +# +# Graphics support +# +# CONFIG_VGASTATE is not set +CONFIG_VIDEO_OUTPUT_CONTROL=y +# CONFIG_FB is not set +# CONFIG_BACKLIGHT_LCD_SUPPORT is not set + +# +# Display device support +# +# CONFIG_DISPLAY_SUPPORT is not set +# CONFIG_SOUND is not set +# CONFIG_HID_SUPPORT is not set +# CONFIG_USB_SUPPORT is not set +# CONFIG_MMC is not set +# CONFIG_MEMSTICK is not set +# CONFIG_NEW_LEDS is not set +# CONFIG_ACCESSIBILITY is not set +CONFIG_RTC_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +# CONFIG_RTC_DEBUG is not set + +# +# RTC interfaces +# +CONFIG_RTC_INTF_SYSFS=y +CONFIG_RTC_INTF_PROC=y +CONFIG_RTC_INTF_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +# CONFIG_RTC_DRV_TEST is not set + +# +# SPI RTC drivers +# + +# +# Platform RTC drivers +# +# CONFIG_RTC_DRV_DS1286 is not set +# CONFIG_RTC_DRV_DS1511 is not set +# CONFIG_RTC_DRV_DS1553 is not set +# CONFIG_RTC_DRV_DS1742 is not set +# CONFIG_RTC_DRV_STK17TA8 is not set +# CONFIG_RTC_DRV_M48T86 is not set +# CONFIG_RTC_DRV_M48T35 is not set +# CONFIG_RTC_DRV_M48T59 is not set +# CONFIG_RTC_DRV_BQ4802 is not set +# CONFIG_RTC_DRV_V3020 is not set + +# +# on-CPU RTC drivers +# +CONFIG_RTC_DRV_SH=y +# CONFIG_DMADEVICES is not set +# CONFIG_UIO is not set +# CONFIG_STAGING is not set +CONFIG_STAGING_EXCLUDE_BUILD=y + +# +# File systems +# +CONFIG_EXT2_FS=y +# CONFIG_EXT2_FS_XATTR is not set +# CONFIG_EXT3_FS is not set +# CONFIG_EXT4_FS is not set +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_FS_POSIX_ACL is not set +# CONFIG_FILE_LOCKING is not set +# CONFIG_XFS_FS is not set +# CONFIG_DNOTIFY is not set +# CONFIG_INOTIFY is not set 
+# CONFIG_QUOTA is not set +# CONFIG_AUTOFS_FS is not set +# CONFIG_AUTOFS4_FS is not set +# CONFIG_FUSE_FS is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +# CONFIG_MSDOS_FS is not set +# CONFIG_VFAT_FS is not set +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_SYSCTL=y +CONFIG_SYSFS=y +# CONFIG_TMPFS is not set +# CONFIG_HUGETLB_PAGE is not set +# CONFIG_CONFIGFS_FS is not set + +# +# Miscellaneous filesystems +# +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +CONFIG_JFFS2_FS=y +CONFIG_JFFS2_FS_DEBUG=0 +CONFIG_JFFS2_FS_WRITEBUFFER=y +# CONFIG_JFFS2_FS_WBUF_VERIFY is not set +# CONFIG_JFFS2_SUMMARY is not set +# CONFIG_JFFS2_FS_XATTR is not set +# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set +CONFIG_JFFS2_ZLIB=y +# CONFIG_JFFS2_LZO is not set +CONFIG_JFFS2_RTIME=y +# CONFIG_JFFS2_RUBIN is not set +# CONFIG_CRAMFS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +CONFIG_ROMFS_FS=y +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set + +# +# Partition Types +# +# CONFIG_PARTITION_ADVANCED is not set +CONFIG_MSDOS_PARTITION=y +# CONFIG_NLS is not set + +# +# Kernel hacking +# +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +# CONFIG_PRINTK_TIME is not set +CONFIG_ENABLE_WARN_DEPRECATED=y +# CONFIG_ENABLE_MUST_CHECK is not set +CONFIG_FRAME_WARN=1024 +CONFIG_MAGIC_SYSRQ=y +# CONFIG_UNUSED_SYMBOLS is not set +CONFIG_DEBUG_FS=y +# CONFIG_HEADERS_CHECK is not set +# CONFIG_DEBUG_KERNEL is not set +# CONFIG_DEBUG_BUGVERBOSE is not set +# CONFIG_DEBUG_MEMORY_INIT is not set +# CONFIG_RCU_CPU_STALL_DETECTOR is not set +# CONFIG_LATENCYTOP is not set +CONFIG_SYSCTL_SYSCALL_CHECK=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y + +# +# Tracers +# +# CONFIG_DYNAMIC_PRINTK_DEBUG is not set +# CONFIG_SAMPLES is not set +# CONFIG_SH_STANDARD_BIOS is not set +# CONFIG_EARLY_SCIF_CONSOLE is not set + +# +# Security options +# +# CONFIG_KEYS is not set +# CONFIG_SECURITY is not set +# CONFIG_SECURITYFS is not set +# CONFIG_SECURITY_FILE_CAPABILITIES is not set +# CONFIG_CRYPTO is not set + +# +# Library routines +# +CONFIG_BITREVERSE=y +# CONFIG_CRC_CCITT is not set +# CONFIG_CRC16 is not set +# CONFIG_CRC_T10DIF is not set +# CONFIG_CRC_ITU_T is not set +CONFIG_CRC32=y +# CONFIG_CRC7 is not set +# CONFIG_LIBCRC32C is not set +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_PLIST=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT=y +CONFIG_HAS_DMA=y diff --git a/arch/sh/configs/rsk7203_defconfig b/arch/sh/configs/rsk7203_defconfig index 85b0ac4fc667..dcdef31cf19b 100644 --- a/arch/sh/configs/rsk7203_defconfig +++ b/arch/sh/configs/rsk7203_defconfig @@ -1,7 +1,7 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.27 -# Tue Oct 21 12:58:47 2008 +# Linux kernel version: 2.6.28-rc6 +# Mon Dec 8 14:35:03 2008 # CONFIG_SUPERH=y CONFIG_SUPERH32=y @@ -16,6 +16,8 @@ CONFIG_GENERIC_IRQ_PROBE=y CONFIG_GENERIC_GPIO=y # CONFIG_GENERIC_TIME is not set # CONFIG_GENERIC_CLOCKEVENTS is not set +# CONFIG_ARCH_SUSPEND_POSSIBLE is not set +# CONFIG_ARCH_HIBERNATION_POSSIBLE is not set CONFIG_STACKTRACE_SUPPORT=y CONFIG_LOCKDEP_SUPPORT=y CONFIG_HAVE_LATENCYTOP_SUPPORT=y @@ 
-75,7 +77,6 @@ CONFIG_TIMERFD=y CONFIG_EVENTFD=y CONFIG_AIO=y CONFIG_VM_EVENT_COUNTERS=y -CONFIG_PCI_QUIRKS=y # CONFIG_SLAB is not set # CONFIG_SLUB is not set CONFIG_SLOB=y @@ -126,6 +127,7 @@ CONFIG_CLASSIC_RCU=y CONFIG_CPU_SH2=y CONFIG_CPU_SH2A=y # CONFIG_CPU_SUBTYPE_SH7619 is not set +# CONFIG_CPU_SUBTYPE_SH7201 is not set CONFIG_CPU_SUBTYPE_SH7203=y # CONFIG_CPU_SUBTYPE_SH7206 is not set # CONFIG_CPU_SUBTYPE_SH7263 is not set @@ -211,6 +213,8 @@ CONFIG_CPU_HAS_FPU=y # # Board support # +CONFIG_SH_RSK=y +# CONFIG_SH_RSK7201 is not set CONFIG_SH_RSK7203=y # @@ -296,6 +300,14 @@ CONFIG_BINFMT_ZFLAT=y CONFIG_BINFMT_SHARED_FLAT=y # CONFIG_HAVE_AOUT is not set # CONFIG_BINFMT_MISC is not set + +# +# Power management options (EXPERIMENTAL) +# +CONFIG_PM=y +# CONFIG_PM_DEBUG is not set +CONFIG_CPU_IDLE=y +CONFIG_CPU_IDLE_GOV_LADDER=y CONFIG_NET=y # @@ -477,6 +489,7 @@ CONFIG_BLK_DEV=y CONFIG_MISC_DEVICES=y # CONFIG_EEPROM_93CX6 is not set # CONFIG_ENCLOSURE_SERVICES is not set +# CONFIG_C2PORT is not set CONFIG_HAVE_IDE=y # CONFIG_IDE is not set @@ -603,11 +616,11 @@ CONFIG_SERIAL_CORE_CONSOLE=y # CONFIG_HWMON is not set CONFIG_THERMAL=y # CONFIG_WATCHDOG is not set +CONFIG_SSB_POSSIBLE=y # # Sonics Silicon Backplane # -CONFIG_SSB_POSSIBLE=y # CONFIG_SSB is not set # @@ -617,7 +630,11 @@ CONFIG_SSB_POSSIBLE=y # CONFIG_MFD_SM501 is not set # CONFIG_HTC_PASIC3 is not set # CONFIG_MFD_TMIO is not set -# CONFIG_MFD_WM8400 is not set +CONFIG_REGULATOR=y +# CONFIG_REGULATOR_DEBUG is not set +# CONFIG_REGULATOR_FIXED_VOLTAGE is not set +# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set +# CONFIG_REGULATOR_BQ24022 is not set # # Multimedia devices @@ -702,19 +719,22 @@ CONFIG_USB_ANNOUNCE_NEW_DEVICES=y CONFIG_USB_DEVICEFS=y CONFIG_USB_DEVICE_CLASS=y # CONFIG_USB_DYNAMIC_MINORS is not set +# CONFIG_USB_SUSPEND is not set # CONFIG_USB_OTG is not set # CONFIG_USB_OTG_WHITELIST is not set # CONFIG_USB_OTG_BLACKLIST_HUB is not set CONFIG_USB_MON=y +# CONFIG_USB_WUSB is not set +# CONFIG_USB_WUSB_CBAF is not set # # USB Host Controller Drivers # # CONFIG_USB_C67X00_HCD is not set # CONFIG_USB_ISP116X_HCD is not set -# CONFIG_USB_ISP1760_HCD is not set # CONFIG_USB_SL811_HCD is not set CONFIG_USB_R8A66597_HCD=y +# CONFIG_USB_HWA_HCD is not set # # USB Device Class drivers @@ -725,11 +745,11 @@ CONFIG_USB_R8A66597_HCD=y # CONFIG_USB_TMC is not set # -# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed; # # -# may also be needed; see USB_STORAGE Help for more information +# see USB_STORAGE Help for more information # # CONFIG_USB_LIBUSUAL is not set @@ -770,7 +790,22 @@ CONFIG_USB_R8A66597_HCD=y # CONFIG_USB_GADGET is not set # CONFIG_MMC is not set # CONFIG_MEMSTICK is not set -# CONFIG_NEW_LEDS is not set +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y + +# +# LED drivers +# +CONFIG_LEDS_GPIO=y + +# +# LED Triggers +# +CONFIG_LEDS_TRIGGERS=y +CONFIG_LEDS_TRIGGER_TIMER=y +CONFIG_LEDS_TRIGGER_HEARTBEAT=y +CONFIG_LEDS_TRIGGER_BACKLIGHT=y +CONFIG_LEDS_TRIGGER_DEFAULT_ON=y # CONFIG_ACCESSIBILITY is not set CONFIG_RTC_LIB=y CONFIG_RTC_CLASS=y @@ -812,6 +847,7 @@ CONFIG_RTC_DRV_SH=y # CONFIG_DMADEVICES is not set # CONFIG_UIO is not set # CONFIG_STAGING is not set +CONFIG_STAGING_EXCLUDE_BUILD=y # # File systems @@ -950,9 +986,14 @@ CONFIG_FRAME_POINTER=y # CONFIG_FAULT_INJECTION is not set # CONFIG_LATENCYTOP is not set CONFIG_SYSCTL_SYSCALL_CHECK=y -CONFIG_NOP_TRACER=y -CONFIG_HAVE_FTRACE=y -# CONFIG_FTRACE is not set 
+CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y + +# +# Tracers +# +# CONFIG_FUNCTION_TRACER is not set # CONFIG_SCHED_TRACER is not set # CONFIG_CONTEXT_SWITCH_TRACER is not set # CONFIG_BOOT_TRACER is not set diff --git a/arch/sh/configs/rts7751r2dplus_qemu_defconfig b/arch/sh/configs/rts7751r2dplus_qemu_defconfig deleted file mode 100644 index ae8f63000fbf..000000000000 --- a/arch/sh/configs/rts7751r2dplus_qemu_defconfig +++ /dev/null @@ -1,949 +0,0 @@ -# -# Automatically generated make config: don't edit -# Linux kernel version: 2.6.27 -# Wed Oct 22 18:51:20 2008 -# -CONFIG_SUPERH=y -CONFIG_SUPERH32=y -CONFIG_ARCH_DEFCONFIG="arch/sh/configs/shx3_defconfig" -CONFIG_RWSEM_GENERIC_SPINLOCK=y -CONFIG_GENERIC_BUG=y -CONFIG_GENERIC_FIND_NEXT_BIT=y -CONFIG_GENERIC_HWEIGHT=y -CONFIG_GENERIC_HARDIRQS=y -CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y -CONFIG_GENERIC_IRQ_PROBE=y -# CONFIG_GENERIC_GPIO is not set -CONFIG_GENERIC_TIME=y -CONFIG_GENERIC_CLOCKEVENTS=y -CONFIG_SYS_SUPPORTS_PCI=y -CONFIG_STACKTRACE_SUPPORT=y -CONFIG_LOCKDEP_SUPPORT=y -CONFIG_HAVE_LATENCYTOP_SUPPORT=y -# CONFIG_ARCH_HAS_ILOG2_U32 is not set -# CONFIG_ARCH_HAS_ILOG2_U64 is not set -CONFIG_ARCH_NO_VIRT_TO_BUS=y -CONFIG_IO_TRAPPED=y -CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" - -# -# General setup -# -CONFIG_EXPERIMENTAL=y -CONFIG_BROKEN_ON_SMP=y -CONFIG_INIT_ENV_ARG_LIMIT=32 -CONFIG_LOCALVERSION="" -CONFIG_LOCALVERSION_AUTO=y -CONFIG_SWAP=y -CONFIG_SYSVIPC=y -CONFIG_SYSVIPC_SYSCTL=y -# CONFIG_BSD_PROCESS_ACCT is not set -CONFIG_IKCONFIG=y -CONFIG_IKCONFIG_PROC=y -CONFIG_LOG_BUF_SHIFT=14 -# CONFIG_CGROUPS is not set -CONFIG_GROUP_SCHED=y -CONFIG_FAIR_GROUP_SCHED=y -# CONFIG_RT_GROUP_SCHED is not set -CONFIG_USER_SCHED=y -# CONFIG_CGROUP_SCHED is not set -CONFIG_SYSFS_DEPRECATED=y -CONFIG_SYSFS_DEPRECATED_V2=y -# CONFIG_RELAY is not set -# CONFIG_NAMESPACES is not set -CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="" -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set -CONFIG_SYSCTL=y -CONFIG_EMBEDDED=y -CONFIG_UID16=y -# CONFIG_SYSCTL_SYSCALL is not set -CONFIG_KALLSYMS=y -# CONFIG_KALLSYMS_ALL is not set -# CONFIG_KALLSYMS_EXTRA_PASS is not set -# CONFIG_HOTPLUG is not set -CONFIG_PRINTK=y -CONFIG_BUG=y -CONFIG_ELF_CORE=y -CONFIG_COMPAT_BRK=y -CONFIG_BASE_FULL=y -CONFIG_FUTEX=y -CONFIG_ANON_INODES=y -CONFIG_EPOLL=y -CONFIG_SIGNALFD=y -CONFIG_TIMERFD=y -CONFIG_EVENTFD=y -CONFIG_SHMEM=y -CONFIG_AIO=y -CONFIG_VM_EVENT_COUNTERS=y -CONFIG_PCI_QUIRKS=y -CONFIG_SLAB=y -# CONFIG_SLUB is not set -# CONFIG_SLOB is not set -CONFIG_PROFILING=y -# CONFIG_MARKERS is not set -CONFIG_OPROFILE=y -CONFIG_HAVE_OPROFILE=y -# CONFIG_KPROBES is not set -CONFIG_HAVE_IOREMAP_PROT=y -CONFIG_HAVE_KPROBES=y -CONFIG_HAVE_KRETPROBES=y -CONFIG_HAVE_ARCH_TRACEHOOK=y -CONFIG_HAVE_CLK=y -CONFIG_HAVE_GENERIC_DMA_COHERENT=y -CONFIG_SLABINFO=y -CONFIG_RT_MUTEXES=y -# CONFIG_TINY_SHMEM is not set -CONFIG_BASE_SMALL=0 -CONFIG_MODULES=y -# CONFIG_MODULE_FORCE_LOAD is not set -# CONFIG_MODULE_UNLOAD is not set -# CONFIG_MODVERSIONS is not set -# CONFIG_MODULE_SRCVERSION_ALL is not set -CONFIG_KMOD=y -CONFIG_BLOCK=y -# CONFIG_LBD is not set -# CONFIG_BLK_DEV_IO_TRACE is not set -# CONFIG_LSF is not set -# CONFIG_BLK_DEV_BSG is not set -# CONFIG_BLK_DEV_INTEGRITY is not set - -# -# IO Schedulers -# -CONFIG_IOSCHED_NOOP=y -CONFIG_IOSCHED_AS=y -CONFIG_IOSCHED_DEADLINE=y -CONFIG_IOSCHED_CFQ=y -CONFIG_DEFAULT_AS=y -# CONFIG_DEFAULT_DEADLINE is not set -# CONFIG_DEFAULT_CFQ is not set -# CONFIG_DEFAULT_NOOP is not set 
-CONFIG_DEFAULT_IOSCHED="anticipatory" -CONFIG_CLASSIC_RCU=y -# CONFIG_FREEZER is not set - -# -# System type -# -CONFIG_CPU_SH4=y -# CONFIG_CPU_SUBTYPE_SH7619 is not set -# CONFIG_CPU_SUBTYPE_SH7203 is not set -# CONFIG_CPU_SUBTYPE_SH7206 is not set -# CONFIG_CPU_SUBTYPE_SH7263 is not set -# CONFIG_CPU_SUBTYPE_MXG is not set -# CONFIG_CPU_SUBTYPE_SH7705 is not set -# CONFIG_CPU_SUBTYPE_SH7706 is not set -# CONFIG_CPU_SUBTYPE_SH7707 is not set -# CONFIG_CPU_SUBTYPE_SH7708 is not set -# CONFIG_CPU_SUBTYPE_SH7709 is not set -# CONFIG_CPU_SUBTYPE_SH7710 is not set -# CONFIG_CPU_SUBTYPE_SH7712 is not set -# CONFIG_CPU_SUBTYPE_SH7720 is not set -# CONFIG_CPU_SUBTYPE_SH7721 is not set -# CONFIG_CPU_SUBTYPE_SH7750 is not set -# CONFIG_CPU_SUBTYPE_SH7091 is not set -# CONFIG_CPU_SUBTYPE_SH7750R is not set -# CONFIG_CPU_SUBTYPE_SH7750S is not set -# CONFIG_CPU_SUBTYPE_SH7751 is not set -CONFIG_CPU_SUBTYPE_SH7751R=y -# CONFIG_CPU_SUBTYPE_SH7760 is not set -# CONFIG_CPU_SUBTYPE_SH4_202 is not set -# CONFIG_CPU_SUBTYPE_SH7723 is not set -# CONFIG_CPU_SUBTYPE_SH7763 is not set -# CONFIG_CPU_SUBTYPE_SH7770 is not set -# CONFIG_CPU_SUBTYPE_SH7780 is not set -# CONFIG_CPU_SUBTYPE_SH7785 is not set -# CONFIG_CPU_SUBTYPE_SHX3 is not set -# CONFIG_CPU_SUBTYPE_SH7343 is not set -# CONFIG_CPU_SUBTYPE_SH7722 is not set -# CONFIG_CPU_SUBTYPE_SH7366 is not set -# CONFIG_CPU_SUBTYPE_SH5_101 is not set -# CONFIG_CPU_SUBTYPE_SH5_103 is not set - -# -# Memory management options -# -CONFIG_QUICKLIST=y -CONFIG_MMU=y -CONFIG_PAGE_OFFSET=0x80000000 -CONFIG_MEMORY_START=0x0c000000 -CONFIG_MEMORY_SIZE=0x04000000 -CONFIG_29BIT=y -CONFIG_VSYSCALL=y -CONFIG_ARCH_FLATMEM_ENABLE=y -CONFIG_ARCH_SPARSEMEM_ENABLE=y -CONFIG_ARCH_SPARSEMEM_DEFAULT=y -CONFIG_MAX_ACTIVE_REGIONS=1 -CONFIG_ARCH_POPULATES_NODE_MAP=y -CONFIG_ARCH_SELECT_MEMORY_MODEL=y -CONFIG_PAGE_SIZE_4KB=y -# CONFIG_PAGE_SIZE_8KB is not set -# CONFIG_PAGE_SIZE_16KB is not set -# CONFIG_PAGE_SIZE_64KB is not set -CONFIG_ENTRY_OFFSET=0x00001000 -CONFIG_SELECT_MEMORY_MODEL=y -CONFIG_FLATMEM_MANUAL=y -# CONFIG_DISCONTIGMEM_MANUAL is not set -# CONFIG_SPARSEMEM_MANUAL is not set -CONFIG_FLATMEM=y -CONFIG_FLAT_NODE_MEM_MAP=y -CONFIG_SPARSEMEM_STATIC=y -CONFIG_PAGEFLAGS_EXTENDED=y -CONFIG_SPLIT_PTLOCK_CPUS=4 -# CONFIG_RESOURCES_64BIT is not set -# CONFIG_PHYS_ADDR_T_64BIT is not set -CONFIG_ZONE_DMA_FLAG=0 -CONFIG_NR_QUICK=2 -CONFIG_UNEVICTABLE_LRU=y - -# -# Cache configuration -# -# CONFIG_SH_DIRECT_MAPPED is not set -CONFIG_CACHE_WRITEBACK=y -# CONFIG_CACHE_WRITETHROUGH is not set -# CONFIG_CACHE_OFF is not set - -# -# Processor features -# -CONFIG_CPU_LITTLE_ENDIAN=y -# CONFIG_CPU_BIG_ENDIAN is not set -CONFIG_SH_FPU=y -# CONFIG_SH_STORE_QUEUES is not set -CONFIG_CPU_HAS_INTEVT=y -CONFIG_CPU_HAS_SR_RB=y -CONFIG_CPU_HAS_PTEA=y -CONFIG_CPU_HAS_FPU=y - -# -# Board support -# -# CONFIG_SH_7751_SYSTEMH is not set -# CONFIG_SH_SECUREEDGE5410 is not set -CONFIG_SH_RTS7751R2D=y -# CONFIG_SH_LANDISK is not set -# CONFIG_SH_TITAN is not set -# CONFIG_SH_LBOX_RE2 is not set - -# -# RTS7751R2D Board Revision -# -CONFIG_RTS7751R2D_PLUS=y -# CONFIG_RTS7751R2D_1 is not set - -# -# Timer and clock configuration -# -CONFIG_SH_TMU=y -CONFIG_SH_TIMER_IRQ=16 -CONFIG_SH_PCLK_FREQ=60000000 -# CONFIG_NO_HZ is not set -# CONFIG_HIGH_RES_TIMERS is not set -CONFIG_GENERIC_CLOCKEVENTS_BUILD=y - -# -# CPU Frequency scaling -# -# CONFIG_CPU_FREQ is not set - -# -# DMA support -# -# CONFIG_SH_DMA is not set - -# -# Companion Chips -# - -# -# Additional SuperH Device Drivers -# -CONFIG_HEARTBEAT=y -# 
CONFIG_PUSH_SWITCH is not set - -# -# Kernel features -# -# CONFIG_HZ_100 is not set -CONFIG_HZ_250=y -# CONFIG_HZ_300 is not set -# CONFIG_HZ_1000 is not set -CONFIG_HZ=250 -# CONFIG_SCHED_HRTICK is not set -# CONFIG_KEXEC is not set -# CONFIG_CRASH_DUMP is not set -CONFIG_SECCOMP=y -CONFIG_PREEMPT_NONE=y -# CONFIG_PREEMPT_VOLUNTARY is not set -# CONFIG_PREEMPT is not set -CONFIG_GUSA=y -# CONFIG_GUSA_RB is not set - -# -# Boot options -# -CONFIG_ZERO_PAGE_OFFSET=0x00010000 -CONFIG_BOOT_LINK_OFFSET=0x00800000 -# CONFIG_UBC_WAKEUP is not set -CONFIG_CMDLINE_BOOL=y -CONFIG_CMDLINE="console=tty0 console=ttySC0,115200 root=/dev/sda1 earlyprintk=serial" - -# -# Bus options -# -# CONFIG_PCI is not set -# CONFIG_ARCH_SUPPORTS_MSI is not set - -# -# Executable file formats -# -CONFIG_BINFMT_ELF=y -# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set -# CONFIG_HAVE_AOUT is not set -# CONFIG_BINFMT_MISC is not set -# CONFIG_NET is not set - -# -# Device Drivers -# - -# -# Generic Driver Options -# -CONFIG_STANDALONE=y -CONFIG_PREVENT_FIRMWARE_BUILD=y -# CONFIG_DEBUG_DRIVER is not set -# CONFIG_DEBUG_DEVRES is not set -# CONFIG_SYS_HYPERVISOR is not set -# CONFIG_MTD is not set -# CONFIG_PARPORT is not set -CONFIG_BLK_DEV=y -# CONFIG_BLK_DEV_COW_COMMON is not set -# CONFIG_BLK_DEV_LOOP is not set -CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_RAM_COUNT=16 -CONFIG_BLK_DEV_RAM_SIZE=4096 -# CONFIG_BLK_DEV_XIP is not set -# CONFIG_CDROM_PKTCDVD is not set -# CONFIG_BLK_DEV_HD is not set -CONFIG_MISC_DEVICES=y -# CONFIG_EEPROM_93CX6 is not set -# CONFIG_ENCLOSURE_SERVICES is not set -CONFIG_HAVE_IDE=y -# CONFIG_IDE is not set - -# -# SCSI device support -# -# CONFIG_RAID_ATTRS is not set -CONFIG_SCSI=y -CONFIG_SCSI_DMA=y -# CONFIG_SCSI_TGT is not set -# CONFIG_SCSI_NETLINK is not set -CONFIG_SCSI_PROC_FS=y - -# -# SCSI support type (disk, tape, CD-ROM) -# -CONFIG_BLK_DEV_SD=y -# CONFIG_CHR_DEV_ST is not set -# CONFIG_CHR_DEV_OSST is not set -# CONFIG_BLK_DEV_SR is not set -# CONFIG_CHR_DEV_SG is not set -# CONFIG_CHR_DEV_SCH is not set - -# -# Some SCSI devices (e.g. 
CD jukebox) support multiple LUNs -# -# CONFIG_SCSI_MULTI_LUN is not set -# CONFIG_SCSI_CONSTANTS is not set -# CONFIG_SCSI_LOGGING is not set -# CONFIG_SCSI_SCAN_ASYNC is not set -CONFIG_SCSI_WAIT_SCAN=m - -# -# SCSI Transports -# -# CONFIG_SCSI_SPI_ATTRS is not set -# CONFIG_SCSI_FC_ATTRS is not set -# CONFIG_SCSI_SAS_LIBSAS is not set -# CONFIG_SCSI_SRP_ATTRS is not set -CONFIG_SCSI_LOWLEVEL=y -# CONFIG_SCSI_DEBUG is not set -# CONFIG_SCSI_DH is not set -CONFIG_ATA=y -# CONFIG_ATA_NONSTANDARD is not set -CONFIG_SATA_PMP=y -CONFIG_ATA_SFF=y -# CONFIG_SATA_MV is not set -# CONFIG_PATA_PLATFORM is not set -# CONFIG_MD is not set -# CONFIG_PHONE is not set - -# -# Input device support -# -CONFIG_INPUT=y -# CONFIG_INPUT_FF_MEMLESS is not set -# CONFIG_INPUT_POLLDEV is not set - -# -# Userland interfaces -# -# CONFIG_INPUT_MOUSEDEV is not set -# CONFIG_INPUT_JOYDEV is not set -# CONFIG_INPUT_EVDEV is not set -# CONFIG_INPUT_EVBUG is not set - -# -# Input Device Drivers -# -# CONFIG_INPUT_KEYBOARD is not set -# CONFIG_INPUT_MOUSE is not set -# CONFIG_INPUT_JOYSTICK is not set -# CONFIG_INPUT_TABLET is not set -# CONFIG_INPUT_TOUCHSCREEN is not set -# CONFIG_INPUT_MISC is not set - -# -# Hardware I/O ports -# -# CONFIG_SERIO is not set -# CONFIG_GAMEPORT is not set - -# -# Character devices -# -CONFIG_VT=y -CONFIG_CONSOLE_TRANSLATIONS=y -CONFIG_VT_CONSOLE=y -CONFIG_HW_CONSOLE=y -CONFIG_VT_HW_CONSOLE_BINDING=y -CONFIG_DEVKMEM=y -# CONFIG_SERIAL_NONSTANDARD is not set - -# -# Serial drivers -# -CONFIG_SERIAL_8250=y -# CONFIG_SERIAL_8250_CONSOLE is not set -CONFIG_SERIAL_8250_NR_UARTS=4 -CONFIG_SERIAL_8250_RUNTIME_UARTS=4 -# CONFIG_SERIAL_8250_EXTENDED is not set - -# -# Non-8250 serial port support -# -CONFIG_SERIAL_SH_SCI=y -CONFIG_SERIAL_SH_SCI_NR_UARTS=1 -CONFIG_SERIAL_SH_SCI_CONSOLE=y -CONFIG_SERIAL_CORE=y -CONFIG_SERIAL_CORE_CONSOLE=y -CONFIG_UNIX98_PTYS=y -CONFIG_LEGACY_PTYS=y -CONFIG_LEGACY_PTY_COUNT=256 -# CONFIG_IPMI_HANDLER is not set -CONFIG_HW_RANDOM=y -# CONFIG_R3964 is not set -# CONFIG_RAW_DRIVER is not set -# CONFIG_TCG_TPM is not set -# CONFIG_I2C is not set -CONFIG_SPI=y -# CONFIG_SPI_DEBUG is not set -CONFIG_SPI_MASTER=y - -# -# SPI Master Controller Drivers -# -CONFIG_SPI_BITBANG=y -# CONFIG_SPI_SH_SCI is not set - -# -# SPI Protocol Masters -# -# CONFIG_SPI_AT25 is not set -# CONFIG_SPI_SPIDEV is not set -# CONFIG_SPI_TLE62X0 is not set -# CONFIG_W1 is not set -# CONFIG_POWER_SUPPLY is not set -CONFIG_HWMON=y -# CONFIG_HWMON_VID is not set -# CONFIG_SENSORS_ADCXX is not set -# CONFIG_SENSORS_F71805F is not set -# CONFIG_SENSORS_F71882FG is not set -# CONFIG_SENSORS_IT87 is not set -# CONFIG_SENSORS_LM70 is not set -# CONFIG_SENSORS_MAX1111 is not set -# CONFIG_SENSORS_PC87360 is not set -# CONFIG_SENSORS_PC87427 is not set -# CONFIG_SENSORS_SMSC47M1 is not set -# CONFIG_SENSORS_SMSC47B397 is not set -# CONFIG_SENSORS_VT1211 is not set -# CONFIG_SENSORS_W83627HF is not set -# CONFIG_SENSORS_W83627EHF is not set -# CONFIG_HWMON_DEBUG_CHIP is not set -# CONFIG_THERMAL is not set -# CONFIG_THERMAL_HWMON is not set -# CONFIG_WATCHDOG is not set - -# -# Sonics Silicon Backplane -# -CONFIG_SSB_POSSIBLE=y -# CONFIG_SSB is not set - -# -# Multifunction device drivers -# -# CONFIG_MFD_CORE is not set -CONFIG_MFD_SM501=y -# CONFIG_HTC_PASIC3 is not set -# CONFIG_MFD_TMIO is not set -# CONFIG_MFD_WM8400 is not set - -# -# Multimedia devices -# - -# -# Multimedia core support -# -# CONFIG_VIDEO_DEV is not set -# CONFIG_VIDEO_MEDIA is not set - -# -# Multimedia drivers -# -CONFIG_DAB=y - 
-# -# Graphics support -# -# CONFIG_VGASTATE is not set -CONFIG_VIDEO_OUTPUT_CONTROL=m -CONFIG_FB=y -# CONFIG_FIRMWARE_EDID is not set -# CONFIG_FB_DDC is not set -# CONFIG_FB_BOOT_VESA_SUPPORT is not set -CONFIG_FB_CFB_FILLRECT=y -CONFIG_FB_CFB_COPYAREA=y -CONFIG_FB_CFB_IMAGEBLIT=y -# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set -# CONFIG_FB_SYS_FILLRECT is not set -# CONFIG_FB_SYS_COPYAREA is not set -# CONFIG_FB_SYS_IMAGEBLIT is not set -# CONFIG_FB_FOREIGN_ENDIAN is not set -# CONFIG_FB_SYS_FOPS is not set -# CONFIG_FB_SVGALIB is not set -# CONFIG_FB_MACMODES is not set -# CONFIG_FB_BACKLIGHT is not set -# CONFIG_FB_MODE_HELPERS is not set -# CONFIG_FB_TILEBLITTING is not set - -# -# Frame buffer hardware drivers -# -# CONFIG_FB_S1D13XXX is not set -CONFIG_FB_SH_MOBILE_LCDC=m -CONFIG_FB_SM501=y -# CONFIG_FB_VIRTUAL is not set -# CONFIG_FB_METRONOME is not set -# CONFIG_BACKLIGHT_LCD_SUPPORT is not set - -# -# Display device support -# -# CONFIG_DISPLAY_SUPPORT is not set - -# -# Console display driver support -# -CONFIG_DUMMY_CONSOLE=y -CONFIG_FRAMEBUFFER_CONSOLE=y -# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set -# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set -# CONFIG_FONTS is not set -CONFIG_FONT_8x8=y -CONFIG_FONT_8x16=y -CONFIG_LOGO=y -# CONFIG_LOGO_LINUX_MONO is not set -# CONFIG_LOGO_LINUX_VGA16 is not set -# CONFIG_LOGO_LINUX_CLUT224 is not set -# CONFIG_LOGO_SUPERH_MONO is not set -# CONFIG_LOGO_SUPERH_VGA16 is not set -CONFIG_LOGO_SUPERH_CLUT224=y -CONFIG_SOUND=y -CONFIG_SOUND_OSS_CORE=y -CONFIG_SND=m -# CONFIG_SND_SEQUENCER is not set -# CONFIG_SND_MIXER_OSS is not set -# CONFIG_SND_PCM_OSS is not set -# CONFIG_SND_DYNAMIC_MINORS is not set -CONFIG_SND_SUPPORT_OLD_API=y -CONFIG_SND_VERBOSE_PROCFS=y -# CONFIG_SND_VERBOSE_PRINTK is not set -# CONFIG_SND_DEBUG is not set -CONFIG_SND_DRIVERS=y -# CONFIG_SND_DUMMY is not set -# CONFIG_SND_MTPAV is not set -# CONFIG_SND_SERIAL_U16550 is not set -# CONFIG_SND_MPU401 is not set -CONFIG_SND_SPI=y -CONFIG_SND_SUPERH=y -# CONFIG_SND_SOC is not set -CONFIG_SOUND_PRIME=m -CONFIG_HID_SUPPORT=y -CONFIG_HID=y -# CONFIG_HID_DEBUG is not set -# CONFIG_HIDRAW is not set -# CONFIG_HID_PID is not set - -# -# Special HID drivers -# -CONFIG_HID_COMPAT=y -# CONFIG_USB_SUPPORT is not set -# CONFIG_MMC is not set -# CONFIG_MEMSTICK is not set -# CONFIG_NEW_LEDS is not set -# CONFIG_ACCESSIBILITY is not set -CONFIG_RTC_LIB=y -CONFIG_RTC_CLASS=y -CONFIG_RTC_HCTOSYS=y -CONFIG_RTC_HCTOSYS_DEVICE="rtc0" -# CONFIG_RTC_DEBUG is not set - -# -# RTC interfaces -# -CONFIG_RTC_INTF_SYSFS=y -CONFIG_RTC_INTF_PROC=y -CONFIG_RTC_INTF_DEV=y -# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set -# CONFIG_RTC_DRV_TEST is not set - -# -# SPI RTC drivers -# -# CONFIG_RTC_DRV_M41T94 is not set -# CONFIG_RTC_DRV_DS1305 is not set -# CONFIG_RTC_DRV_MAX6902 is not set -CONFIG_RTC_DRV_R9701=y -# CONFIG_RTC_DRV_RS5C348 is not set -# CONFIG_RTC_DRV_DS3234 is not set - -# -# Platform RTC drivers -# -# CONFIG_RTC_DRV_DS1286 is not set -# CONFIG_RTC_DRV_DS1511 is not set -# CONFIG_RTC_DRV_DS1553 is not set -# CONFIG_RTC_DRV_DS1742 is not set -# CONFIG_RTC_DRV_STK17TA8 is not set -# CONFIG_RTC_DRV_M48T86 is not set -# CONFIG_RTC_DRV_M48T35 is not set -# CONFIG_RTC_DRV_M48T59 is not set -# CONFIG_RTC_DRV_BQ4802 is not set -# CONFIG_RTC_DRV_V3020 is not set - -# -# on-CPU RTC drivers -# -# CONFIG_RTC_DRV_SH is not set -# CONFIG_DMADEVICES is not set -# CONFIG_UIO is not set -# CONFIG_STAGING is not set - -# -# File systems -# -CONFIG_EXT2_FS=y -# CONFIG_EXT2_FS_XATTR is not set -# 
CONFIG_EXT2_FS_XIP is not set -# CONFIG_EXT3_FS is not set -# CONFIG_EXT4_FS is not set -# CONFIG_REISERFS_FS is not set -# CONFIG_JFS_FS is not set -# CONFIG_FS_POSIX_ACL is not set -CONFIG_FILE_LOCKING=y -# CONFIG_XFS_FS is not set -CONFIG_DNOTIFY=y -CONFIG_INOTIFY=y -CONFIG_INOTIFY_USER=y -# CONFIG_QUOTA is not set -# CONFIG_AUTOFS_FS is not set -# CONFIG_AUTOFS4_FS is not set -# CONFIG_FUSE_FS is not set - -# -# CD-ROM/DVD Filesystems -# -# CONFIG_ISO9660_FS is not set -# CONFIG_UDF_FS is not set - -# -# DOS/FAT/NT Filesystems -# -CONFIG_FAT_FS=y -CONFIG_MSDOS_FS=y -CONFIG_VFAT_FS=y -CONFIG_FAT_DEFAULT_CODEPAGE=437 -CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" -# CONFIG_NTFS_FS is not set - -# -# Pseudo filesystems -# -CONFIG_PROC_FS=y -CONFIG_PROC_KCORE=y -CONFIG_PROC_SYSCTL=y -CONFIG_PROC_PAGE_MONITOR=y -CONFIG_SYSFS=y -CONFIG_TMPFS=y -# CONFIG_TMPFS_POSIX_ACL is not set -# CONFIG_HUGETLBFS is not set -# CONFIG_HUGETLB_PAGE is not set -# CONFIG_CONFIGFS_FS is not set - -# -# Miscellaneous filesystems -# -# CONFIG_ADFS_FS is not set -# CONFIG_AFFS_FS is not set -# CONFIG_HFS_FS is not set -# CONFIG_HFSPLUS_FS is not set -# CONFIG_BEFS_FS is not set -# CONFIG_BFS_FS is not set -# CONFIG_EFS_FS is not set -# CONFIG_CRAMFS is not set -# CONFIG_VXFS_FS is not set -CONFIG_MINIX_FS=y -# CONFIG_OMFS_FS is not set -# CONFIG_HPFS_FS is not set -# CONFIG_QNX4FS_FS is not set -# CONFIG_ROMFS_FS is not set -# CONFIG_SYSV_FS is not set -# CONFIG_UFS_FS is not set - -# -# Partition Types -# -# CONFIG_PARTITION_ADVANCED is not set -CONFIG_MSDOS_PARTITION=y -CONFIG_NLS=y -CONFIG_NLS_DEFAULT="iso8859-1" -# CONFIG_NLS_CODEPAGE_437 is not set -# CONFIG_NLS_CODEPAGE_737 is not set -# CONFIG_NLS_CODEPAGE_775 is not set -# CONFIG_NLS_CODEPAGE_850 is not set -# CONFIG_NLS_CODEPAGE_852 is not set -# CONFIG_NLS_CODEPAGE_855 is not set -# CONFIG_NLS_CODEPAGE_857 is not set -# CONFIG_NLS_CODEPAGE_860 is not set -# CONFIG_NLS_CODEPAGE_861 is not set -# CONFIG_NLS_CODEPAGE_862 is not set -# CONFIG_NLS_CODEPAGE_863 is not set -# CONFIG_NLS_CODEPAGE_864 is not set -# CONFIG_NLS_CODEPAGE_865 is not set -# CONFIG_NLS_CODEPAGE_866 is not set -# CONFIG_NLS_CODEPAGE_869 is not set -# CONFIG_NLS_CODEPAGE_936 is not set -# CONFIG_NLS_CODEPAGE_950 is not set -CONFIG_NLS_CODEPAGE_932=y -# CONFIG_NLS_CODEPAGE_949 is not set -# CONFIG_NLS_CODEPAGE_874 is not set -# CONFIG_NLS_ISO8859_8 is not set -# CONFIG_NLS_CODEPAGE_1250 is not set -# CONFIG_NLS_CODEPAGE_1251 is not set -# CONFIG_NLS_ASCII is not set -# CONFIG_NLS_ISO8859_1 is not set -# CONFIG_NLS_ISO8859_2 is not set -# CONFIG_NLS_ISO8859_3 is not set -# CONFIG_NLS_ISO8859_4 is not set -# CONFIG_NLS_ISO8859_5 is not set -# CONFIG_NLS_ISO8859_6 is not set -# CONFIG_NLS_ISO8859_7 is not set -# CONFIG_NLS_ISO8859_9 is not set -# CONFIG_NLS_ISO8859_13 is not set -# CONFIG_NLS_ISO8859_14 is not set -# CONFIG_NLS_ISO8859_15 is not set -# CONFIG_NLS_KOI8_R is not set -# CONFIG_NLS_KOI8_U is not set -# CONFIG_NLS_UTF8 is not set - -# -# Kernel hacking -# -CONFIG_TRACE_IRQFLAGS_SUPPORT=y -# CONFIG_PRINTK_TIME is not set -CONFIG_ENABLE_WARN_DEPRECATED=y -CONFIG_ENABLE_MUST_CHECK=y -CONFIG_FRAME_WARN=1024 -# CONFIG_MAGIC_SYSRQ is not set -# CONFIG_UNUSED_SYMBOLS is not set -CONFIG_DEBUG_FS=y -# CONFIG_HEADERS_CHECK is not set -CONFIG_DEBUG_KERNEL=y -# CONFIG_DEBUG_SHIRQ is not set -CONFIG_DETECT_SOFTLOCKUP=y -# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set -CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 -CONFIG_SCHED_DEBUG=y -# CONFIG_SCHEDSTATS is not set -# CONFIG_TIMER_STATS is not set -# 
CONFIG_DEBUG_OBJECTS is not set -# CONFIG_DEBUG_SLAB is not set -# CONFIG_DEBUG_RT_MUTEXES is not set -# CONFIG_RT_MUTEX_TESTER is not set -# CONFIG_DEBUG_SPINLOCK is not set -# CONFIG_DEBUG_MUTEXES is not set -# CONFIG_DEBUG_LOCK_ALLOC is not set -# CONFIG_PROVE_LOCKING is not set -# CONFIG_LOCK_STAT is not set -# CONFIG_DEBUG_SPINLOCK_SLEEP is not set -# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set -# CONFIG_DEBUG_KOBJECT is not set -# CONFIG_DEBUG_BUGVERBOSE is not set -CONFIG_DEBUG_INFO=y -# CONFIG_DEBUG_VM is not set -# CONFIG_DEBUG_WRITECOUNT is not set -# CONFIG_DEBUG_MEMORY_INIT is not set -# CONFIG_DEBUG_LIST is not set -# CONFIG_DEBUG_SG is not set -# CONFIG_FRAME_POINTER is not set -# CONFIG_RCU_TORTURE_TEST is not set -# CONFIG_RCU_CPU_STALL_DETECTOR is not set -# CONFIG_BACKTRACE_SELF_TEST is not set -# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set -# CONFIG_FAULT_INJECTION is not set -# CONFIG_LATENCYTOP is not set -CONFIG_NOP_TRACER=y -CONFIG_HAVE_FTRACE=y -# CONFIG_FTRACE is not set -# CONFIG_IRQSOFF_TRACER is not set -# CONFIG_SCHED_TRACER is not set -# CONFIG_CONTEXT_SWITCH_TRACER is not set -# CONFIG_BOOT_TRACER is not set -# CONFIG_STACK_TRACER is not set -# CONFIG_DYNAMIC_PRINTK_DEBUG is not set -# CONFIG_SAMPLES is not set -# CONFIG_SH_STANDARD_BIOS is not set -CONFIG_EARLY_SCIF_CONSOLE=y -CONFIG_EARLY_SCIF_CONSOLE_PORT=0xffe80000 -CONFIG_EARLY_PRINTK=y -# CONFIG_DEBUG_BOOTMEM is not set -# CONFIG_DEBUG_STACKOVERFLOW is not set -# CONFIG_DEBUG_STACK_USAGE is not set -# CONFIG_4KSTACKS is not set -# CONFIG_IRQSTACKS is not set -# CONFIG_SH_KGDB is not set - -# -# Security options -# -# CONFIG_KEYS is not set -# CONFIG_SECURITY is not set -# CONFIG_SECURITYFS is not set -# CONFIG_SECURITY_FILE_CAPABILITIES is not set -CONFIG_CRYPTO=y - -# -# Crypto core or helper -# -# CONFIG_CRYPTO_FIPS is not set -# CONFIG_CRYPTO_MANAGER is not set -# CONFIG_CRYPTO_GF128MUL is not set -# CONFIG_CRYPTO_NULL is not set -# CONFIG_CRYPTO_CRYPTD is not set -# CONFIG_CRYPTO_AUTHENC is not set -# CONFIG_CRYPTO_TEST is not set - -# -# Authenticated Encryption with Associated Data -# -# CONFIG_CRYPTO_CCM is not set -# CONFIG_CRYPTO_GCM is not set -# CONFIG_CRYPTO_SEQIV is not set - -# -# Block modes -# -# CONFIG_CRYPTO_CBC is not set -# CONFIG_CRYPTO_CTR is not set -# CONFIG_CRYPTO_CTS is not set -# CONFIG_CRYPTO_ECB is not set -# CONFIG_CRYPTO_LRW is not set -# CONFIG_CRYPTO_PCBC is not set -# CONFIG_CRYPTO_XTS is not set - -# -# Hash modes -# -# CONFIG_CRYPTO_HMAC is not set -# CONFIG_CRYPTO_XCBC is not set - -# -# Digest -# -# CONFIG_CRYPTO_CRC32C is not set -# CONFIG_CRYPTO_MD4 is not set -# CONFIG_CRYPTO_MD5 is not set -# CONFIG_CRYPTO_MICHAEL_MIC is not set -# CONFIG_CRYPTO_RMD128 is not set -# CONFIG_CRYPTO_RMD160 is not set -# CONFIG_CRYPTO_RMD256 is not set -# CONFIG_CRYPTO_RMD320 is not set -# CONFIG_CRYPTO_SHA1 is not set -# CONFIG_CRYPTO_SHA256 is not set -# CONFIG_CRYPTO_SHA512 is not set -# CONFIG_CRYPTO_TGR192 is not set -# CONFIG_CRYPTO_WP512 is not set - -# -# Ciphers -# -# CONFIG_CRYPTO_AES is not set -# CONFIG_CRYPTO_ANUBIS is not set -# CONFIG_CRYPTO_ARC4 is not set -# CONFIG_CRYPTO_BLOWFISH is not set -# CONFIG_CRYPTO_CAMELLIA is not set -# CONFIG_CRYPTO_CAST5 is not set -# CONFIG_CRYPTO_CAST6 is not set -# CONFIG_CRYPTO_DES is not set -# CONFIG_CRYPTO_FCRYPT is not set -# CONFIG_CRYPTO_KHAZAD is not set -# CONFIG_CRYPTO_SALSA20 is not set -# CONFIG_CRYPTO_SEED is not set -# CONFIG_CRYPTO_SERPENT is not set -# CONFIG_CRYPTO_TEA is not set -# CONFIG_CRYPTO_TWOFISH is not set - -# 
-# Compression -# -# CONFIG_CRYPTO_DEFLATE is not set -# CONFIG_CRYPTO_LZO is not set - -# -# Random Number Generation -# -# CONFIG_CRYPTO_ANSI_CPRNG is not set -CONFIG_CRYPTO_HW=y - -# -# Library routines -# -CONFIG_BITREVERSE=y -# CONFIG_CRC_CCITT is not set -# CONFIG_CRC16 is not set -CONFIG_CRC_T10DIF=y -# CONFIG_CRC_ITU_T is not set -CONFIG_CRC32=y -# CONFIG_CRC7 is not set -# CONFIG_LIBCRC32C is not set -CONFIG_PLIST=y -CONFIG_HAS_IOMEM=y -CONFIG_HAS_IOPORT=y -CONFIG_HAS_DMA=y diff --git a/arch/sh/configs/se7343_defconfig b/arch/sh/configs/se7343_defconfig index 075f42ed5b09..be246f381507 100644 --- a/arch/sh/configs/se7343_defconfig +++ b/arch/sh/configs/se7343_defconfig @@ -1,7 +1,7 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.27 -# Wed Oct 22 19:00:21 2008 +# Linux kernel version: 2.6.28-rc6 +# Thu Dec 4 16:40:25 2008 # CONFIG_SUPERH=y CONFIG_SUPERH32=y @@ -74,7 +74,6 @@ CONFIG_EVENTFD=y # CONFIG_SHMEM is not set CONFIG_AIO=y CONFIG_VM_EVENT_COUNTERS=y -CONFIG_PCI_QUIRKS=y CONFIG_SLAB=y # CONFIG_SLUB is not set # CONFIG_SLOB is not set @@ -127,6 +126,7 @@ CONFIG_CPU_SH4=y CONFIG_CPU_SH4A=y CONFIG_CPU_SH4AL_DSP=y # CONFIG_CPU_SUBTYPE_SH7619 is not set +# CONFIG_CPU_SUBTYPE_SH7201 is not set # CONFIG_CPU_SUBTYPE_SH7203 is not set # CONFIG_CPU_SUBTYPE_SH7206 is not set # CONFIG_CPU_SUBTYPE_SH7263 is not set @@ -227,7 +227,7 @@ CONFIG_SH_7343_SOLUTION_ENGINE=y # CONFIG_SH_TMU=y CONFIG_SH_TIMER_IRQ=16 -CONFIG_SH_PCLK_FREQ=27000000 +CONFIG_SH_PCLK_FREQ=33333333 # CONFIG_NO_HZ is not set # CONFIG_HIGH_RES_TIMERS is not set CONFIG_GENERIC_CLOCKEVENTS_BUILD=y @@ -274,7 +274,8 @@ CONFIG_GUSA=y # CONFIG_ZERO_PAGE_OFFSET=0x00001000 CONFIG_BOOT_LINK_OFFSET=0x00800000 -# CONFIG_CMDLINE_BOOL is not set +CONFIG_CMDLINE_BOOL=y +CONFIG_CMDLINE="console=ttySC0,115200" # # Bus options @@ -463,6 +464,7 @@ CONFIG_BLK_DEV=y # CONFIG_BLK_DEV_COW_COMMON is not set # CONFIG_BLK_DEV_LOOP is not set # CONFIG_BLK_DEV_NBD is not set +# CONFIG_BLK_DEV_UB is not set # CONFIG_BLK_DEV_RAM is not set # CONFIG_CDROM_PKTCDVD is not set # CONFIG_ATA_OVER_ETH is not set @@ -519,23 +521,10 @@ CONFIG_NETDEVICES=y # CONFIG_EQUALIZER is not set # CONFIG_TUN is not set # CONFIG_VETH is not set -# CONFIG_PHYLIB is not set -CONFIG_NET_ETHERNET=y +# CONFIG_NET_ETHERNET is not set CONFIG_MII=y -# CONFIG_AX88796 is not set -# CONFIG_STNIC is not set -CONFIG_SMC91X=y -# CONFIG_SMC911X is not set -# CONFIG_IBM_NEW_EMAC_ZMII is not set -# CONFIG_IBM_NEW_EMAC_RGMII is not set -# CONFIG_IBM_NEW_EMAC_TAH is not set -# CONFIG_IBM_NEW_EMAC_EMAC4 is not set -# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set -# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set -# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set -# CONFIG_B44 is not set -CONFIG_NETDEV_1000=y -CONFIG_NETDEV_10000=y +# CONFIG_NETDEV_1000 is not set +# CONFIG_NETDEV_10000 is not set # # Wireless LAN @@ -543,6 +532,26 @@ CONFIG_NETDEV_10000=y # CONFIG_WLAN_PRE80211 is not set # CONFIG_WLAN_80211 is not set # CONFIG_IWLWIFI_LEDS is not set + +# +# USB Network Adapters +# +# CONFIG_USB_CATC is not set +# CONFIG_USB_KAWETH is not set +# CONFIG_USB_PEGASUS is not set +# CONFIG_USB_RTL8150 is not set +CONFIG_USB_USBNET=y +# CONFIG_USB_NET_AX8817X is not set +CONFIG_USB_NET_CDCETHER=y +CONFIG_USB_NET_DM9601=y +# CONFIG_USB_NET_SMSC95XX is not set +# CONFIG_USB_NET_GL620A is not set +# CONFIG_USB_NET_NET1080 is not set +# CONFIG_USB_NET_PLUSB is not set +# CONFIG_USB_NET_MCS7830 is not set +# CONFIG_USB_NET_RNDIS_HOST is not set +# 
CONFIG_USB_NET_CDC_SUBSET is not set +# CONFIG_USB_NET_ZAURUS is not set # CONFIG_WAN is not set # CONFIG_PPP is not set # CONFIG_SLIP is not set @@ -597,13 +606,17 @@ CONFIG_DEVKMEM=y # # Serial drivers # -# CONFIG_SERIAL_8250 is not set +CONFIG_SERIAL_8250=y +# CONFIG_SERIAL_8250_CONSOLE is not set +CONFIG_SERIAL_8250_NR_UARTS=2 +CONFIG_SERIAL_8250_RUNTIME_UARTS=2 +# CONFIG_SERIAL_8250_EXTENDED is not set # # Non-8250 serial port support # CONFIG_SERIAL_SH_SCI=y -CONFIG_SERIAL_SH_SCI_NR_UARTS=2 +CONFIG_SERIAL_SH_SCI_NR_UARTS=4 CONFIG_SERIAL_SH_SCI_CONSOLE=y CONFIG_SERIAL_CORE=y CONFIG_SERIAL_CORE_CONSOLE=y @@ -615,7 +628,51 @@ CONFIG_HW_RANDOM=y # CONFIG_R3964 is not set # CONFIG_RAW_DRIVER is not set # CONFIG_TCG_TPM is not set -# CONFIG_I2C is not set +CONFIG_I2C=y +CONFIG_I2C_BOARDINFO=y +# CONFIG_I2C_CHARDEV is not set +CONFIG_I2C_HELPER_AUTO=y + +# +# I2C Hardware Bus support +# + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_OCORES is not set +CONFIG_I2C_SH_MOBILE=y +# CONFIG_I2C_SIMTEC is not set + +# +# External I2C/SMBus adapter drivers +# +# CONFIG_I2C_PARPORT_LIGHT is not set +# CONFIG_I2C_TAOS_EVM is not set +# CONFIG_I2C_TINY_USB is not set + +# +# Other I2C/SMBus bus drivers +# +# CONFIG_I2C_PCA_PLATFORM is not set +# CONFIG_I2C_STUB is not set + +# +# Miscellaneous I2C Chip support +# +# CONFIG_DS1682 is not set +# CONFIG_AT24 is not set +# CONFIG_SENSORS_EEPROM is not set +# CONFIG_SENSORS_PCF8574 is not set +# CONFIG_PCF8575 is not set +# CONFIG_SENSORS_PCA9539 is not set +# CONFIG_SENSORS_PCF8591 is not set +# CONFIG_SENSORS_MAX6875 is not set +# CONFIG_SENSORS_TSL2550 is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +# CONFIG_I2C_DEBUG_CHIP is not set # CONFIG_SPI is not set # CONFIG_W1 is not set # CONFIG_POWER_SUPPLY is not set @@ -623,11 +680,11 @@ CONFIG_HW_RANDOM=y # CONFIG_THERMAL is not set # CONFIG_THERMAL_HWMON is not set # CONFIG_WATCHDOG is not set +CONFIG_SSB_POSSIBLE=y # # Sonics Silicon Backplane # -CONFIG_SSB_POSSIBLE=y # CONFIG_SSB is not set # @@ -637,7 +694,10 @@ CONFIG_SSB_POSSIBLE=y # CONFIG_MFD_SM501 is not set # CONFIG_HTC_PASIC3 is not set # CONFIG_MFD_TMIO is not set +# CONFIG_PMIC_DA903X is not set # CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_REGULATOR is not set # # Multimedia devices @@ -657,6 +717,16 @@ CONFIG_VIDEO_MEDIA=y # Multimedia drivers # # CONFIG_MEDIA_ATTACH is not set +CONFIG_MEDIA_TUNER=y +# CONFIG_MEDIA_TUNER_CUSTOMIZE is not set +CONFIG_MEDIA_TUNER_SIMPLE=y +CONFIG_MEDIA_TUNER_TDA8290=y +CONFIG_MEDIA_TUNER_TDA9887=y +CONFIG_MEDIA_TUNER_TEA5761=y +CONFIG_MEDIA_TUNER_TEA5767=y +CONFIG_MEDIA_TUNER_MT20XX=y +CONFIG_MEDIA_TUNER_XC2028=y +CONFIG_MEDIA_TUNER_XC5000=y CONFIG_VIDEO_V4L2=y CONFIG_VIDEO_V4L1=y CONFIG_VIDEO_CAPTURE_DRIVERS=y @@ -665,8 +735,57 @@ CONFIG_VIDEO_CAPTURE_DRIVERS=y CONFIG_VIDEO_HELPER_CHIPS_AUTO=y # CONFIG_VIDEO_VIVI is not set # CONFIG_VIDEO_CPIA is not set +# CONFIG_VIDEO_CPIA2 is not set +# CONFIG_VIDEO_SAA5246A is not set +# CONFIG_VIDEO_SAA5249 is not set # CONFIG_SOC_CAMERA is not set +CONFIG_V4L_USB_DRIVERS=y +# CONFIG_USB_VIDEO_CLASS is not set +CONFIG_USB_GSPCA=m +# CONFIG_USB_M5602 is not set +# CONFIG_USB_GSPCA_CONEX is not set +# CONFIG_USB_GSPCA_ETOMS is not set +# CONFIG_USB_GSPCA_FINEPIX is not set +# CONFIG_USB_GSPCA_MARS is not set +# CONFIG_USB_GSPCA_OV519 is not set +# CONFIG_USB_GSPCA_PAC207 is not set +# CONFIG_USB_GSPCA_PAC7311 is not set +# 
CONFIG_USB_GSPCA_SONIXB is not set +# CONFIG_USB_GSPCA_SONIXJ is not set +# CONFIG_USB_GSPCA_SPCA500 is not set +# CONFIG_USB_GSPCA_SPCA501 is not set +# CONFIG_USB_GSPCA_SPCA505 is not set +# CONFIG_USB_GSPCA_SPCA506 is not set +# CONFIG_USB_GSPCA_SPCA508 is not set +# CONFIG_USB_GSPCA_SPCA561 is not set +# CONFIG_USB_GSPCA_STK014 is not set +# CONFIG_USB_GSPCA_SUNPLUS is not set +# CONFIG_USB_GSPCA_T613 is not set +# CONFIG_USB_GSPCA_TV8532 is not set +# CONFIG_USB_GSPCA_VC032X is not set +# CONFIG_USB_GSPCA_ZC3XX is not set +# CONFIG_VIDEO_PVRUSB2 is not set +# CONFIG_VIDEO_EM28XX is not set +# CONFIG_VIDEO_USBVISION is not set +# CONFIG_USB_VICAM is not set +# CONFIG_USB_IBMCAM is not set +# CONFIG_USB_KONICAWC is not set +# CONFIG_USB_QUICKCAM_MESSENGER is not set +# CONFIG_USB_ET61X251 is not set +# CONFIG_VIDEO_OVCAMCHIP is not set +# CONFIG_USB_OV511 is not set +# CONFIG_USB_SE401 is not set +# CONFIG_USB_SN9C102 is not set +# CONFIG_USB_STV680 is not set +# CONFIG_USB_ZC0301 is not set +# CONFIG_USB_PWC is not set +# CONFIG_USB_ZR364XX is not set +# CONFIG_USB_STKWEBCAM is not set +# CONFIG_USB_S2255 is not set CONFIG_RADIO_ADAPTERS=y +# CONFIG_USB_DSBR is not set +# CONFIG_USB_SI470X is not set +# CONFIG_USB_MR800 is not set # CONFIG_DAB is not set # @@ -700,6 +819,7 @@ CONFIG_FB_CFB_IMAGEBLIT=m CONFIG_FB_SH_MOBILE_LCDC=m # CONFIG_FB_VIRTUAL is not set # CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set # CONFIG_BACKLIGHT_LCD_SUPPORT is not set # @@ -737,27 +857,147 @@ CONFIG_SND_DRIVERS=y # CONFIG_SND_SERIAL_U16550 is not set # CONFIG_SND_MPU401 is not set CONFIG_SND_SUPERH=y +CONFIG_SND_USB=y +# CONFIG_SND_USB_AUDIO is not set +# CONFIG_SND_USB_CAIAQ is not set # CONFIG_SND_SOC is not set # CONFIG_SOUND_PRIME is not set CONFIG_HID_SUPPORT=y CONFIG_HID=y # CONFIG_HID_DEBUG is not set # CONFIG_HIDRAW is not set + +# +# USB Input Devices +# +CONFIG_USB_HID=y # CONFIG_HID_PID is not set +# CONFIG_USB_HIDDEV is not set # # Special HID drivers # CONFIG_HID_COMPAT=y -# CONFIG_USB_SUPPORT is not set +CONFIG_HID_A4TECH=y +CONFIG_HID_APPLE=y +CONFIG_HID_BELKIN=y +CONFIG_HID_BRIGHT=y +CONFIG_HID_CHERRY=y +CONFIG_HID_CHICONY=y +CONFIG_HID_CYPRESS=y +CONFIG_HID_DELL=y +CONFIG_HID_EZKEY=y +CONFIG_HID_GYRATION=y +CONFIG_HID_LOGITECH=y +# CONFIG_LOGITECH_FF is not set +# CONFIG_LOGIRUMBLEPAD2_FF is not set +CONFIG_HID_MICROSOFT=y +CONFIG_HID_MONTEREY=y +CONFIG_HID_PANTHERLORD=y +# CONFIG_PANTHERLORD_FF is not set +CONFIG_HID_PETALYNX=y +CONFIG_HID_SAMSUNG=y +CONFIG_HID_SONY=y +CONFIG_HID_SUNPLUS=y +# CONFIG_THRUSTMASTER_FF is not set +# CONFIG_ZEROPLUS_FF is not set +CONFIG_USB_SUPPORT=y +CONFIG_USB_ARCH_HAS_HCD=y +# CONFIG_USB_ARCH_HAS_OHCI is not set +# CONFIG_USB_ARCH_HAS_EHCI is not set +CONFIG_USB=y +CONFIG_USB_DEBUG=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y + +# +# Miscellaneous USB options +# +CONFIG_USB_DEVICEFS=y +CONFIG_USB_DEVICE_CLASS=y +# CONFIG_USB_DYNAMIC_MINORS is not set +# CONFIG_USB_OTG is not set +# CONFIG_USB_OTG_WHITELIST is not set +# CONFIG_USB_OTG_BLACKLIST_HUB is not set +# CONFIG_USB_MON is not set +# CONFIG_USB_WUSB is not set +# CONFIG_USB_WUSB_CBAF is not set + +# +# USB Host Controller Drivers +# +# CONFIG_USB_C67X00_HCD is not set +CONFIG_USB_ISP116X_HCD=y +# CONFIG_USB_SL811_HCD is not set +# CONFIG_USB_R8A66597_HCD is not set +# CONFIG_USB_HWA_HCD is not set + +# +# USB Device Class drivers +# +# CONFIG_USB_ACM is not set +# CONFIG_USB_PRINTER is not set +# CONFIG_USB_WDM is not set +# CONFIG_USB_TMC is not set + +# +# NOTE: USB_STORAGE depends on SCSI 
but BLK_DEV_SD may also be needed; +# + +# +# see USB_STORAGE Help for more information +# +# CONFIG_USB_STORAGE is not set +# CONFIG_USB_LIBUSUAL is not set + +# +# USB Imaging devices +# +# CONFIG_USB_MDC800 is not set +# CONFIG_USB_MICROTEK is not set + +# +# USB port drivers +# +# CONFIG_USB_SERIAL is not set + +# +# USB Miscellaneous drivers +# +# CONFIG_USB_EMI62 is not set +# CONFIG_USB_EMI26 is not set +# CONFIG_USB_ADUTUX is not set +# CONFIG_USB_SEVSEG is not set +# CONFIG_USB_RIO500 is not set +# CONFIG_USB_LEGOTOWER is not set +# CONFIG_USB_LCD is not set +# CONFIG_USB_BERRY_CHARGE is not set +# CONFIG_USB_LED is not set +# CONFIG_USB_CYPRESS_CY7C63 is not set +# CONFIG_USB_CYTHERM is not set +# CONFIG_USB_PHIDGET is not set +# CONFIG_USB_IDMOUSE is not set +# CONFIG_USB_FTDI_ELAN is not set +# CONFIG_USB_APPLEDISPLAY is not set +# CONFIG_USB_LD is not set +# CONFIG_USB_TRANCEVIBRATOR is not set +# CONFIG_USB_IOWARRIOR is not set +# CONFIG_USB_TEST is not set +# CONFIG_USB_ISIGHTFW is not set +# CONFIG_USB_VST is not set +# CONFIG_USB_GADGET is not set # CONFIG_MMC is not set # CONFIG_MEMSTICK is not set # CONFIG_NEW_LEDS is not set # CONFIG_ACCESSIBILITY is not set # CONFIG_RTC_CLASS is not set # CONFIG_DMADEVICES is not set -# CONFIG_UIO is not set +CONFIG_UIO=y +# CONFIG_UIO_PDRV is not set +# CONFIG_UIO_PDRV_GENIRQ is not set +# CONFIG_UIO_SMX is not set +# CONFIG_UIO_SERCOS3 is not set # CONFIG_STAGING is not set +CONFIG_STAGING_EXCLUDE_BUILD=y # # File systems @@ -889,8 +1129,13 @@ CONFIG_FRAME_WARN=1024 # CONFIG_DEBUG_MEMORY_INIT is not set # CONFIG_RCU_CPU_STALL_DETECTOR is not set # CONFIG_LATENCYTOP is not set -CONFIG_NOP_TRACER=y -CONFIG_HAVE_FTRACE=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y + +# +# Tracers +# # CONFIG_DYNAMIC_PRINTK_DEBUG is not set # CONFIG_SAMPLES is not set # CONFIG_SH_STANDARD_BIOS is not set diff --git a/arch/sh/drivers/dma/Makefile b/arch/sh/drivers/dma/Makefile index 1ac812d24488..ab956adacb47 100644 --- a/arch/sh/drivers/dma/Makefile +++ b/arch/sh/drivers/dma/Makefile @@ -3,7 +3,6 @@ # obj-$(CONFIG_SH_DMA_API) += dma-api.o dma-sysfs.o -obj-$(CONFIG_ISA_DMA_API) += dma-isa.o obj-$(CONFIG_SH_DMA) += dma-sh.o obj-$(CONFIG_SH_DREAMCAST) += dma-pvr2.o dma-g2.o obj-$(CONFIG_SH_DMABRG) += dmabrg.o diff --git a/arch/sh/drivers/dma/dma-isa.c b/arch/sh/drivers/dma/dma-isa.c deleted file mode 100644 index 5fb044b791c3..000000000000 --- a/arch/sh/drivers/dma/dma-isa.c +++ /dev/null @@ -1,106 +0,0 @@ -/* - * arch/sh/drivers/dma/dma-isa.c - * - * Generic ISA DMA wrapper for SH DMA API - * - * Copyright (C) 2003, 2004 Paul Mundt - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - */ -#include <linux/kernel.h> -#include <linux/module.h> -#include <asm/dma.h> - -/* - * This implements a small wrapper set to make code using the old ISA DMA API - * work with the SH DMA API. Since most of the work in the new API happens - * at ops->xfer() time, we simply use the various set_dma_xxx() routines to - * fill in per-channel info, and then hand hand this off to ops->xfer() at - * enable_dma() time. - * - * For channels that are doing on-demand data transfer via cascading, the - * channel itself will still need to be configured through the new API. As - * such, this code is meant for only the simplest of tasks (and shouldn't be - * used in any new drivers at all). 
- * - * NOTE: ops->xfer() is the preferred way of doing things. However, there - * are some users of the ISA DMA API that exist in common code that we - * don't necessarily want to go out of our way to break, so we still - * allow for some compatibility at that level. Any new code is strongly - * advised to run far away from the ISA DMA API and use the SH DMA API - * directly. - */ -unsigned long claim_dma_lock(void) -{ - unsigned long flags; - - spin_lock_irqsave(&dma_spin_lock, flags); - - return flags; -} -EXPORT_SYMBOL(claim_dma_lock); - -void release_dma_lock(unsigned long flags) -{ - spin_unlock_irqrestore(&dma_spin_lock, flags); -} -EXPORT_SYMBOL(release_dma_lock); - -void disable_dma(unsigned int chan) -{ - /* Nothing */ -} -EXPORT_SYMBOL(disable_dma); - -void enable_dma(unsigned int chan) -{ - struct dma_info *info = get_dma_info(chan); - struct dma_channel *channel = &info->channels[chan]; - - info->ops->xfer(channel); -} -EXPORT_SYMBOL(enable_dma); - -void clear_dma_ff(unsigned int chan) -{ - /* Nothing */ -} -EXPORT_SYMBOL(clear_dma_ff); - -void set_dma_mode(unsigned int chan, char mode) -{ - struct dma_info *info = get_dma_info(chan); - struct dma_channel *channel = &info->channels[chan]; - - channel->mode = mode; -} -EXPORT_SYMBOL(set_dma_mode); - -void set_dma_addr(unsigned int chan, unsigned int addr) -{ - struct dma_info *info = get_dma_info(chan); - struct dma_channel *channel = &info->channels[chan]; - - /* - * Single address mode is the only thing supported through - * this interface. - */ - if ((channel->mode & DMA_MODE_MASK) == DMA_MODE_READ) { - channel->sar = addr; - } else { - channel->dar = addr; - } -} -EXPORT_SYMBOL(set_dma_addr); - -void set_dma_count(unsigned int chan, unsigned int count) -{ - struct dma_info *info = get_dma_info(chan); - struct dma_channel *channel = &info->channels[chan]; - - channel->count = count; -} -EXPORT_SYMBOL(set_dma_count); - diff --git a/arch/sh/drivers/dma/dma-sh.c b/arch/sh/drivers/dma/dma-sh.c index b2ffe649c7c0..50887a592dd0 100644 --- a/arch/sh/drivers/dma/dma-sh.c +++ b/arch/sh/drivers/dma/dma-sh.c @@ -205,7 +205,8 @@ static int sh_dmac_get_dma_residue(struct dma_channel *chan) #if defined(CONFIG_CPU_SUBTYPE_SH7720) || \ defined(CONFIG_CPU_SUBTYPE_SH7721) || \ - defined(CONFIG_CPU_SUBTYPE_SH7780) + defined(CONFIG_CPU_SUBTYPE_SH7780) || \ + defined(CONFIG_CPU_SUBTYPE_SH7709) #define dmaor_read_reg() ctrl_inw(DMAOR) #define dmaor_write_reg(data) ctrl_outw(data, DMAOR) #else diff --git a/arch/sh/drivers/dma/dma-sh.h b/arch/sh/drivers/dma/dma-sh.h index b05af34fc15d..05fecd5428e4 100644 --- a/arch/sh/drivers/dma/dma-sh.h +++ b/arch/sh/drivers/dma/dma-sh.h @@ -29,6 +29,7 @@ #define RS_IN 0x00000200 #define RS_OUT 0x00000300 #define TS_BLK 0x00000040 +#define TM_BUR 0x00000020 #define CHCR_DE 0x00000001 #define CHCR_TE 0x00000002 #define CHCR_IE 0x00000004 diff --git a/arch/sh/drivers/pci/ops-sh03.c b/arch/sh/drivers/pci/ops-sh03.c index ebb58e605d9d..e1703ff5a4d2 100644 --- a/arch/sh/drivers/pci/ops-sh03.c +++ b/arch/sh/drivers/pci/ops-sh03.c @@ -18,7 +18,8 @@ */ int __init pcibios_init_platform(void) { - return 1; + __set_io_port_base(SH7751_PCI_IO_BASE); + return 1; } static struct resource sh7751_io_resource = { diff --git a/arch/sh/drivers/pci/pci-sh7780.c b/arch/sh/drivers/pci/pci-sh7780.c index b2a2bfa3c1bd..078dc44d6b08 100644 --- a/arch/sh/drivers/pci/pci-sh7780.c +++ b/arch/sh/drivers/pci/pci-sh7780.c @@ -123,16 +123,14 @@ int __init sh7780_pcic_init(struct sh4_pci_address_map *map) * Window0 = map->window0.size @ 
non-cached area base = SDRAM * Window1 = map->window1.size @ cached area base = SDRAM */ - word = ((map->window0.size - 1) & 0x1ff00001) | 0x01; - pci_write_reg(0x07f00001, SH4_PCILSR0); - word = ((map->window1.size - 1) & 0x1ff00001) | 0x01; + word = (CONFIG_MEMORY_SIZE - 0x00100000) | 0x00000001; + pci_write_reg(word, SH4_PCILSR0); pci_write_reg(0x00000001, SH4_PCILSR1); /* Set the values on window 0 PCI config registers */ - word = P2SEGADDR(map->window0.base); - pci_write_reg(0xa8000000, SH4_PCILAR0); - pci_write_reg(0x08000000, SH7780_PCIMBAR0); + word = (CONFIG_MEMORY_SIZE > 0x08000000) ? 0x10000000 : 0x08000000; + pci_write_reg(word | 0xa0000000, SH4_PCILAR0); + pci_write_reg(word, SH7780_PCIMBAR0); /* Set the values on window 1 PCI config registers */ - word = P2SEGADDR(map->window1.base); pci_write_reg(0x00000000, SH4_PCILAR1); pci_write_reg(0x00000000, SH7780_PCIMBAR1); diff --git a/arch/sh/include/asm/addrspace.h b/arch/sh/include/asm/addrspace.h index 2702d81bfc0d..36736c7e93db 100644 --- a/arch/sh/include/asm/addrspace.h +++ b/arch/sh/include/asm/addrspace.h @@ -49,5 +49,16 @@ /* Check if an address can be reached in 29 bits */ #define IS_29BIT(a) (((unsigned long)(a)) < 0x20000000) +#ifdef CONFIG_SH_STORE_QUEUES +/* + * This is a special case for the SH-4 store queues, as pages for this + * space still need to be faulted in before it's possible to flush the + * store queue cache for writeout to the remapped region. + */ +#define P3_ADDR_MAX (P4SEG_STORE_QUE + 0x04000000) +#else +#define P3_ADDR_MAX P4SEG +#endif + #endif /* __KERNEL__ */ #endif /* __ASM_SH_ADDRSPACE_H */ diff --git a/arch/sh/include/asm/bitops-grb.h b/arch/sh/include/asm/bitops-grb.h index a5907b94395b..e73af33acbf4 100644 --- a/arch/sh/include/asm/bitops-grb.h +++ b/arch/sh/include/asm/bitops-grb.h @@ -166,4 +166,7 @@ static inline int test_and_change_bit(int nr, volatile void * addr) return retval; } + +#include <asm-generic/bitops/non-atomic.h> + #endif /* __ASM_SH_BITOPS_GRB_H */ diff --git a/arch/sh/include/asm/bitops-irq.h b/arch/sh/include/asm/bitops-irq.h deleted file mode 100644 index 653a12750584..000000000000 --- a/arch/sh/include/asm/bitops-irq.h +++ /dev/null @@ -1,91 +0,0 @@ -#ifndef __ASM_SH_BITOPS_IRQ_H -#define __ASM_SH_BITOPS_IRQ_H - -static inline void set_bit(int nr, volatile void *addr) -{ - int mask; - volatile unsigned int *a = addr; - unsigned long flags; - - a += nr >> 5; - mask = 1 << (nr & 0x1f); - local_irq_save(flags); - *a |= mask; - local_irq_restore(flags); -} - -static inline void clear_bit(int nr, volatile void *addr) -{ - int mask; - volatile unsigned int *a = addr; - unsigned long flags; - - a += nr >> 5; - mask = 1 << (nr & 0x1f); - local_irq_save(flags); - *a &= ~mask; - local_irq_restore(flags); -} - -static inline void change_bit(int nr, volatile void *addr) -{ - int mask; - volatile unsigned int *a = addr; - unsigned long flags; - - a += nr >> 5; - mask = 1 << (nr & 0x1f); - local_irq_save(flags); - *a ^= mask; - local_irq_restore(flags); -} - -static inline int test_and_set_bit(int nr, volatile void *addr) -{ - int mask, retval; - volatile unsigned int *a = addr; - unsigned long flags; - - a += nr >> 5; - mask = 1 << (nr & 0x1f); - local_irq_save(flags); - retval = (mask & *a) != 0; - *a |= mask; - local_irq_restore(flags); - - return retval; -} - -static inline int test_and_clear_bit(int nr, volatile void *addr) -{ - int mask, retval; - volatile unsigned int *a = addr; - unsigned long flags; - - a += nr >> 5; - mask = 1 << (nr & 0x1f); - local_irq_save(flags); - 
retval = (mask & *a) != 0; - *a &= ~mask; - local_irq_restore(flags); - - return retval; -} - -static inline int test_and_change_bit(int nr, volatile void *addr) -{ - int mask, retval; - volatile unsigned int *a = addr; - unsigned long flags; - - a += nr >> 5; - mask = 1 << (nr & 0x1f); - local_irq_save(flags); - retval = (mask & *a) != 0; - *a ^= mask; - local_irq_restore(flags); - - return retval; -} - -#endif /* __ASM_SH_BITOPS_IRQ_H */ diff --git a/arch/sh/include/asm/bitops-llsc.h b/arch/sh/include/asm/bitops-llsc.h index 43b8e1a8239e..1d2fc0b010ad 100644 --- a/arch/sh/include/asm/bitops-llsc.h +++ b/arch/sh/include/asm/bitops-llsc.h @@ -141,4 +141,6 @@ static inline int test_and_change_bit(int nr, volatile void * addr) return retval != 0; } +#include <asm-generic/bitops/non-atomic.h> + #endif /* __ASM_SH_BITOPS_LLSC_H */ diff --git a/arch/sh/include/asm/bitops-op32.h b/arch/sh/include/asm/bitops-op32.h new file mode 100644 index 000000000000..f0ae7e9218e0 --- /dev/null +++ b/arch/sh/include/asm/bitops-op32.h @@ -0,0 +1,142 @@ +#ifndef __ASM_SH_BITOPS_OP32_H +#define __ASM_SH_BITOPS_OP32_H + +/* + * The bit modifying instructions on SH-2A are only capable of working + * with a 3-bit immediate, which signifies the shift position for the bit + * being worked on. + */ +#if defined(__BIG_ENDIAN) +#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7) +#define BYTE_NUMBER(nr) ((nr ^ BITOP_LE_SWIZZLE) / BITS_PER_BYTE) +#define BYTE_OFFSET(nr) ((nr ^ BITOP_LE_SWIZZLE) % BITS_PER_BYTE) +#else +#define BYTE_NUMBER(nr) ((nr) / BITS_PER_BYTE) +#define BYTE_OFFSET(nr) ((nr) % BITS_PER_BYTE) +#endif + +#define IS_IMMEDIATE(nr) (__builtin_constant_p(nr)) + +static inline void __set_bit(int nr, volatile unsigned long *addr) +{ + if (IS_IMMEDIATE(nr)) { + __asm__ __volatile__ ( + "bset.b %1, @(%O2,%0) ! __set_bit\n\t" + : "+r" (addr) + : "i" (BYTE_OFFSET(nr)), "i" (BYTE_NUMBER(nr)) + : "t", "memory" + ); + } else { + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + + *p |= mask; + } +} + +static inline void __clear_bit(int nr, volatile unsigned long *addr) +{ + if (IS_IMMEDIATE(nr)) { + __asm__ __volatile__ ( + "bclr.b %1, @(%O2,%0) ! __clear_bit\n\t" + : "+r" (addr) + : "i" (BYTE_OFFSET(nr)), + "i" (BYTE_NUMBER(nr)) + : "t", "memory" + ); + } else { + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + + *p &= ~mask; + } +} + +/** + * __change_bit - Toggle a bit in memory + * @nr: the bit to change + * @addr: the address to start counting from + * + * Unlike change_bit(), this function is non-atomic and may be reordered. + * If it's called on the same region of memory simultaneously, the effect + * may be that only one operation succeeds. + */ +static inline void __change_bit(int nr, volatile unsigned long *addr) +{ + if (IS_IMMEDIATE(nr)) { + __asm__ __volatile__ ( + "bxor.b %1, @(%O2,%0) ! __change_bit\n\t" + : "+r" (addr) + : "i" (BYTE_OFFSET(nr)), + "i" (BYTE_NUMBER(nr)) + : "t", "memory" + ); + } else { + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + + *p ^= mask; + } +} + +/** + * __test_and_set_bit - Set a bit and return its old value + * @nr: Bit to set + * @addr: Address to count from + * + * This operation is non-atomic and can be reordered. + * If two examples of this operation race, one can appear to succeed + * but actually fail. You must protect multiple accesses with a lock. 
+ */ +static inline int __test_and_set_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long old = *p; + + *p = old | mask; + return (old & mask) != 0; +} + +/** + * __test_and_clear_bit - Clear a bit and return its old value + * @nr: Bit to clear + * @addr: Address to count from + * + * This operation is non-atomic and can be reordered. + * If two examples of this operation race, one can appear to succeed + * but actually fail. You must protect multiple accesses with a lock. + */ +static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long old = *p; + + *p = old & ~mask; + return (old & mask) != 0; +} + +/* WARNING: non atomic and it can be reordered! */ +static inline int __test_and_change_bit(int nr, + volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long old = *p; + + *p = old ^ mask; + return (old & mask) != 0; +} + +/** + * test_bit - Determine whether a bit is set + * @nr: bit number to test + * @addr: Address to start counting from + */ +static inline int test_bit(int nr, const volatile unsigned long *addr) +{ + return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1))); +} + +#endif /* __ASM_SH_BITOPS_OP32_H */ diff --git a/arch/sh/include/asm/bitops.h b/arch/sh/include/asm/bitops.h index 367930d8e5ae..ebe595b7ab1f 100644 --- a/arch/sh/include/asm/bitops.h +++ b/arch/sh/include/asm/bitops.h @@ -13,21 +13,22 @@ #ifdef CONFIG_GUSA_RB #include <asm/bitops-grb.h> +#elif defined(CONFIG_CPU_SH2A) +#include <asm-generic/bitops/atomic.h> +#include <asm/bitops-op32.h> #elif defined(CONFIG_CPU_SH4A) #include <asm/bitops-llsc.h> #else -#include <asm/bitops-irq.h> +#include <asm-generic/bitops/atomic.h> +#include <asm-generic/bitops/non-atomic.h> #endif - /* * clear_bit() doesn't provide any barrier for the compiler. */ #define smp_mb__before_clear_bit() barrier() #define smp_mb__after_clear_bit() barrier() -#include <asm-generic/bitops/non-atomic.h> - #ifdef CONFIG_SUPERH32 static inline unsigned long ffz(unsigned long word) { diff --git a/arch/sh/include/asm/bugs.h b/arch/sh/include/asm/bugs.h index 121b2ecddfc3..4924ff6f5439 100644 --- a/arch/sh/include/asm/bugs.h +++ b/arch/sh/include/asm/bugs.h @@ -25,7 +25,7 @@ static void __init check_bugs(void) case CPU_SH7619: *p++ = '2'; break; - case CPU_SH7203 ... CPU_MXG: + case CPU_SH7201 ... CPU_MXG: *p++ = '2'; *p++ = 'a'; break; diff --git a/arch/sh/include/asm/elf.h b/arch/sh/include/asm/elf.h index 9eb9036a1bdc..ccb1d93bb043 100644 --- a/arch/sh/include/asm/elf.h +++ b/arch/sh/include/asm/elf.h @@ -108,13 +108,11 @@ typedef struct user_fpu_struct elf_fpregset_t; #define elf_check_fdpic(x) ((x)->e_flags & EF_SH_FDPIC) #define elf_check_const_displacement(x) ((x)->e_flags & EF_SH_PIC) -#ifdef CONFIG_SUPERH32 /* * Enable dump using regset. * This covers all of general/DSP/FPU regs. 
*/ #define CORE_DUMP_USE_REGSET -#endif #define USE_ELF_CORE_DUMP #define ELF_FDPIC_CORE_EFLAGS EF_SH_FDPIC @@ -204,7 +202,7 @@ do { \ #define ARCH_HAS_SETUP_ADDITIONAL_PAGES struct linux_binprm; extern int arch_setup_additional_pages(struct linux_binprm *bprm, - int executable_stack); + int uses_interp); extern unsigned int vdso_enabled; extern void __kernel_vsyscall; diff --git a/arch/sh/include/asm/ftrace.h b/arch/sh/include/asm/ftrace.h index 3aed362c9463..8fea7d8c8258 100644 --- a/arch/sh/include/asm/ftrace.h +++ b/arch/sh/include/asm/ftrace.h @@ -1,8 +1,34 @@ #ifndef __ASM_SH_FTRACE_H #define __ASM_SH_FTRACE_H +#ifdef CONFIG_FUNCTION_TRACER + +#define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */ + #ifndef __ASSEMBLY__ extern void mcount(void); -#endif + +#define MCOUNT_ADDR ((long)(mcount)) + +#ifdef CONFIG_DYNAMIC_FTRACE +#define CALLER_ADDR ((long)(ftrace_caller)) +#define STUB_ADDR ((long)(ftrace_stub)) + +#define MCOUNT_INSN_OFFSET ((STUB_ADDR - CALLER_ADDR) >> 1) + +struct dyn_arch_ftrace { + /* No extra data needed on sh */ +}; + +#endif /* CONFIG_DYNAMIC_FTRACE */ + +static inline unsigned long ftrace_call_adjust(unsigned long addr) +{ + /* 'addr' is the memory table address. */ + return addr; +} + +#endif /* __ASSEMBLY__ */ +#endif /* CONFIG_FUNCTION_TRACER */ #endif /* __ASM_SH_FTRACE_H */ diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h index 65eaae34e753..61f6dae40534 100644 --- a/arch/sh/include/asm/io.h +++ b/arch/sh/include/asm/io.h @@ -260,6 +260,10 @@ __ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags) return (void __iomem *)P2SEGADDR(offset); } + + /* P4 above the store queues are always mapped. */ + if (unlikely(offset >= P3_ADDR_MAX)) + return (void __iomem *)P4SEGADDR(offset); #endif return __ioremap(offset, size, flags); diff --git a/arch/sh/include/asm/kgdb.h b/arch/sh/include/asm/kgdb.h index 24e42078f36f..72704ed725e5 100644 --- a/arch/sh/include/asm/kgdb.h +++ b/arch/sh/include/asm/kgdb.h @@ -1,21 +1,7 @@ -/* - * May be copied or modified under the terms of the GNU General Public - * License. See linux/COPYING for more information. - * - * Based on original code by Glenn Engel, Jim Kingdon, - * David Grothe <dave@gcom.com>, Tigran Aivazian, <tigran@sco.com> and - * Amit S. Kale <akale@veritas.com> - * - * Super-H port based on sh-stub.c (Ben Lee and Steve Chamberlain) by - * Henry Bell <henry.bell@st.com> - * - * Header file for low-level support for remote debug using GDB. 
- * - */ - -#ifndef __KGDB_H -#define __KGDB_H +#ifndef __ASM_SH_KGDB_H +#define __ASM_SH_KGDB_H +#include <asm/cacheflush.h> #include <asm/ptrace.h> /* Same as pt_regs but has vbr in place of syscall_nr */ @@ -30,40 +16,26 @@ struct kgdb_regs { unsigned long vbr; }; -/* State info */ -extern char kgdb_in_gdb_mode; -extern int kgdb_nofault; /* Ignore bus errors (in gdb mem access) */ -extern char in_nmi; /* Debounce flag to prevent NMI reentry*/ +enum regnames { + GDB_R0, GDB_R1, GDB_R2, GDB_R3, GDB_R4, GDB_R5, GDB_R6, GDB_R7, + GDB_R8, GDB_R9, GDB_R10, GDB_R11, GDB_R12, GDB_R13, GDB_R14, GDB_R15, -/* SCI */ -extern int kgdb_portnum; -extern int kgdb_baud; -extern char kgdb_parity; -extern char kgdb_bits; + GDB_PC, GDB_PR, GDB_SR, GDB_GBR, GDB_MACH, GDB_MACL, GDB_VBR, +}; -/* Init and interface stuff */ -extern int kgdb_init(void); -extern int (*kgdb_getchar)(void); -extern void (*kgdb_putchar)(int); +#define NUMREGBYTES ((GDB_VBR + 1) * 4) -/* Trap functions */ -typedef void (kgdb_debug_hook_t)(struct pt_regs *regs); -typedef void (kgdb_bus_error_hook_t)(void); -extern kgdb_debug_hook_t *kgdb_debug_hook; -extern kgdb_bus_error_hook_t *kgdb_bus_err_hook; +static inline void arch_kgdb_breakpoint(void) +{ + __asm__ __volatile__ ("trapa #0x3c\n"); +} -/* Console */ -struct console; -void kgdb_console_write(struct console *co, const char *s, unsigned count); -extern int kgdb_console_setup(struct console *, char *); +/* State info */ +extern char in_nmi; /* Debounce flag to prevent NMI reentry*/ -/* Prototypes for jmp fns */ -#define _JBLEN 9 -typedef int jmp_buf[_JBLEN]; -extern void longjmp(jmp_buf __jmpb, int __retval); -extern int setjmp(jmp_buf __jmpb); +#define BUFMAX 2048 -/* Forced breakpoint */ -#define breakpoint() __asm__ __volatile__("trapa #0x3c") +#define CACHE_FLUSH_IS_SAFE 1 +#define BREAK_INSTR_SIZE 2 -#endif +#endif /* __ASM_SH_KGDB_H */ diff --git a/arch/sh/include/asm/machvec.h b/arch/sh/include/asm/machvec.h index f1bae02ef7b6..64b1c16a0f03 100644 --- a/arch/sh/include/asm/machvec.h +++ b/arch/sh/include/asm/machvec.h @@ -14,8 +14,6 @@ #include <linux/time.h> #include <asm/machtypes.h> -struct device; - struct sh_machine_vector { void (*mv_setup)(char **cmdline_p); const char *mv_name; @@ -45,9 +43,6 @@ struct sh_machine_vector { int (*mv_irq_demux)(int irq); void (*mv_init_irq)(void); - void (*mv_init_pci)(void); - - void (*mv_heartbeat)(void); void __iomem *(*mv_ioport_map)(unsigned long port, unsigned int size); void (*mv_ioport_unmap)(void __iomem *); diff --git a/arch/sh/include/asm/mmu_context.h b/arch/sh/include/asm/mmu_context.h index 04c0c9733ad6..5d9157bd474d 100644 --- a/arch/sh/include/asm/mmu_context.h +++ b/arch/sh/include/asm/mmu_context.h @@ -22,7 +22,7 @@ #define MMU_CONTEXT_ASID_MASK 0x000000ff #define MMU_CONTEXT_VERSION_MASK 0xffffff00 #define MMU_CONTEXT_FIRST_VERSION 0x00000100 -#define NO_CONTEXT 0 +#define NO_CONTEXT 0UL /* ASID is 8-bit value, so it can't be 0x100 */ #define MMU_NO_ASID 0x100 @@ -130,7 +130,7 @@ static inline void switch_mm(struct mm_struct *prev, #define destroy_context(mm) do { } while (0) #define set_asid(asid) do { } while (0) #define get_asid() (0) -#define cpu_asid(cpu, mm) ({ (void)cpu; 0; }) +#define cpu_asid(cpu, mm) ({ (void)cpu; NO_CONTEXT; }) #define switch_and_save_asid(asid) (0) #define set_TTB(pgd) do { } while (0) #define get_TTB() (0) diff --git a/arch/sh/include/asm/mutex-llsc.h b/arch/sh/include/asm/mutex-llsc.h new file mode 100644 index 000000000000..ee839ee58ac8 --- /dev/null +++ 
b/arch/sh/include/asm/mutex-llsc.h @@ -0,0 +1,112 @@ +/* + * arch/sh/include/asm/mutex-llsc.h + * + * SH-4A optimized mutex locking primitives + * + * Please look into asm-generic/mutex-xchg.h for a formal definition. + */ +#ifndef __ASM_SH_MUTEX_LLSC_H +#define __ASM_SH_MUTEX_LLSC_H + +/* + * Attempting to lock a mutex on SH4A is done like in ARMv6+ architecure. + * with a bastardized atomic decrement (it is not a reliable atomic decrement + * but it satisfies the defined semantics for our purpose, while being + * smaller and faster than a real atomic decrement or atomic swap. + * The idea is to attempt decrementing the lock value only once. If once + * decremented it isn't zero, or if its store-back fails due to a dispute + * on the exclusive store, we simply bail out immediately through the slow + * path where the lock will be reattempted until it succeeds. + */ +static inline void +__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *)) +{ + int __ex_flag, __res; + + __asm__ __volatile__ ( + "movli.l @%2, %0 \n" + "add #-1, %0 \n" + "movco.l %0, @%2 \n" + "movt %1 \n" + : "=&z" (__res), "=&r" (__ex_flag) + : "r" (&(count)->counter) + : "t"); + + __res |= !__ex_flag; + if (unlikely(__res != 0)) + fail_fn(count); +} + +static inline int +__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *)) +{ + int __ex_flag, __res; + + __asm__ __volatile__ ( + "movli.l @%2, %0 \n" + "add #-1, %0 \n" + "movco.l %0, @%2 \n" + "movt %1 \n" + : "=&z" (__res), "=&r" (__ex_flag) + : "r" (&(count)->counter) + : "t"); + + __res |= !__ex_flag; + if (unlikely(__res != 0)) + __res = fail_fn(count); + + return __res; +} + +static inline void +__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *)) +{ + int __ex_flag, __res; + + __asm__ __volatile__ ( + "movli.l @%2, %0 \n\t" + "add #1, %0 \n\t" + "movco.l %0, @%2 \n\t" + "movt %1 \n\t" + : "=&z" (__res), "=&r" (__ex_flag) + : "r" (&(count)->counter) + : "t"); + + __res |= !__ex_flag; + if (unlikely(__res <= 0)) + fail_fn(count); +} + +/* + * If the unlock was done on a contended lock, or if the unlock simply fails + * then the mutex remains locked. + */ +#define __mutex_slowpath_needs_to_unlock() 1 + +/* + * For __mutex_fastpath_trylock we do an atomic decrement and check the + * result and put it in the __res variable. + */ +static inline int +__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *)) +{ + int __res, __orig; + + __asm__ __volatile__ ( + "1: movli.l @%2, %0 \n\t" + "dt %0 \n\t" + "movco.l %0,@%2 \n\t" + "bf 1b \n\t" + "cmp/eq #0,%0 \n\t" + "bt 2f \n\t" + "mov #0, %1 \n\t" + "bf 3f \n\t" + "2: mov #1, %1 \n\t" + "3: " + : "=&z" (__orig), "=&r" (__res) + : "r" (&count->counter) + : "t"); + + return __res; +} +#endif /* __ASM_SH_MUTEX_LLSC_H */ diff --git a/arch/sh/include/asm/mutex.h b/arch/sh/include/asm/mutex.h index 458c1f7fbc18..d8e37716a4a0 100644 --- a/arch/sh/include/asm/mutex.h +++ b/arch/sh/include/asm/mutex.h @@ -5,5 +5,8 @@ * implementation in place, or pick the atomic_xchg() based generic * implementation. (see asm-generic/mutex-xchg.h for details) */ - +#if defined(CONFIG_CPU_SH4A) +#include <asm/mutex-llsc.h> +#else #include <asm-generic/mutex-dec.h> +#endif diff --git a/arch/sh/include/asm/pm.h b/arch/sh/include/asm/pm.h deleted file mode 100644 index 56fdbd6b1c94..000000000000 --- a/arch/sh/include/asm/pm.h +++ /dev/null @@ -1,17 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. 
See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright 2006 (c) Andriy Skulysh <askulysh@gmail.com> - * - */ -#ifndef __ASM_SH_PM_H -#define __ASM_SH_PM_H - -extern u8 wakeup_start; -extern u8 wakeup_end; - -void pm_enter(void); - -#endif diff --git a/arch/sh/include/asm/processor.h b/arch/sh/include/asm/processor.h index 693364a20ad7..1ef4b24d7619 100644 --- a/arch/sh/include/asm/processor.h +++ b/arch/sh/include/asm/processor.h @@ -18,7 +18,7 @@ enum cpu_type { CPU_SH7619, /* SH-2A types */ - CPU_SH7203, CPU_SH7206, CPU_SH7263, CPU_MXG, + CPU_SH7201, CPU_SH7203, CPU_SH7206, CPU_SH7263, CPU_MXG, /* SH-3 types */ CPU_SH7705, CPU_SH7706, CPU_SH7707, @@ -82,6 +82,9 @@ extern struct sh_cpuinfo cpu_data[]; #define current_cpu_data cpu_data[smp_processor_id()] #define raw_current_cpu_data cpu_data[raw_smp_processor_id()] +#define cpu_sleep() __asm__ __volatile__ ("sleep" : : : "memory") +#define cpu_relax() barrier() + /* Forward decl */ struct seq_operations; diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h index a46a0207e977..d79063c5eb9c 100644 --- a/arch/sh/include/asm/processor_32.h +++ b/arch/sh/include/asm/processor_32.h @@ -175,6 +175,15 @@ static __inline__ void enable_fpu(void) void show_trace(struct task_struct *tsk, unsigned long *sp, struct pt_regs *regs); + +#ifdef CONFIG_DUMP_CODE +void show_code(struct pt_regs *regs); +#else +static inline void show_code(struct pt_regs *regs) +{ +} +#endif + extern unsigned long get_wchan(struct task_struct *p); #define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc) @@ -182,9 +191,6 @@ extern unsigned long get_wchan(struct task_struct *p); #define user_stack_pointer(regs) ((regs)->regs[15]) -#define cpu_sleep() __asm__ __volatile__ ("sleep" : : : "memory") -#define cpu_relax() barrier() - #if defined(CONFIG_CPU_SH2A) || defined(CONFIG_CPU_SH3) || \ defined(CONFIG_CPU_SH4) #define PREFETCH_STRIDE L1_CACHE_BYTES diff --git a/arch/sh/include/asm/processor_64.h b/arch/sh/include/asm/processor_64.h index b0b4824dfc4c..803177fcf086 100644 --- a/arch/sh/include/asm/processor_64.h +++ b/arch/sh/include/asm/processor_64.h @@ -226,9 +226,7 @@ extern unsigned long get_wchan(struct task_struct *p); #define KSTK_EIP(tsk) ((tsk)->thread.pc) #define KSTK_ESP(tsk) ((tsk)->thread.sp) -#define user_stack_pointer(regs) ((regs)->sp) - -#define cpu_relax() barrier() +#define user_stack_pointer(regs) ((regs)->regs[15]) #endif /* __ASSEMBLY__ */ #endif /* __ASM_SH_PROCESSOR_64_H */ diff --git a/arch/sh/include/asm/ptrace.h b/arch/sh/include/asm/ptrace.h index 3ad18e91bca6..12912ab80c15 100644 --- a/arch/sh/include/asm/ptrace.h +++ b/arch/sh/include/asm/ptrace.h @@ -86,6 +86,7 @@ struct pt_dspregs { unsigned long re; unsigned long mod; }; +#endif #define PTRACE_GETREGS 12 /* General registers */ #define PTRACE_SETREGS 13 @@ -100,7 +101,6 @@ struct pt_dspregs { #define PTRACE_GETDSPREGS 55 /* DSP registers */ #define PTRACE_SETDSPREGS 56 -#endif #ifdef __KERNEL__ #include <asm/addrspace.h> diff --git a/arch/sh/include/asm/sh_bios.h b/arch/sh/include/asm/sh_bios.h index 0ca261956e3d..d9c96d7cf6c7 100644 --- a/arch/sh/include/asm/sh_bios.h +++ b/arch/sh/include/asm/sh_bios.h @@ -10,7 +10,6 @@ extern void sh_bios_console_write(const char *buf, unsigned int len); extern void sh_bios_char_out(char ch); -extern int sh_bios_in_gdb_mode(void); extern void sh_bios_gdb_detach(void); extern void sh_bios_get_node_addr(unsigned char *node_addr); diff --git a/arch/sh/include/asm/string_64.h 
b/arch/sh/include/asm/string_64.h index aa1fef229c78..742007172624 100644 --- a/arch/sh/include/asm/string_64.h +++ b/arch/sh/include/asm/string_64.h @@ -1,17 +1,20 @@ #ifndef __ASM_SH_STRING_64_H #define __ASM_SH_STRING_64_H -/* - * include/asm-sh/string_64.h - * - * Copyright (C) 2000, 2001 Paolo Alberelli - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - */ +#ifdef __KERNEL__ + +#define __HAVE_ARCH_MEMSET +extern void *memset(void *__s, int __c, size_t __count); #define __HAVE_ARCH_MEMCPY extern void *memcpy(void *dest, const void *src, size_t count); +#define __HAVE_ARCH_STRLEN +extern size_t strlen(const char *); + +#define __HAVE_ARCH_STRCPY +extern char *strcpy(char *__dest, const char *__src); + +#endif /* __KERNEL__ */ + #endif /* __ASM_SH_STRING_64_H */ diff --git a/arch/sh/include/asm/syscall_32.h b/arch/sh/include/asm/syscall_32.h index 54773f26cd44..05a868a71ef5 100644 --- a/arch/sh/include/asm/syscall_32.h +++ b/arch/sh/include/asm/syscall_32.h @@ -5,7 +5,7 @@ #include <linux/sched.h> #include <asm/ptrace.h> -/* The system call number is given by the user in %g1 */ +/* The system call number is given by the user in R3 */ static inline long syscall_get_nr(struct task_struct *task, struct pt_regs *regs) { diff --git a/arch/sh/include/asm/syscall_64.h b/arch/sh/include/asm/syscall_64.h index bcaaa8ca4d70..e1143b9784d6 100644 --- a/arch/sh/include/asm/syscall_64.h +++ b/arch/sh/include/asm/syscall_64.h @@ -1,6 +1,80 @@ #ifndef __ASM_SH_SYSCALL_64_H #define __ASM_SH_SYSCALL_64_H -#include <asm-generic/syscall.h> +#include <linux/kernel.h> +#include <linux/sched.h> +#include <asm/ptrace.h> + +/* The system call number is given by the user in R9 */ +static inline long syscall_get_nr(struct task_struct *task, + struct pt_regs *regs) +{ + return (regs->syscall_nr >= 0) ? regs->regs[9] : -1L; +} + +static inline void syscall_rollback(struct task_struct *task, + struct pt_regs *regs) +{ + /* + * XXX: This needs some thought. On SH we don't + * save away the original R9 value anywhere. + */ +} + +static inline bool syscall_has_error(struct pt_regs *regs) +{ + return (regs->sr & 0x1) ? true : false; +} +static inline void syscall_set_error(struct pt_regs *regs) +{ + regs->sr |= 0x1; +} +static inline void syscall_clear_error(struct pt_regs *regs) +{ + regs->sr &= ~0x1; +} + +static inline long syscall_get_error(struct task_struct *task, + struct pt_regs *regs) +{ + return syscall_has_error(regs) ? 
regs->regs[9] : 0; +} + +static inline long syscall_get_return_value(struct task_struct *task, + struct pt_regs *regs) +{ + return regs->regs[9]; +} + +static inline void syscall_set_return_value(struct task_struct *task, + struct pt_regs *regs, + int error, long val) +{ + if (error) { + syscall_set_error(regs); + regs->regs[9] = -error; + } else { + syscall_clear_error(regs); + regs->regs[9] = val; + } +} + +static inline void syscall_get_arguments(struct task_struct *task, + struct pt_regs *regs, + unsigned int i, unsigned int n, + unsigned long *args) +{ + BUG_ON(i + n > 6); + memcpy(args, ®s->regs[2 + i], n * sizeof(args[0])); +} + +static inline void syscall_set_arguments(struct task_struct *task, + struct pt_regs *regs, + unsigned int i, unsigned int n, + const unsigned long *args) +{ + BUG_ON(i + n > 6); + memcpy(®s->regs[2 + i], args, n * sizeof(args[0])); +} #endif /* __ASM_SH_SYSCALL_64_H */ diff --git a/arch/sh/include/asm/system.h b/arch/sh/include/asm/system.h index 6160fe445161..c9ec6af8e745 100644 --- a/arch/sh/include/asm/system.h +++ b/arch/sh/include/asm/system.h @@ -175,6 +175,8 @@ asmlinkage void name##_trap_handler(unsigned int vec, struct pt_regs *regs) BUILD_TRAP_HANDLER(address_error); BUILD_TRAP_HANDLER(debug); BUILD_TRAP_HANDLER(bug); +BUILD_TRAP_HANDLER(breakpoint); +BUILD_TRAP_HANDLER(singlestep); BUILD_TRAP_HANDLER(fpu_error); BUILD_TRAP_HANDLER(fpu_state_restore); diff --git a/arch/sh/include/asm/unaligned-sh4a.h b/arch/sh/include/asm/unaligned-sh4a.h new file mode 100644 index 000000000000..d8f89770275b --- /dev/null +++ b/arch/sh/include/asm/unaligned-sh4a.h @@ -0,0 +1,258 @@ +#ifndef __ASM_SH_UNALIGNED_SH4A_H +#define __ASM_SH_UNALIGNED_SH4A_H + +/* + * SH-4A has support for unaligned 32-bit loads, and 32-bit loads only. + * Support for 16 and 64-bit accesses are done through shifting and + * masking relative to the endianness. Unaligned stores are not supported + * by the instruction encoding, so these continue to use the packed + * struct. + * + * The same note as with the movli.l/movco.l pair applies here, as long + * as the load is gauranteed to be inlined, nothing else will hook in to + * r0 and we get the return value for free. + * + * NOTE: Due to the fact we require r0 encoding, care should be taken to + * avoid mixing these heavily with other r0 consumers, such as the atomic + * ops. Failure to adhere to this can result in the compiler running out + * of spill registers and blowing up when building at low optimization + * levels. See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=34777. + */ +#include <linux/types.h> +#include <asm/byteorder.h> + +static __always_inline u32 __get_unaligned_cpu32(const u8 *p) +{ + unsigned long unaligned; + + __asm__ __volatile__ ( + "movua.l @%1, %0\n\t" + : "=z" (unaligned) + : "r" (p) + ); + + return unaligned; +} + +struct __una_u16 { u16 x __attribute__((packed)); }; +struct __una_u32 { u32 x __attribute__((packed)); }; +struct __una_u64 { u64 x __attribute__((packed)); }; + +static inline u16 __get_unaligned_cpu16(const u8 *p) +{ +#ifdef __LITTLE_ENDIAN + return __get_unaligned_cpu32(p) & 0xffff; +#else + return __get_unaligned_cpu32(p) >> 16; +#endif +} + +/* + * Even though movua.l supports auto-increment on the read side, it can + * only store to r0 due to instruction encoding constraints, so just let + * the compiler sort it out on its own. 
+ */ +static inline u64 __get_unaligned_cpu64(const u8 *p) +{ +#ifdef __LITTLE_ENDIAN + return (u64)__get_unaligned_cpu32(p + 4) << 32 | + __get_unaligned_cpu32(p); +#else + return (u64)__get_unaligned_cpu32(p) << 32 | + __get_unaligned_cpu32(p + 4); +#endif +} + +static inline u16 get_unaligned_le16(const void *p) +{ + return le16_to_cpu(__get_unaligned_cpu16(p)); +} + +static inline u32 get_unaligned_le32(const void *p) +{ + return le32_to_cpu(__get_unaligned_cpu32(p)); +} + +static inline u64 get_unaligned_le64(const void *p) +{ + return le64_to_cpu(__get_unaligned_cpu64(p)); +} + +static inline u16 get_unaligned_be16(const void *p) +{ + return be16_to_cpu(__get_unaligned_cpu16(p)); +} + +static inline u32 get_unaligned_be32(const void *p) +{ + return be32_to_cpu(__get_unaligned_cpu32(p)); +} + +static inline u64 get_unaligned_be64(const void *p) +{ + return be64_to_cpu(__get_unaligned_cpu64(p)); +} + +static inline void __put_le16_noalign(u8 *p, u16 val) +{ + *p++ = val; + *p++ = val >> 8; +} + +static inline void __put_le32_noalign(u8 *p, u32 val) +{ + __put_le16_noalign(p, val); + __put_le16_noalign(p + 2, val >> 16); +} + +static inline void __put_le64_noalign(u8 *p, u64 val) +{ + __put_le32_noalign(p, val); + __put_le32_noalign(p + 4, val >> 32); +} + +static inline void __put_be16_noalign(u8 *p, u16 val) +{ + *p++ = val >> 8; + *p++ = val; +} + +static inline void __put_be32_noalign(u8 *p, u32 val) +{ + __put_be16_noalign(p, val >> 16); + __put_be16_noalign(p + 2, val); +} + +static inline void __put_be64_noalign(u8 *p, u64 val) +{ + __put_be32_noalign(p, val >> 32); + __put_be32_noalign(p + 4, val); +} + +static inline void put_unaligned_le16(u16 val, void *p) +{ +#ifdef __LITTLE_ENDIAN + ((struct __una_u16 *)p)->x = val; +#else + __put_le16_noalign(p, val); +#endif +} + +static inline void put_unaligned_le32(u32 val, void *p) +{ +#ifdef __LITTLE_ENDIAN + ((struct __una_u32 *)p)->x = val; +#else + __put_le32_noalign(p, val); +#endif +} + +static inline void put_unaligned_le64(u64 val, void *p) +{ +#ifdef __LITTLE_ENDIAN + ((struct __una_u64 *)p)->x = val; +#else + __put_le64_noalign(p, val); +#endif +} + +static inline void put_unaligned_be16(u16 val, void *p) +{ +#ifdef __BIG_ENDIAN + ((struct __una_u16 *)p)->x = val; +#else + __put_be16_noalign(p, val); +#endif +} + +static inline void put_unaligned_be32(u32 val, void *p) +{ +#ifdef __BIG_ENDIAN + ((struct __una_u32 *)p)->x = val; +#else + __put_be32_noalign(p, val); +#endif +} + +static inline void put_unaligned_be64(u64 val, void *p) +{ +#ifdef __BIG_ENDIAN + ((struct __una_u64 *)p)->x = val; +#else + __put_be64_noalign(p, val); +#endif +} + +/* + * Cause a link-time error if we try an unaligned access other than + * 1,2,4 or 8 bytes long + */ +extern void __bad_unaligned_access_size(void); + +#define __get_unaligned_le(ptr) ((__force typeof(*(ptr)))({ \ + __builtin_choose_expr(sizeof(*(ptr)) == 1, *(ptr), \ + __builtin_choose_expr(sizeof(*(ptr)) == 2, get_unaligned_le16((ptr)), \ + __builtin_choose_expr(sizeof(*(ptr)) == 4, get_unaligned_le32((ptr)), \ + __builtin_choose_expr(sizeof(*(ptr)) == 8, get_unaligned_le64((ptr)), \ + __bad_unaligned_access_size())))); \ + })) + +#define __get_unaligned_be(ptr) ((__force typeof(*(ptr)))({ \ + __builtin_choose_expr(sizeof(*(ptr)) == 1, *(ptr), \ + __builtin_choose_expr(sizeof(*(ptr)) == 2, get_unaligned_be16((ptr)), \ + __builtin_choose_expr(sizeof(*(ptr)) == 4, get_unaligned_be32((ptr)), \ + __builtin_choose_expr(sizeof(*(ptr)) == 8, get_unaligned_be64((ptr)), \ + 
__bad_unaligned_access_size())))); \ + })) + +#define __put_unaligned_le(val, ptr) ({ \ + void *__gu_p = (ptr); \ + switch (sizeof(*(ptr))) { \ + case 1: \ + *(u8 *)__gu_p = (__force u8)(val); \ + break; \ + case 2: \ + put_unaligned_le16((__force u16)(val), __gu_p); \ + break; \ + case 4: \ + put_unaligned_le32((__force u32)(val), __gu_p); \ + break; \ + case 8: \ + put_unaligned_le64((__force u64)(val), __gu_p); \ + break; \ + default: \ + __bad_unaligned_access_size(); \ + break; \ + } \ + (void)0; }) + +#define __put_unaligned_be(val, ptr) ({ \ + void *__gu_p = (ptr); \ + switch (sizeof(*(ptr))) { \ + case 1: \ + *(u8 *)__gu_p = (__force u8)(val); \ + break; \ + case 2: \ + put_unaligned_be16((__force u16)(val), __gu_p); \ + break; \ + case 4: \ + put_unaligned_be32((__force u32)(val), __gu_p); \ + break; \ + case 8: \ + put_unaligned_be64((__force u64)(val), __gu_p); \ + break; \ + default: \ + __bad_unaligned_access_size(); \ + break; \ + } \ + (void)0; }) + +#ifdef __LITTLE_ENDIAN +# define get_unaligned __get_unaligned_le +# define put_unaligned __put_unaligned_le +#else +# define get_unaligned __get_unaligned_be +# define put_unaligned __put_unaligned_be +#endif + +#endif /* __ASM_SH_UNALIGNED_SH4A_H */ diff --git a/arch/sh/include/asm/unaligned.h b/arch/sh/include/asm/unaligned.h index c1641a01d50f..8c0ad5e4487a 100644 --- a/arch/sh/include/asm/unaligned.h +++ b/arch/sh/include/asm/unaligned.h @@ -1,7 +1,11 @@ #ifndef _ASM_SH_UNALIGNED_H #define _ASM_SH_UNALIGNED_H -/* SH can't handle unaligned accesses. */ +#ifdef CONFIG_CPU_SH4A +/* SH-4A can handle unaligned loads in a relatively neutered fashion. */ +#include <asm/unaligned-sh4a.h> +#else +/* Otherwise, SH can't handle unaligned accesses. */ #ifdef __LITTLE_ENDIAN__ # include <linux/unaligned/le_struct.h> # include <linux/unaligned/be_byteshift.h> @@ -15,5 +19,6 @@ # define get_unaligned __get_unaligned_be # define put_unaligned __put_unaligned_be #endif +#endif #endif /* _ASM_SH_UNALIGNED_H */ diff --git a/arch/sh/include/cpu-sh3/cpu/gpio.h b/arch/sh/include/cpu-sh3/cpu/gpio.h index 4e53eb314b8f..9a22b882f3dc 100644 --- a/arch/sh/include/cpu-sh3/cpu/gpio.h +++ b/arch/sh/include/cpu-sh3/cpu/gpio.h @@ -62,6 +62,20 @@ #define PORT_PSELC 0xA4050128UL #define PORT_PSELD 0xA405012AUL +#elif defined(CONFIG_CPU_SUBTYPE_SH7709) + +/* Control registers */ +#define PORT_PACR 0xa4000100UL +#define PORT_PBCR 0xa4000102UL +#define PORT_PCCR 0xa4000104UL +#define PORT_PFCR 0xa400010aUL + +/* Data registers */ +#define PORT_PADR 0xa4000120UL +#define PORT_PBDR 0xa4000122UL +#define PORT_PCDR 0xa4000124UL +#define PORT_PFDR 0xa400012aUL + #endif #endif diff --git a/arch/sh/include/mach-common/mach/edosk7705.h b/arch/sh/include/mach-common/mach/edosk7705.h index 5bdc9d9be3de..efc43b323466 100644 --- a/arch/sh/include/mach-common/mach/edosk7705.h +++ b/arch/sh/include/mach-common/mach/edosk7705.h @@ -1,30 +1,7 @@ -/* - * include/asm-sh/edosk7705.h - * - * Modified version of io_se.h for the EDOSK7705 specific functions. - * - * May be copied or modified under the terms of the GNU General Public - * License. See linux/COPYING for more information. 
- * - * IO functions for an Hitachi EDOSK7705 development board - */ - -#ifndef __ASM_SH_EDOSK7705_IO_H -#define __ASM_SH_EDOSK7705_IO_H +#ifndef __ASM_SH_EDOSK7705_H +#define __ASM_SH_EDOSK7705_H +#define __IO_PREFIX sh_edosk7705 #include <asm/io_generic.h> -extern unsigned char sh_edosk7705_inb(unsigned long port); -extern unsigned int sh_edosk7705_inl(unsigned long port); - -extern void sh_edosk7705_outb(unsigned char value, unsigned long port); -extern void sh_edosk7705_outl(unsigned int value, unsigned long port); - -extern void sh_edosk7705_insb(unsigned long port, void *addr, unsigned long count); -extern void sh_edosk7705_insl(unsigned long port, void *addr, unsigned long count); -extern void sh_edosk7705_outsb(unsigned long port, const void *addr, unsigned long count); -extern void sh_edosk7705_outsl(unsigned long port, const void *addr, unsigned long count); - -extern unsigned long sh_edosk7705_isa_port2addr(unsigned long offset); - -#endif /* __ASM_SH_EDOSK7705_IO_H */ +#endif /* __ASM_SH_EDOSK7705_H */ diff --git a/arch/sh/include/mach-se/mach/mrshpc.h b/arch/sh/include/mach-se/mach/mrshpc.h new file mode 100644 index 000000000000..56287ee8563a --- /dev/null +++ b/arch/sh/include/mach-se/mach/mrshpc.h @@ -0,0 +1,52 @@ +#ifndef __MACH_SE_MRSHPC_H +#define __MACH_SE_MRSHPC_H + +#include <linux/io.h> + +static inline void __init mrshpc_setup_windows(void) +{ + if ((__raw_readw(MRSHPC_CSR) & 0x000c) != 0) + return; /* Not detected */ + + if ((__raw_readw(MRSHPC_CSR) & 0x0080) == 0) { + __raw_writew(0x0674, MRSHPC_CPWCR); /* Card Vcc is 3.3v? */ + } else { + __raw_writew(0x0678, MRSHPC_CPWCR); /* Card Vcc is 5V */ + } + + /* + * PC-Card window open + * flag == COMMON/ATTRIBUTE/IO + */ + /* common window open */ + __raw_writew(0x8a84, MRSHPC_MW0CR1); + if((__raw_readw(MRSHPC_CSR) & 0x4000) != 0) + /* common mode & bus width 16bit SWAP = 1*/ + __raw_writew(0x0b00, MRSHPC_MW0CR2); + else + /* common mode & bus width 16bit SWAP = 0*/ + __raw_writew(0x0300, MRSHPC_MW0CR2); + + /* attribute window open */ + __raw_writew(0x8a85, MRSHPC_MW1CR1); + if ((__raw_readw(MRSHPC_CSR) & 0x4000) != 0) + /* attribute mode & bus width 16bit SWAP = 1*/ + __raw_writew(0x0a00, MRSHPC_MW1CR2); + else + /* attribute mode & bus width 16bit SWAP = 0*/ + __raw_writew(0x0200, MRSHPC_MW1CR2); + + /* I/O window open */ + __raw_writew(0x8a86, MRSHPC_IOWCR1); + __raw_writew(0x0008, MRSHPC_CDCR); /* I/O card mode */ + if ((__raw_readw(MRSHPC_CSR) & 0x4000) != 0) + __raw_writew(0x0a00, MRSHPC_IOWCR2); /* bus width 16bit SWAP = 1*/ + else + __raw_writew(0x0200, MRSHPC_IOWCR2); /* bus width 16bit SWAP = 0*/ + + __raw_writew(0x2000, MRSHPC_ICR); + __raw_writeb(0x00, PA_MRSHPC_MW2 + 0x206); + __raw_writeb(0x42, PA_MRSHPC_MW2 + 0x200); +} + +#endif /* __MACH_SE_MRSHPC_H */ diff --git a/arch/sh/include/mach-se/mach/se.h b/arch/sh/include/mach-se/mach/se.h index eb23000e1bbe..14be91c5a2f0 100644 --- a/arch/sh/include/mach-se/mach/se.h +++ b/arch/sh/include/mach-se/mach/se.h @@ -68,6 +68,24 @@ #define BCR_ILCRF (PA_BCR + 10) #define BCR_ILCRG (PA_BCR + 12) +#if defined(CONFIG_CPU_SUBTYPE_SH7709) +#define INTC_IRR0 0xa4000004UL +#define INTC_IRR1 0xa4000006UL +#define INTC_IRR2 0xa4000008UL + +#define INTC_ICR0 0xfffffee0UL +#define INTC_ICR1 0xa4000010UL +#define INTC_ICR2 0xa4000012UL +#define INTC_INTER 0xa4000014UL + +#define INTC_IPRC 0xa4000016UL +#define INTC_IPRD 0xa4000018UL +#define INTC_IPRE 0xa400001aUL + +#define IRQ0_IRQ 32 +#define IRQ1_IRQ 33 +#endif + #if defined(CONFIG_CPU_SUBTYPE_SH7705) #define IRQ_STNIC 
12 #define IRQ_CFCARD 14 diff --git a/arch/sh/include/mach-se/mach/se7343.h b/arch/sh/include/mach-se/mach/se7343.h index 98458460e632..749914b400fb 100644 --- a/arch/sh/include/mach-se/mach/se7343.h +++ b/arch/sh/include/mach-se/mach/se7343.h @@ -118,9 +118,6 @@ #define FPGA_IN 0xb1400000 #define FPGA_OUT 0xb1400002 -#define __IO_PREFIX sh7343se -#include <asm/io_generic.h> - #define IRQ0_IRQ 32 #define IRQ1_IRQ 33 #define IRQ4_IRQ 36 @@ -132,8 +129,10 @@ #define SE7343_FPGA_IRQ_MRSHPC3 3 #define SE7343_FPGA_IRQ_SMC 6 /* EXT_IRQ2 */ #define SE7343_FPGA_IRQ_USB 8 +#define SE7343_FPGA_IRQ_UARTA 10 +#define SE7343_FPGA_IRQ_UARTB 11 -#define SE7343_FPGA_IRQ_NR 11 +#define SE7343_FPGA_IRQ_NR 12 #define SE7343_FPGA_IRQ_BASE 120 #define MRSHPC_IRQ3 (SE7343_FPGA_IRQ_BASE + SE7343_FPGA_IRQ_MRSHPC3) @@ -142,6 +141,8 @@ #define MRSHPC_IRQ0 (SE7343_FPGA_IRQ_BASE + SE7343_FPGA_IRQ_MRSHPC0) #define SMC_IRQ (SE7343_FPGA_IRQ_BASE + SE7343_FPGA_IRQ_SMC) #define USB_IRQ (SE7343_FPGA_IRQ_BASE + SE7343_FPGA_IRQ_USB) +#define UARTA_IRQ (SE7343_FPGA_IRQ_BASE + SE7343_FPGA_IRQ_UARTA) +#define UARTB_IRQ (SE7343_FPGA_IRQ_BASE + SE7343_FPGA_IRQ_UARTB) /* arch/sh/boards/se/7343/irq.c */ void init_7343se_IRQ(void); diff --git a/arch/sh/kernel/Makefile_32 b/arch/sh/kernel/Makefile_32 index 48edfb145fb4..2e1b86e16ab5 100644 --- a/arch/sh/kernel/Makefile_32 +++ b/arch/sh/kernel/Makefile_32 @@ -4,25 +4,31 @@ extra-y := head_32.o init_task.o vmlinux.lds -obj-y := debugtraps.o io.o io_generic.o irq.o machvec.o process_32.o \ - ptrace_32.o setup.o signal_32.o sys_sh.o sys_sh32.o \ - syscalls_32.o time_32.o topology.o traps.o traps_32.o +ifdef CONFIG_FUNCTION_TRACER +# Do not profile debug and lowlevel utilities +CFLAGS_REMOVE_ftrace.o = -pg +endif + +obj-y := debugtraps.o idle.o io.o io_generic.o irq.o \ + machvec.o process_32.o ptrace_32.o setup.o signal_32.o \ + sys_sh.o sys_sh32.o syscalls_32.o time_32.o topology.o \ + traps.o traps_32.o obj-y += cpu/ timers/ obj-$(CONFIG_VSYSCALL) += vsyscall/ obj-$(CONFIG_SMP) += smp.o -obj-$(CONFIG_CF_ENABLER) += cf-enabler.o obj-$(CONFIG_SH_STANDARD_BIOS) += sh_bios.o -obj-$(CONFIG_SH_KGDB) += kgdb_stub.o kgdb_jmp.o +obj-$(CONFIG_KGDB) += kgdb.o obj-$(CONFIG_SH_CPU_FREQ) += cpufreq.o obj-$(CONFIG_MODULES) += sh_ksyms_32.o module.o obj-$(CONFIG_EARLY_PRINTK) += early_printk.o obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o obj-$(CONFIG_CRASH_DUMP) += crash_dump.o -obj-$(CONFIG_PM) += pm.o obj-$(CONFIG_STACKTRACE) += stacktrace.o obj-$(CONFIG_IO_TRAPPED) += io_trapped.o obj-$(CONFIG_KPROBES) += kprobes.o obj-$(CONFIG_GENERIC_GPIO) += gpio.o +obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o +obj-$(CONFIG_DUMP_CODE) += disassemble.o EXTRA_CFLAGS += -Werror diff --git a/arch/sh/kernel/Makefile_64 b/arch/sh/kernel/Makefile_64 index c97660b2b48d..fe425d7f6871 100644 --- a/arch/sh/kernel/Makefile_64 +++ b/arch/sh/kernel/Makefile_64 @@ -1,21 +1,18 @@ extra-y := head_64.o init_task.o vmlinux.lds -obj-y := debugtraps.o io.o io_generic.o irq.o machvec.o process_64.o \ +obj-y := debugtraps.o idle.o io.o io_generic.o irq.o machvec.o process_64.o \ ptrace_64.o setup.o signal_64.o sys_sh.o sys_sh64.o \ syscalls_64.o time_64.o topology.o traps.o traps_64.o obj-y += cpu/ timers/ obj-$(CONFIG_VSYSCALL) += vsyscall/ obj-$(CONFIG_SMP) += smp.o -obj-$(CONFIG_CF_ENABLER) += cf-enabler.o obj-$(CONFIG_SH_STANDARD_BIOS) += sh_bios.o -obj-$(CONFIG_SH_KGDB) += kgdb_stub.o kgdb_jmp.o obj-$(CONFIG_SH_CPU_FREQ) += cpufreq.o obj-$(CONFIG_MODULES) += sh_ksyms_64.o module.o obj-$(CONFIG_EARLY_PRINTK) += 
early_printk.o obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o obj-$(CONFIG_CRASH_DUMP) += crash_dump.o -obj-$(CONFIG_PM) += pm.o obj-$(CONFIG_STACKTRACE) += stacktrace.o obj-$(CONFIG_IO_TRAPPED) += io_trapped.o obj-$(CONFIG_GENERIC_GPIO) += gpio.o diff --git a/arch/sh/kernel/cf-enabler.c b/arch/sh/kernel/cf-enabler.c deleted file mode 100644 index bea40339919b..000000000000 --- a/arch/sh/kernel/cf-enabler.c +++ /dev/null @@ -1,168 +0,0 @@ -/* $Id: cf-enabler.c,v 1.4 2004/02/22 22:44:36 kkojima Exp $ - * - * linux/drivers/block/cf-enabler.c - * - * Copyright (C) 1999 Niibe Yutaka - * Copyright (C) 2000 Toshiharu Nozawa - * Copyright (C) 2001 A&D Co., Ltd. - * - * Enable the CF configuration. - */ - -#include <linux/init.h> -#include <linux/mm.h> -#include <linux/vmalloc.h> -#include <linux/interrupt.h> -#include <asm/io.h> -#include <asm/irq.h> - -/* - * You can connect Compact Flash directly to the bus of SuperH. - * This is the enabler for that. - * - * SIM: How generic is this really? It looks pretty board, or at - * least SH sub-type, specific to me. - * I know it doesn't work on the Overdrive! - */ - -/* - * 0xB8000000 : Attribute - * 0xB8001000 : Common Memory - * 0xBA000000 : I/O - */ -#if defined(CONFIG_CPU_SH4) -/* SH4 can't access PCMCIA interface through P2 area. - * we must remap it with appropriate attribute bit of the page set. - * this part is based on Greg Banks' hd64465_ss.c implementation - Masahiro Abe */ - -#if defined(CONFIG_CF_AREA6) -#define slot_no 0 -#else -#define slot_no 1 -#endif - -/* use this pointer to access to directly connected compact flash io area*/ -void *cf_io_base; - -static int __init allocate_cf_area(void) -{ - pgprot_t prot; - unsigned long paddrbase, psize; - - /* open I/O area window */ - paddrbase = virt_to_phys((void*)CONFIG_CF_BASE_ADDR); - psize = PAGE_SIZE; - prot = PAGE_KERNEL_PCC(slot_no, _PAGE_PCC_IO16); - cf_io_base = p3_ioremap(paddrbase, psize, prot.pgprot); - if (!cf_io_base) { - printk("allocate_cf_area : can't open CF I/O window!\n"); - return -ENOMEM; - } -/* printk("p3_ioremap(paddr=0x%08lx, psize=0x%08lx, prot=0x%08lx)=0x%08lx\n", - paddrbase, psize, prot.pgprot, cf_io_base);*/ - - /* XXX : do we need attribute and common-memory area also? */ - - return 0; -} -#endif - -static int __init cf_init_default(void) -{ -/* You must have enabled the card, and set the level interrupt - * before reaching this point. Possibly in boot ROM or boot loader. - */ -#if defined(CONFIG_CPU_SH4) - allocate_cf_area(); -#endif - - return 0; -} - -#if defined(CONFIG_SH_SOLUTION_ENGINE) -#include <mach-se/mach/se.h> -#elif defined(CONFIG_SH_7722_SOLUTION_ENGINE) -#include <mach-se/mach/se7722.h> -#elif defined(CONFIG_SH_7721_SOLUTION_ENGINE) -#include <mach-se/mach/se7721.h> -#endif - -/* - * SolutionEngine Seriese - * - * about MS770xSE - * 0xB8400000 : Common Memory - * 0xB8500000 : Attribute - * 0xB8600000 : I/O - * - * about MS7722SE - * 0xB0400000 : Common Memory - * 0xB0500000 : Attribute - * 0xB0600000 : I/O - */ - -#if defined(CONFIG_SH_SOLUTION_ENGINE) || \ - defined(CONFIG_SH_7722_SOLUTION_ENGINE) || \ - defined(CONFIG_SH_7721_SOLUTION_ENGINE) -static int __init cf_init_se(void) -{ - if ((ctrl_inw(MRSHPC_CSR) & 0x000c) != 0) - return 0; /* Not detected */ - - if ((ctrl_inw(MRSHPC_CSR) & 0x0080) == 0) { - ctrl_outw(0x0674, MRSHPC_CPWCR); /* Card Vcc is 3.3v? 
*/ - } else { - ctrl_outw(0x0678, MRSHPC_CPWCR); /* Card Vcc is 5V */ - } - - /* - * PC-Card window open - * flag == COMMON/ATTRIBUTE/IO - */ - /* common window open */ - ctrl_outw(0x8a84, MRSHPC_MW0CR1); - if((ctrl_inw(MRSHPC_CSR) & 0x4000) != 0) - /* common mode & bus width 16bit SWAP = 1*/ - ctrl_outw(0x0b00, MRSHPC_MW0CR2); - else - /* common mode & bus width 16bit SWAP = 0*/ - ctrl_outw(0x0300, MRSHPC_MW0CR2); - - /* attribute window open */ - ctrl_outw(0x8a85, MRSHPC_MW1CR1); - if ((ctrl_inw(MRSHPC_CSR) & 0x4000) != 0) - /* attribute mode & bus width 16bit SWAP = 1*/ - ctrl_outw(0x0a00, MRSHPC_MW1CR2); - else - /* attribute mode & bus width 16bit SWAP = 0*/ - ctrl_outw(0x0200, MRSHPC_MW1CR2); - - /* I/O window open */ - ctrl_outw(0x8a86, MRSHPC_IOWCR1); - ctrl_outw(0x0008, MRSHPC_CDCR); /* I/O card mode */ - if ((ctrl_inw(MRSHPC_CSR) & 0x4000) != 0) - ctrl_outw(0x0a00, MRSHPC_IOWCR2); /* bus width 16bit SWAP = 1*/ - else - ctrl_outw(0x0200, MRSHPC_IOWCR2); /* bus width 16bit SWAP = 0*/ - - ctrl_outw(0x2000, MRSHPC_ICR); - ctrl_outb(0x00, PA_MRSHPC_MW2 + 0x206); - ctrl_outb(0x42, PA_MRSHPC_MW2 + 0x200); - return 0; -} -#else -static int __init cf_init_se(void) -{ - return -1; -} -#endif - -static int __init cf_init(void) -{ - if (mach_is_se() || mach_is_7722se() || mach_is_7721se()) - return cf_init_se(); - - return cf_init_default(); -} - -__initcall (cf_init); diff --git a/arch/sh/kernel/cpu/clock.c b/arch/sh/kernel/cpu/clock.c index b7e46d5bba43..7b17137536d6 100644 --- a/arch/sh/kernel/cpu/clock.c +++ b/arch/sh/kernel/cpu/clock.c @@ -117,6 +117,11 @@ int clk_enable(struct clk *clk) unsigned long flags; int ret; + if (!clk) + return -EINVAL; + + clk_enable(clk->parent); + spin_lock_irqsave(&clock_lock, flags); ret = __clk_enable(clk); spin_unlock_irqrestore(&clock_lock, flags); @@ -147,9 +152,14 @@ void clk_disable(struct clk *clk) { unsigned long flags; + if (!clk) + return; + spin_lock_irqsave(&clock_lock, flags); __clk_disable(clk); spin_unlock_irqrestore(&clock_lock, flags); + + clk_disable(clk->parent); } EXPORT_SYMBOL_GPL(clk_disable); diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c index 75fb03d35670..d29e69c156f0 100644 --- a/arch/sh/kernel/cpu/init.c +++ b/arch/sh/kernel/cpu/init.c @@ -261,9 +261,11 @@ asmlinkage void __init sh_cpu_init(void) cache_init(); if (raw_smp_processor_id() == 0) { +#ifdef CONFIG_MMU shm_align_mask = max_t(unsigned long, current_cpu_data.dcache.way_size - 1, PAGE_SIZE - 1); +#endif /* Boot CPU sets the cache shape */ detect_cache_shape(); diff --git a/arch/sh/kernel/cpu/sh2a/Makefile b/arch/sh/kernel/cpu/sh2a/Makefile index 428450cc0809..45f85c77ef75 100644 --- a/arch/sh/kernel/cpu/sh2a/Makefile +++ b/arch/sh/kernel/cpu/sh2a/Makefile @@ -8,9 +8,10 @@ common-y += ex.o entry.o obj-$(CONFIG_SH_FPU) += fpu.o -obj-$(CONFIG_CPU_SUBTYPE_SH7206) += setup-sh7206.o clock-sh7206.o +obj-$(CONFIG_CPU_SUBTYPE_SH7201) += setup-sh7201.o clock-sh7201.o obj-$(CONFIG_CPU_SUBTYPE_SH7203) += setup-sh7203.o clock-sh7203.o obj-$(CONFIG_CPU_SUBTYPE_SH7263) += setup-sh7203.o clock-sh7203.o +obj-$(CONFIG_CPU_SUBTYPE_SH7206) += setup-sh7206.o clock-sh7206.o obj-$(CONFIG_CPU_SUBTYPE_MXG) += setup-mxg.o clock-sh7206.o # Pinmux setup diff --git a/arch/sh/kernel/cpu/sh2a/clock-sh7201.c b/arch/sh/kernel/cpu/sh2a/clock-sh7201.c new file mode 100644 index 000000000000..020a96fe961a --- /dev/null +++ b/arch/sh/kernel/cpu/sh2a/clock-sh7201.c @@ -0,0 +1,85 @@ +/* + * arch/sh/kernel/cpu/sh2a/clock-sh7201.c + * + * SH7201 support for the clock framework + * + * 
Copyright (C) 2008 Peter Griffin <pgriffin@mpc-data.co.uk> + * + * Based on clock-sh4.c + * Copyright (C) 2005 Paul Mundt + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <asm/clock.h> +#include <asm/freq.h> +#include <asm/io.h> + +const static int pll1rate[]={1,2,3,4,6,8}; +const static int pfc_divisors[]={1,2,3,4,6,8,12}; +#define ifc_divisors pfc_divisors + +#if (CONFIG_SH_CLK_MD == 0) +#define PLL2 (4) +#elif (CONFIG_SH_CLK_MD == 2) +#define PLL2 (2) +#elif (CONFIG_SH_CLK_MD == 3) +#define PLL2 (1) +#else +#error "Illegal Clock Mode!" +#endif + +static void master_clk_init(struct clk *clk) +{ + clk->rate = 10000000 * PLL2 * pll1rate[(ctrl_inw(FREQCR) >> 8) & 0x0007]; +} + +static struct clk_ops sh7201_master_clk_ops = { + .init = master_clk_init, +}; + +static void module_clk_recalc(struct clk *clk) +{ + int idx = (ctrl_inw(FREQCR) & 0x0007); + clk->rate = clk->parent->rate / pfc_divisors[idx]; +} + +static struct clk_ops sh7201_module_clk_ops = { + .recalc = module_clk_recalc, +}; + +static void bus_clk_recalc(struct clk *clk) +{ + int idx = (ctrl_inw(FREQCR) & 0x0007); + clk->rate = clk->parent->rate / pfc_divisors[idx]; +} + +static struct clk_ops sh7201_bus_clk_ops = { + .recalc = bus_clk_recalc, +}; + +static void cpu_clk_recalc(struct clk *clk) +{ + int idx = ((ctrl_inw(FREQCR) >> 4) & 0x0007); + clk->rate = clk->parent->rate / ifc_divisors[idx]; +} + +static struct clk_ops sh7201_cpu_clk_ops = { + .recalc = cpu_clk_recalc, +}; + +static struct clk_ops *sh7201_clk_ops[] = { + &sh7201_master_clk_ops, + &sh7201_module_clk_ops, + &sh7201_bus_clk_ops, + &sh7201_cpu_clk_ops, +}; + +void __init arch_init_clk_ops(struct clk_ops **ops, int idx) +{ + if (idx < ARRAY_SIZE(sh7201_clk_ops)) + *ops = sh7201_clk_ops[idx]; +} diff --git a/arch/sh/kernel/cpu/sh2a/probe.c b/arch/sh/kernel/cpu/sh2a/probe.c index 6e79132f6f30..e098e2f6aa08 100644 --- a/arch/sh/kernel/cpu/sh2a/probe.c +++ b/arch/sh/kernel/cpu/sh2a/probe.c @@ -18,16 +18,17 @@ int __init detect_cpu_and_cache_system(void) /* All SH-2A CPUs have support for 16 and 32-bit opcodes.. */ boot_cpu_data.flags |= CPU_HAS_OP32; -#if defined(CONFIG_CPU_SUBTYPE_SH7203) +#if defined(CONFIG_CPU_SUBTYPE_SH7201) + boot_cpu_data.type = CPU_SH7201; + boot_cpu_data.flags |= CPU_HAS_FPU; +#elif defined(CONFIG_CPU_SUBTYPE_SH7203) boot_cpu_data.type = CPU_SH7203; - /* SH7203 has an FPU.. */ boot_cpu_data.flags |= CPU_HAS_FPU; #elif defined(CONFIG_CPU_SUBTYPE_SH7263) boot_cpu_data.type = CPU_SH7263; boot_cpu_data.flags |= CPU_HAS_FPU; #elif defined(CONFIG_CPU_SUBTYPE_SH7206) boot_cpu_data.type = CPU_SH7206; - /* While SH7206 has a DSP.. */ boot_cpu_data.flags |= CPU_HAS_DSP; #elif defined(CONFIG_CPU_SUBTYPE_MXG) boot_cpu_data.type = CPU_MXG; diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7201.c b/arch/sh/kernel/cpu/sh2a/setup-sh7201.c new file mode 100644 index 000000000000..0631e421c022 --- /dev/null +++ b/arch/sh/kernel/cpu/sh2a/setup-sh7201.c @@ -0,0 +1,331 @@ +/* + * SH7201 setup + * + * Copyright (C) 2008 Peter Griffin pgriffin@mpc-data.co.uk + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ */ +#include <linux/platform_device.h> +#include <linux/init.h> +#include <linux/serial.h> +#include <linux/serial_sci.h> + +enum { + UNUSED = 0, + + /* interrupt sources */ + IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7, + PINT0, PINT1, PINT2, PINT3, PINT4, PINT5, PINT6, PINT7, + ADC_ADI, + MTU2_TGI0A, MTU2_TGI0B, MTU2_TGI0C, MTU2_TGI0D, + MTU2_TCI0V, MTU2_TGI0E, MTU2_TGI0F, + MTU2_TGI1A, MTU2_TGI1B, MTU2_TCI1V, MTU2_TCI1U, + MTU2_TGI2A, MTU2_TGI2B, MTU2_TCI2V, MTU2_TCI2U, + MTU2_TGI3A, MTU2_TGI3B, MTU2_TGI3C, MTU2_TGI3D, MTU2_TCI3V, + MTU2_TGI4A, MTU2_TGI4B, MTU2_TGI4C, MTU2_TGI4D, MTU2_TCI4V, + MTU2_TGI5U, MTU2_TGI5V, MTU2_TGI5W, + RTC_ARM, RTC_PRD, RTC_CUP, + WDT, + IIC30_STPI, IIC30_NAKI, IIC30_RXI, IIC30_TXI, IIC30_TEI, + IIC31_STPI, IIC31_NAKI, IIC31_RXI, IIC31_TXI, IIC31_TEI, + IIC32_STPI, IIC32_NAKI, IIC32_RXI, IIC32_TXI, IIC32_TEI, + + DMAC0_DMINT0, DMAC1_DMINT1, + DMAC2_DMINT2, DMAC3_DMINT3, + + SCIF0_BRI, SCIF0_ERI, SCIF0_RXI, SCIF0_TXI, + SCIF1_BRI, SCIF1_ERI, SCIF1_RXI, SCIF1_TXI, + SCIF2_BRI, SCIF2_ERI, SCIF2_RXI, SCIF2_TXI, + SCIF3_BRI, SCIF3_ERI, SCIF3_RXI, SCIF3_TXI, + SCIF4_BRI, SCIF4_ERI, SCIF4_RXI, SCIF4_TXI, + SCIF5_BRI, SCIF5_ERI, SCIF5_RXI, SCIF5_TXI, + SCIF6_BRI, SCIF6_ERI, SCIF6_RXI, SCIF6_TXI, + SCIF7_BRI, SCIF7_ERI, SCIF7_RXI, SCIF7_TXI, + + DMAC0_DMINTA, DMAC4_DMINT4, DMAC5_DMINT5, DMAC6_DMINT6, + DMAC7_DMINT7, + + RCAN0_ERS, RCAN0_OVR, + RCAN0_SLE, + RCAN0_RM0, RCAN0_RM1, + + RCAN1_ERS, RCAN1_OVR, + RCAN1_SLE, + RCAN1_RM0, RCAN1_RM1, + + SSI0_SSII, SSI1_SSII, + + TMR0_CMIA0, TMR0_CMIB0, TMR0_OVI0, + TMR1_CMIA1, TMR1_CMIB1, TMR1_OVI1, + + /* interrupt groups */ + + IRQ, PINT, ADC, + MTU20_ABCD, MTU20_VEF, MTU21_AB, MTU21_VU, MTU22_AB, MTU22_VU, + MTU23_ABCD, MTU24_ABCD, MTU25_UVW, + RTC, IIC30, IIC31, IIC32, + SCIF0, SCIF1, SCIF2, SCIF3, SCIF4, SCIF5, SCIF6, SCIF7, + RCAN0, RCAN1, TMR0, TMR1 + +}; + +static struct intc_vect vectors[] __initdata = { + INTC_IRQ(IRQ0, 64), INTC_IRQ(IRQ1, 65), + INTC_IRQ(IRQ2, 66), INTC_IRQ(IRQ3, 67), + INTC_IRQ(IRQ4, 68), INTC_IRQ(IRQ5, 69), + INTC_IRQ(IRQ6, 70), INTC_IRQ(IRQ7, 71), + INTC_IRQ(PINT0, 80), INTC_IRQ(PINT1, 81), + INTC_IRQ(PINT2, 82), INTC_IRQ(PINT3, 83), + INTC_IRQ(PINT4, 84), INTC_IRQ(PINT5, 85), + INTC_IRQ(PINT6, 86), INTC_IRQ(PINT7, 87), + + INTC_IRQ(ADC_ADI, 92), + + INTC_IRQ(MTU2_TGI0A, 108), INTC_IRQ(MTU2_TGI0B, 109), + INTC_IRQ(MTU2_TGI0C, 110), INTC_IRQ(MTU2_TGI0D, 111), + INTC_IRQ(MTU2_TCI0V, 112), + INTC_IRQ(MTU2_TGI0E, 113), INTC_IRQ(MTU2_TGI0F, 114), + + INTC_IRQ(MTU2_TGI1A, 116), INTC_IRQ(MTU2_TGI1B, 117), + INTC_IRQ(MTU2_TCI1V, 120), INTC_IRQ(MTU2_TCI1U, 121), + + INTC_IRQ(MTU2_TGI2A, 124), INTC_IRQ(MTU2_TGI2B, 125), + INTC_IRQ(MTU2_TCI2V, 128), INTC_IRQ(MTU2_TCI2U, 129), + + INTC_IRQ(MTU2_TGI3A, 132), INTC_IRQ(MTU2_TGI3B, 133), + INTC_IRQ(MTU2_TGI3C, 134), INTC_IRQ(MTU2_TGI3D, 135), + INTC_IRQ(MTU2_TCI3V, 136), + + INTC_IRQ(MTU2_TGI4A, 140), INTC_IRQ(MTU2_TGI4B, 141), + INTC_IRQ(MTU2_TGI4C, 142), INTC_IRQ(MTU2_TGI4D, 143), + INTC_IRQ(MTU2_TCI4V, 144), + + INTC_IRQ(MTU2_TGI5U, 148), INTC_IRQ(MTU2_TGI5V, 149), + INTC_IRQ(MTU2_TGI5W, 150), + + INTC_IRQ(RTC_ARM, 152), INTC_IRQ(RTC_PRD, 153), + INTC_IRQ(RTC_CUP, 154), INTC_IRQ(WDT, 156), + + INTC_IRQ(IIC30_STPI, 157), INTC_IRQ(IIC30_NAKI, 158), + INTC_IRQ(IIC30_RXI, 159), INTC_IRQ(IIC30_TXI, 160), + INTC_IRQ(IIC30_TEI, 161), + + INTC_IRQ(IIC31_STPI, 164), INTC_IRQ(IIC31_NAKI, 165), + INTC_IRQ(IIC31_RXI, 166), INTC_IRQ(IIC31_TXI, 167), + INTC_IRQ(IIC31_TEI, 168), + + INTC_IRQ(IIC32_STPI, 170), INTC_IRQ(IIC32_NAKI, 171), + INTC_IRQ(IIC32_RXI, 172), 
INTC_IRQ(IIC32_TXI, 173), + INTC_IRQ(IIC32_TEI, 174), + + INTC_IRQ(DMAC0_DMINT0, 176), INTC_IRQ(DMAC1_DMINT1, 177), + INTC_IRQ(DMAC2_DMINT2, 178), INTC_IRQ(DMAC3_DMINT3, 179), + + INTC_IRQ(SCIF0_BRI, 180), INTC_IRQ(SCIF0_ERI, 181), + INTC_IRQ(SCIF0_RXI, 182), INTC_IRQ(SCIF0_TXI, 183), + INTC_IRQ(SCIF1_BRI, 184), INTC_IRQ(SCIF1_ERI, 185), + INTC_IRQ(SCIF1_RXI, 186), INTC_IRQ(SCIF1_TXI, 187), + INTC_IRQ(SCIF2_BRI, 188), INTC_IRQ(SCIF2_ERI, 189), + INTC_IRQ(SCIF2_RXI, 190), INTC_IRQ(SCIF2_TXI, 191), + INTC_IRQ(SCIF3_BRI, 192), INTC_IRQ(SCIF3_ERI, 193), + INTC_IRQ(SCIF3_RXI, 194), INTC_IRQ(SCIF3_TXI, 195), + INTC_IRQ(SCIF4_BRI, 196), INTC_IRQ(SCIF4_ERI, 197), + INTC_IRQ(SCIF4_RXI, 198), INTC_IRQ(SCIF4_TXI, 199), + INTC_IRQ(SCIF5_BRI, 200), INTC_IRQ(SCIF5_ERI, 201), + INTC_IRQ(SCIF5_RXI, 202), INTC_IRQ(SCIF5_TXI, 203), + INTC_IRQ(SCIF6_BRI, 204), INTC_IRQ(SCIF6_ERI, 205), + INTC_IRQ(SCIF6_RXI, 206), INTC_IRQ(SCIF6_TXI, 207), + INTC_IRQ(SCIF7_BRI, 208), INTC_IRQ(SCIF7_ERI, 209), + INTC_IRQ(SCIF7_RXI, 210), INTC_IRQ(SCIF7_TXI, 211), + + INTC_IRQ(DMAC0_DMINTA, 212), INTC_IRQ(DMAC4_DMINT4, 216), + INTC_IRQ(DMAC5_DMINT5, 217), INTC_IRQ(DMAC6_DMINT6, 218), + INTC_IRQ(DMAC7_DMINT7, 219), + + INTC_IRQ(RCAN0_ERS, 228), INTC_IRQ(RCAN0_OVR, 229), + INTC_IRQ(RCAN0_SLE, 230), + INTC_IRQ(RCAN0_RM0, 231), INTC_IRQ(RCAN0_RM1, 232), + + INTC_IRQ(RCAN1_ERS, 234), INTC_IRQ(RCAN1_OVR, 235), + INTC_IRQ(RCAN1_SLE, 236), + INTC_IRQ(RCAN1_RM0, 237), INTC_IRQ(RCAN1_RM1, 238), + + INTC_IRQ(SSI0_SSII, 244), INTC_IRQ(SSI1_SSII, 245), + + INTC_IRQ(TMR0_CMIA0, 246), INTC_IRQ(TMR0_CMIB0, 247), + INTC_IRQ(TMR0_OVI0, 248), + + INTC_IRQ(TMR1_CMIA1, 252), INTC_IRQ(TMR1_CMIB1, 253), + INTC_IRQ(TMR1_OVI1, 254), + +}; + +static struct intc_group groups[] __initdata = { + INTC_GROUP(PINT, PINT0, PINT1, PINT2, PINT3, + PINT4, PINT5, PINT6, PINT7), + INTC_GROUP(MTU20_ABCD, MTU2_TGI0A, MTU2_TGI0B, MTU2_TGI0C, MTU2_TGI0D), + INTC_GROUP(MTU20_VEF, MTU2_TCI0V, MTU2_TGI0E, MTU2_TGI0F), + + INTC_GROUP(MTU21_AB, MTU2_TGI1A, MTU2_TGI1B), + INTC_GROUP(MTU21_VU, MTU2_TCI1V, MTU2_TCI1U), + INTC_GROUP(MTU22_AB, MTU2_TGI2A, MTU2_TGI2B), + INTC_GROUP(MTU22_VU, MTU2_TCI2V, MTU2_TCI2U), + INTC_GROUP(MTU23_ABCD, MTU2_TGI3A, MTU2_TGI3B, MTU2_TGI3C, MTU2_TGI3D), + INTC_GROUP(MTU24_ABCD, MTU2_TGI4A, MTU2_TGI4B, MTU2_TGI4C, MTU2_TGI4D), + INTC_GROUP(MTU25_UVW, MTU2_TGI5U, MTU2_TGI5V, MTU2_TGI5W), + INTC_GROUP(RTC, RTC_ARM, RTC_PRD, RTC_CUP ), + + INTC_GROUP(IIC30, IIC30_STPI, IIC30_NAKI, IIC30_RXI, IIC30_TXI, + IIC30_TEI), + INTC_GROUP(IIC31, IIC31_STPI, IIC31_NAKI, IIC31_RXI, IIC31_TXI, + IIC31_TEI), + INTC_GROUP(IIC32, IIC32_STPI, IIC32_NAKI, IIC32_RXI, IIC32_TXI, + IIC32_TEI), + + INTC_GROUP(SCIF0, SCIF0_BRI, SCIF0_ERI, SCIF0_RXI, SCIF0_TXI), + INTC_GROUP(SCIF1, SCIF1_BRI, SCIF1_ERI, SCIF1_RXI, SCIF1_TXI), + INTC_GROUP(SCIF2, SCIF2_BRI, SCIF2_ERI, SCIF2_RXI, SCIF2_TXI), + INTC_GROUP(SCIF3, SCIF3_BRI, SCIF3_ERI, SCIF3_RXI, SCIF3_TXI), + INTC_GROUP(SCIF4, SCIF4_BRI, SCIF4_ERI, SCIF4_RXI, SCIF4_TXI), + INTC_GROUP(SCIF5, SCIF5_BRI, SCIF5_ERI, SCIF5_RXI, SCIF5_TXI), + INTC_GROUP(SCIF6, SCIF6_BRI, SCIF6_ERI, SCIF6_RXI, SCIF6_TXI), + INTC_GROUP(SCIF7, SCIF7_BRI, SCIF7_ERI, SCIF7_RXI, SCIF7_TXI), + + INTC_GROUP(RCAN0, RCAN0_ERS, RCAN0_OVR, RCAN0_RM0, RCAN0_RM1, + RCAN0_SLE), + INTC_GROUP(RCAN1, RCAN1_ERS, RCAN1_OVR, RCAN1_RM0, RCAN1_RM1, + RCAN1_SLE), + + INTC_GROUP(TMR0, TMR0_CMIA0, TMR0_CMIB0, TMR0_OVI0), + INTC_GROUP(TMR1, TMR1_CMIA1, TMR1_CMIB1, TMR1_OVI1), +}; + +static struct intc_prio_reg prio_registers[] __initdata = { + { 0xfffe9418, 0, 16, 4, /* 
IPR01 */ { IRQ0, IRQ1, IRQ2, IRQ3 } }, + { 0xfffe941a, 0, 16, 4, /* IPR02 */ { IRQ4, IRQ5, IRQ6, IRQ7 } }, + { 0xfffe9420, 0, 16, 4, /* IPR05 */ { PINT, 0, ADC_ADI, 0 } }, + { 0xfffe9800, 0, 16, 4, /* IPR06 */ { 0, MTU20_ABCD, MTU20_VEF, MTU21_AB } }, + { 0xfffe9802, 0, 16, 4, /* IPR07 */ { MTU21_VU, MTU22_AB, MTU22_VU, MTU23_ABCD } }, + { 0xfffe9804, 0, 16, 4, /* IPR08 */ { MTU2_TCI3V, MTU24_ABCD, MTU2_TCI4V, MTU25_UVW } }, + + { 0xfffe9806, 0, 16, 4, /* IPR09 */ { RTC, WDT, IIC30, 0 } }, + { 0xfffe9808, 0, 16, 4, /* IPR10 */ { IIC31, IIC32, DMAC0_DMINT0, DMAC1_DMINT1 } }, + { 0xfffe980a, 0, 16, 4, /* IPR11 */ { DMAC2_DMINT2, DMAC3_DMINT3, SCIF0 , SCIF1 } }, + { 0xfffe980c, 0, 16, 4, /* IPR12 */ { SCIF2, SCIF3, SCIF4, SCIF5 } }, + { 0xfffe980e, 0, 16, 4, /* IPR13 */ { SCIF6, SCIF7, DMAC0_DMINTA, DMAC4_DMINT4 } }, + { 0xfffe9810, 0, 16, 4, /* IPR14 */ { DMAC5_DMINT5, DMAC6_DMINT6, DMAC7_DMINT7, 0 } }, + { 0xfffe9812, 0, 16, 4, /* IPR15 */ { 0, RCAN0, RCAN1, 0 } }, + { 0xfffe9814, 0, 16, 4, /* IPR16 */ { SSI0_SSII, SSI1_SSII, TMR0, TMR1 } }, +}; + +static struct intc_mask_reg mask_registers[] __initdata = { + { 0xfffe9408, 0, 16, /* PINTER */ + { 0, 0, 0, 0, 0, 0, 0, 0, + PINT7, PINT6, PINT5, PINT4, PINT3, PINT2, PINT1, PINT0 } }, +}; + +static DECLARE_INTC_DESC(intc_desc, "sh7201", vectors, groups, + mask_registers, prio_registers, NULL); + +static struct plat_sci_port sci_platform_data[] = { + { + .mapbase = 0xfffe8000, + .flags = UPF_BOOT_AUTOCONF, + .type = PORT_SCIF, + .irqs = { 181, 182, 183, 180} + }, { + .mapbase = 0xfffe8800, + .flags = UPF_BOOT_AUTOCONF, + .type = PORT_SCIF, + .irqs = { 185, 186, 187, 184} + }, { + .mapbase = 0xfffe9000, + .flags = UPF_BOOT_AUTOCONF, + .type = PORT_SCIF, + .irqs = { 189, 186, 187, 188} + }, { + .mapbase = 0xfffe9800, + .flags = UPF_BOOT_AUTOCONF, + .type = PORT_SCIF, + .irqs = { 193, 194, 195, 192} + }, { + .mapbase = 0xfffea000, + .flags = UPF_BOOT_AUTOCONF, + .type = PORT_SCIF, + .irqs = { 196, 198, 199, 196} + }, { + .mapbase = 0xfffea800, + .flags = UPF_BOOT_AUTOCONF, + .type = PORT_SCIF, + .irqs = { 201, 202, 203, 200} + }, { + .mapbase = 0xfffeb000, + .flags = UPF_BOOT_AUTOCONF, + .type = PORT_SCIF, + .irqs = { 205, 206, 207, 204} + }, { + .mapbase = 0xfffeb800, + .flags = UPF_BOOT_AUTOCONF, + .type = PORT_SCIF, + .irqs = { 209, 210, 211, 208} + }, { + .flags = 0, + } +}; + +static struct platform_device sci_device = { + .name = "sh-sci", + .id = -1, + .dev = { + .platform_data = sci_platform_data, + }, +}; + +static struct resource rtc_resources[] = { + [0] = { + .start = 0xffff0800, + .end = 0xffff2000 + 0x58 - 1, + .flags = IORESOURCE_IO, + }, + [1] = { + /* Period IRQ */ + .start = 153, + .flags = IORESOURCE_IRQ, + }, + [2] = { + /* Carry IRQ */ + .start = 154, + .flags = IORESOURCE_IRQ, + }, + [3] = { + /* Alarm IRQ */ + .start = 152, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device rtc_device = { + .name = "sh-rtc", + .id = -1, + .num_resources = ARRAY_SIZE(rtc_resources), + .resource = rtc_resources, +}; + +static struct platform_device *sh7201_devices[] __initdata = { + &sci_device, + &rtc_device, +}; + +static int __init sh7201_devices_setup(void) +{ + return platform_add_devices(sh7201_devices, + ARRAY_SIZE(sh7201_devices)); +} +__initcall(sh7201_devices_setup); + +void __init plat_irq_setup(void) +{ + register_intc_controller(&intc_desc); +} diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S index 3fe482dd05c1..b4106d0c68ec 100644 --- a/arch/sh/kernel/cpu/sh3/entry.S +++ 
b/arch/sh/kernel/cpu/sh3/entry.S @@ -52,7 +52,7 @@ * syscall # * */ -#if defined(CONFIG_KGDB_NMI) +#if defined(CONFIG_KGDB) NMI_VEC = 0x1c0 ! Must catch early for debounce #endif @@ -307,7 +307,7 @@ skip_restore: 6: or k0, k2 ! Set the IMASK-bits ldc k2, ssr ! -#if defined(CONFIG_KGDB_NMI) +#if defined(CONFIG_KGDB) ! Clear in_nmi mov.l 6f, k0 mov #0, k1 @@ -320,7 +320,7 @@ skip_restore: .align 2 5: .long 0x00001000 ! DSP -#ifdef CONFIG_KGDB_NMI +#ifdef CONFIG_KGDB 6: .long in_nmi #endif 7: .long 0x30000000 @@ -376,9 +376,9 @@ tlb_miss: ! .balign 512,0,512 interrupt: - mov.l 2f, k2 mov.l 3f, k3 -#if defined(CONFIG_KGDB_NMI) +#if defined(CONFIG_KGDB) + mov.l 2f, k2 ! Debounce (filter nested NMI) mov.l @k2, k0 mov.l 5f, k1 @@ -390,16 +390,16 @@ interrupt: rte nop .align 2 +2: .long INTEVT 5: .long NMI_VEC 6: .long in_nmi 0: -#endif /* defined(CONFIG_KGDB_NMI) */ +#endif /* defined(CONFIG_KGDB) */ bra handle_exception mov #-1, k2 ! interrupt exception marker .align 2 1: .long EXPEVT -2: .long INTEVT 3: .long ret_from_irq 4: .long ret_from_exception diff --git a/arch/sh/kernel/cpu/sh3/ex.S b/arch/sh/kernel/cpu/sh3/ex.S index dac429726899..e5a0de39a2db 100644 --- a/arch/sh/kernel/cpu/sh3/ex.S +++ b/arch/sh/kernel/cpu/sh3/ex.S @@ -26,7 +26,7 @@ #define fpu_error_trap_handler exception_error #endif -#if !defined(CONFIG_KGDB_NMI) +#if !defined(CONFIG_KGDB) #define kgdb_handle_exception exception_error #endif diff --git a/arch/sh/kernel/cpu/sh4/softfloat.c b/arch/sh/kernel/cpu/sh4/softfloat.c index 2b747f3b02bd..42edf2e54e85 100644 --- a/arch/sh/kernel/cpu/sh4/softfloat.c +++ b/arch/sh/kernel/cpu/sh4/softfloat.c @@ -37,6 +37,7 @@ */ #include <linux/kernel.h> #include <cpu/fpu.h> +#include <asm/div64.h> #define LIT64( a ) a##LL @@ -67,16 +68,16 @@ typedef unsigned long long float64; extern void float_raise(unsigned int flags); /* in fpu.c */ extern int float_rounding_mode(void); /* in fpu.c */ -inline bits64 extractFloat64Frac(float64 a); -inline flag extractFloat64Sign(float64 a); -inline int16 extractFloat64Exp(float64 a); -inline int16 extractFloat32Exp(float32 a); -inline flag extractFloat32Sign(float32 a); -inline bits32 extractFloat32Frac(float32 a); -inline float64 packFloat64(flag zSign, int16 zExp, bits64 zSig); -inline void shift64RightJamming(bits64 a, int16 count, bits64 * zPtr); -inline float32 packFloat32(flag zSign, int16 zExp, bits32 zSig); -inline void shift32RightJamming(bits32 a, int16 count, bits32 * zPtr); +bits64 extractFloat64Frac(float64 a); +flag extractFloat64Sign(float64 a); +int16 extractFloat64Exp(float64 a); +int16 extractFloat32Exp(float32 a); +flag extractFloat32Sign(float32 a); +bits32 extractFloat32Frac(float32 a); +float64 packFloat64(flag zSign, int16 zExp, bits64 zSig); +void shift64RightJamming(bits64 a, int16 count, bits64 * zPtr); +float32 packFloat32(flag zSign, int16 zExp, bits32 zSig); +void shift32RightJamming(bits32 a, int16 count, bits32 * zPtr); float64 float64_sub(float64 a, float64 b); float32 float32_sub(float32 a, float32 b); float32 float32_add(float32 a, float32 b); @@ -86,11 +87,11 @@ float32 float32_div(float32 a, float32 b); float32 float32_mul(float32 a, float32 b); float64 float64_mul(float64 a, float64 b); float32 float64_to_float32(float64 a); -inline void add128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr, +void add128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr, bits64 * z1Ptr); -inline void sub128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr, +void sub128(bits64 a0, bits64 a1, bits64 b0, bits64 
b1, bits64 * z0Ptr, bits64 * z1Ptr); -inline void mul64To128(bits64 a, bits64 b, bits64 * z0Ptr, bits64 * z1Ptr); +void mul64To128(bits64 a, bits64 b, bits64 * z0Ptr, bits64 * z1Ptr); static int8 countLeadingZeros32(bits32 a); static int8 countLeadingZeros64(bits64 a); @@ -110,42 +111,42 @@ static bits64 estimateDiv128To64(bits64 a0, bits64 a1, bits64 b); static void normalizeFloat32Subnormal(bits32 aSig, int16 * zExpPtr, bits32 * zSigPtr); -inline bits64 extractFloat64Frac(float64 a) +bits64 extractFloat64Frac(float64 a) { return a & LIT64(0x000FFFFFFFFFFFFF); } -inline flag extractFloat64Sign(float64 a) +flag extractFloat64Sign(float64 a) { return a >> 63; } -inline int16 extractFloat64Exp(float64 a) +int16 extractFloat64Exp(float64 a) { return (a >> 52) & 0x7FF; } -inline int16 extractFloat32Exp(float32 a) +int16 extractFloat32Exp(float32 a) { return (a >> 23) & 0xFF; } -inline flag extractFloat32Sign(float32 a) +flag extractFloat32Sign(float32 a) { return a >> 31; } -inline bits32 extractFloat32Frac(float32 a) +bits32 extractFloat32Frac(float32 a) { return a & 0x007FFFFF; } -inline float64 packFloat64(flag zSign, int16 zExp, bits64 zSig) +float64 packFloat64(flag zSign, int16 zExp, bits64 zSig) { return (((bits64) zSign) << 63) + (((bits64) zExp) << 52) + zSig; } -inline void shift64RightJamming(bits64 a, int16 count, bits64 * zPtr) +void shift64RightJamming(bits64 a, int16 count, bits64 * zPtr) { bits64 z; @@ -338,12 +339,12 @@ static float64 addFloat64Sigs(float64 a, float64 b, flag zSign) } -inline float32 packFloat32(flag zSign, int16 zExp, bits32 zSig) +float32 packFloat32(flag zSign, int16 zExp, bits32 zSig) { return (((bits32) zSign) << 31) + (((bits32) zExp) << 23) + zSig; } -inline void shift32RightJamming(bits32 a, int16 count, bits32 * zPtr) +void shift32RightJamming(bits32 a, int16 count, bits32 * zPtr) { bits32 z; if (count == 0) { @@ -634,7 +635,7 @@ normalizeFloat64Subnormal(bits64 aSig, int16 * zExpPtr, bits64 * zSigPtr) *zExpPtr = 1 - shiftCount; } -inline void add128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr, +void add128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr, bits64 * z1Ptr) { bits64 z1; @@ -644,7 +645,7 @@ inline void add128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr, *z0Ptr = a0 + b0 + (z1 < a1); } -inline void +void sub128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr, bits64 * z1Ptr) { @@ -656,11 +657,14 @@ static bits64 estimateDiv128To64(bits64 a0, bits64 a1, bits64 b) { bits64 b0, b1; bits64 rem0, rem1, term0, term1; - bits64 z; + bits64 z, tmp; if (b <= a0) return LIT64(0xFFFFFFFFFFFFFFFF); b0 = b >> 32; - z = (b0 << 32 <= a0) ? LIT64(0xFFFFFFFF00000000) : (a0 / b0) << 32; + tmp = a0; + do_div(tmp, b0); + + z = (b0 << 32 <= a0) ? LIT64(0xFFFFFFFF00000000) : tmp << 32; mul64To128(b, z, &term0, &term1); sub128(a0, a1, term0, term1, &rem0, &rem1); while (((sbits64) rem0) < 0) { @@ -669,11 +673,13 @@ static bits64 estimateDiv128To64(bits64 a0, bits64 a1, bits64 b) add128(rem0, rem1, b0, b1, &rem0, &rem1); } rem0 = (rem0 << 32) | (rem1 >> 32); - z |= (b0 << 32 <= rem0) ? 0xFFFFFFFF : rem0 / b0; + tmp = rem0; + do_div(tmp, b0); + z |= (b0 << 32 <= rem0) ? 
0xFFFFFFFF : tmp; return z; } -inline void mul64To128(bits64 a, bits64 b, bits64 * z0Ptr, bits64 * z1Ptr) +void mul64To128(bits64 a, bits64 b, bits64 * z0Ptr, bits64 * z1Ptr) { bits32 aHigh, aLow, bHigh, bLow; bits64 z0, zMiddleA, zMiddleB, z1; @@ -769,7 +775,8 @@ float32 float32_div(float32 a, float32 b) { flag aSign, bSign, zSign; int16 aExp, bExp, zExp; - bits32 aSig, bSig, zSig; + bits32 aSig, bSig; + uint64_t zSig; aSig = extractFloat32Frac(a); aExp = extractFloat32Exp(a); @@ -804,11 +811,13 @@ float32 float32_div(float32 a, float32 b) aSig >>= 1; ++zExp; } - zSig = (((bits64) aSig) << 32) / bSig; + zSig = (((bits64) aSig) << 32); + do_div(zSig, bSig); + if ((zSig & 0x3F) == 0) { zSig |= (((bits64) bSig) * zSig != ((bits64) aSig) << 32); } - return roundAndPackFloat32(zSign, zExp, zSig); + return roundAndPackFloat32(zSign, zExp, (bits32)zSig); } diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7722.c b/arch/sh/kernel/cpu/sh4a/clock-sh7722.c index db913855c2fd..0e174af21874 100644 --- a/arch/sh/kernel/cpu/sh4a/clock-sh7722.c +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7722.c @@ -229,7 +229,7 @@ struct frqcr_context sh7722_get_clk_context(const char *name) } /** - * sh7722_find_divisors - find divisor for setting rate + * sh7722_find_div_index - find divisor for setting rate * * All sh7722 clocks use the same set of multipliers/divisors. This function * chooses correct divisor to set the rate of clock with parent clock that @@ -238,7 +238,7 @@ struct frqcr_context sh7722_get_clk_context(const char *name) * @parent_rate: rate of parent clock * @rate: requested rate to be set */ -static int sh7722_find_divisors(unsigned long parent_rate, unsigned rate) +static int sh7722_find_div_index(unsigned long parent_rate, unsigned rate) { unsigned div2 = parent_rate * 2 / rate; int index; @@ -247,12 +247,12 @@ static int sh7722_find_divisors(unsigned long parent_rate, unsigned rate) return -EINVAL; for (index = 1; index < ARRAY_SIZE(divisors2); index++) { - if (div2 > divisors2[index] && div2 <= divisors2[index]) + if (div2 > divisors2[index - 1] && div2 <= divisors2[index]) break; } if (index >= ARRAY_SIZE(divisors2)) index = ARRAY_SIZE(divisors2) - 1; - return divisors2[index]; + return index; } static void sh7722_frqcr_recalc(struct clk *clk) @@ -279,12 +279,12 @@ static int sh7722_frqcr_set_rate(struct clk *clk, unsigned long rate, return -EINVAL; /* look for multiplier/divisor pair */ - div = sh7722_find_divisors(parent_rate, rate); + div = sh7722_find_div_index(parent_rate, rate); if (div<0) return div; /* calculate new value of clock rate */ - clk->rate = parent_rate * 2 / div; + clk->rate = parent_rate * 2 / divisors2[div]; frqcr = ctrl_inl(FRQCR); /* FIXME: adjust as algo_id specifies */ @@ -353,7 +353,7 @@ static int sh7722_frqcr_set_rate(struct clk *clk, unsigned long rate, int part_div; if (likely(!err)) { - part_div = sh7722_find_divisors(parent_rate, + part_div = sh7722_find_div_index(parent_rate, rate); if (part_div > 0) { part_ctx = sh7722_get_clk_context( @@ -394,12 +394,12 @@ static long sh7722_frqcr_round_rate(struct clk *clk, unsigned long rate) int div; /* look for multiplier/divisor pair */ - div = sh7722_find_divisors(parent_rate, rate); + div = sh7722_find_div_index(parent_rate, rate); if (div < 0) return clk->rate; /* calculate new value of clock rate */ - return parent_rate * 2 / div; + return parent_rate * 2 / divisors2[div]; } static struct clk_ops sh7722_frqcr_clk_ops = { @@ -421,7 +421,7 @@ static int sh7722_siu_set_rate(struct clk *clk, unsigned long rate, int algo_id) int div; 
r = ctrl_inl(clk->arch_flags); - div = sh7722_find_divisors(clk->parent->rate, rate); + div = sh7722_find_div_index(clk->parent->rate, rate); if (div < 0) return div; r = (r & ~0xF) | div; @@ -516,16 +516,19 @@ static struct clk_ops sh7722_video_clk_ops = { static struct clk sh7722_umem_clock = { .name = "umem_clk", .ops = &sh7722_frqcr_clk_ops, + .flags = CLK_RATE_PROPAGATES, }; static struct clk sh7722_sh_clock = { .name = "sh_clk", .ops = &sh7722_frqcr_clk_ops, + .flags = CLK_RATE_PROPAGATES, }; static struct clk sh7722_peripheral_clock = { .name = "peripheral_clk", .ops = &sh7722_frqcr_clk_ops, + .flags = CLK_RATE_PROPAGATES, }; static struct clk sh7722_sdram_clock = { @@ -533,6 +536,11 @@ static struct clk sh7722_sdram_clock = { .ops = &sh7722_frqcr_clk_ops, }; +static struct clk sh7722_r_clock = { + .name = "r_clk", + .rate = 32768, + .flags = CLK_RATE_PROPAGATES, +}; #ifndef CONFIG_CPU_SUBTYPE_SH7343 @@ -567,12 +575,30 @@ static struct clk sh7722_video_clock = { .ops = &sh7722_video_clk_ops, }; -static int sh7722_mstpcr_start_stop(struct clk *clk, unsigned long reg, - int enable) +#define MSTPCR_ARCH_FLAGS(reg, bit) (((reg) << 8) | (bit)) +#define MSTPCR_ARCH_FLAGS_REG(value) ((value) >> 8) +#define MSTPCR_ARCH_FLAGS_BIT(value) ((value) & 0xff) + +static int sh7722_mstpcr_start_stop(struct clk *clk, int enable) { - unsigned long bit = clk->arch_flags; + unsigned long bit = MSTPCR_ARCH_FLAGS_BIT(clk->arch_flags); + unsigned long reg; unsigned long r; + switch(MSTPCR_ARCH_FLAGS_REG(clk->arch_flags)) { + case 0: + reg = MSTPCR0; + break; + case 1: + reg = MSTPCR1; + break; + case 2: + reg = MSTPCR2; + break; + default: + return -EINVAL; + } + r = ctrl_inl(reg); if (enable) @@ -584,96 +610,175 @@ static int sh7722_mstpcr_start_stop(struct clk *clk, unsigned long reg, return 0; } -static void sh7722_mstpcr0_enable(struct clk *clk) -{ - sh7722_mstpcr_start_stop(clk, MSTPCR0, 1); -} - -static void sh7722_mstpcr0_disable(struct clk *clk) -{ - sh7722_mstpcr_start_stop(clk, MSTPCR0, 0); -} - -static void sh7722_mstpcr1_enable(struct clk *clk) -{ - sh7722_mstpcr_start_stop(clk, MSTPCR1, 1); -} - -static void sh7722_mstpcr1_disable(struct clk *clk) +static void sh7722_mstpcr_enable(struct clk *clk) { - sh7722_mstpcr_start_stop(clk, MSTPCR1, 0); + sh7722_mstpcr_start_stop(clk, 1); } -static void sh7722_mstpcr2_enable(struct clk *clk) +static void sh7722_mstpcr_disable(struct clk *clk) { - sh7722_mstpcr_start_stop(clk, MSTPCR2, 1); + sh7722_mstpcr_start_stop(clk, 0); } -static void sh7722_mstpcr2_disable(struct clk *clk) +static void sh7722_mstpcr_recalc(struct clk *clk) { - sh7722_mstpcr_start_stop(clk, MSTPCR2, 0); + if (clk->parent) + clk->rate = clk->parent->rate; } -static struct clk_ops sh7722_mstpcr0_clk_ops = { - .enable = sh7722_mstpcr0_enable, - .disable = sh7722_mstpcr0_disable, -}; - -static struct clk_ops sh7722_mstpcr1_clk_ops = { - .enable = sh7722_mstpcr1_enable, - .disable = sh7722_mstpcr1_disable, +static struct clk_ops sh7722_mstpcr_clk_ops = { + .enable = sh7722_mstpcr_enable, + .disable = sh7722_mstpcr_disable, + .recalc = sh7722_mstpcr_recalc, }; -static struct clk_ops sh7722_mstpcr2_clk_ops = { - .enable = sh7722_mstpcr2_enable, - .disable = sh7722_mstpcr2_disable, -}; - -#define DECLARE_MSTPCRN(regnr, bitnr, bitstr) \ -{ \ - .name = "mstp" __stringify(regnr) bitstr, \ - .arch_flags = bitnr, \ - .ops = &sh7722_mstpcr ## regnr ## _clk_ops, \ +#define MSTPCR(_name, _parent, regnr, bitnr) \ +{ \ + .name = _name, \ + .arch_flags = MSTPCR_ARCH_FLAGS(regnr, bitnr), \ + .ops = 
(void *)_parent, \ } -#define DECLARE_MSTPCR(regnr) \ - DECLARE_MSTPCRN(regnr, 31, "31"), \ - DECLARE_MSTPCRN(regnr, 30, "30"), \ - DECLARE_MSTPCRN(regnr, 29, "29"), \ - DECLARE_MSTPCRN(regnr, 28, "28"), \ - DECLARE_MSTPCRN(regnr, 27, "27"), \ - DECLARE_MSTPCRN(regnr, 26, "26"), \ - DECLARE_MSTPCRN(regnr, 25, "25"), \ - DECLARE_MSTPCRN(regnr, 24, "24"), \ - DECLARE_MSTPCRN(regnr, 23, "23"), \ - DECLARE_MSTPCRN(regnr, 22, "22"), \ - DECLARE_MSTPCRN(regnr, 21, "21"), \ - DECLARE_MSTPCRN(regnr, 20, "20"), \ - DECLARE_MSTPCRN(regnr, 19, "19"), \ - DECLARE_MSTPCRN(regnr, 18, "18"), \ - DECLARE_MSTPCRN(regnr, 17, "17"), \ - DECLARE_MSTPCRN(regnr, 16, "16"), \ - DECLARE_MSTPCRN(regnr, 15, "15"), \ - DECLARE_MSTPCRN(regnr, 14, "14"), \ - DECLARE_MSTPCRN(regnr, 13, "13"), \ - DECLARE_MSTPCRN(regnr, 12, "12"), \ - DECLARE_MSTPCRN(regnr, 11, "11"), \ - DECLARE_MSTPCRN(regnr, 10, "10"), \ - DECLARE_MSTPCRN(regnr, 9, "09"), \ - DECLARE_MSTPCRN(regnr, 8, "08"), \ - DECLARE_MSTPCRN(regnr, 7, "07"), \ - DECLARE_MSTPCRN(regnr, 6, "06"), \ - DECLARE_MSTPCRN(regnr, 5, "05"), \ - DECLARE_MSTPCRN(regnr, 4, "04"), \ - DECLARE_MSTPCRN(regnr, 3, "03"), \ - DECLARE_MSTPCRN(regnr, 2, "02"), \ - DECLARE_MSTPCRN(regnr, 1, "01"), \ - DECLARE_MSTPCRN(regnr, 0, "00") - -static struct clk sh7722_mstpcr[] = { - DECLARE_MSTPCR(0), - DECLARE_MSTPCR(1), - DECLARE_MSTPCR(2), +static struct clk sh7722_mstpcr_clocks[] = { +#if defined(CONFIG_CPU_SUBTYPE_SH7722) + MSTPCR("uram0", "umem_clk", 0, 28), + MSTPCR("xymem0", "bus_clk", 0, 26), + MSTPCR("tmu0", "peripheral_clk", 0, 15), + MSTPCR("cmt0", "r_clk", 0, 14), + MSTPCR("rwdt0", "r_clk", 0, 13), + MSTPCR("flctl0", "peripheral_clk", 0, 10), + MSTPCR("scif0", "peripheral_clk", 0, 7), + MSTPCR("scif1", "peripheral_clk", 0, 6), + MSTPCR("scif2", "peripheral_clk", 0, 5), + MSTPCR("i2c0", "peripheral_clk", 1, 9), + MSTPCR("rtc0", "r_clk", 1, 8), + MSTPCR("sdhi0", "peripheral_clk", 2, 18), + MSTPCR("keysc0", "r_clk", 2, 14), + MSTPCR("usbf0", "peripheral_clk", 2, 11), + MSTPCR("2dg0", "bus_clk", 2, 9), + MSTPCR("siu0", "bus_clk", 2, 8), + MSTPCR("vou0", "bus_clk", 2, 5), + MSTPCR("jpu0", "bus_clk", 2, 6), + MSTPCR("beu0", "bus_clk", 2, 4), + MSTPCR("ceu0", "bus_clk", 2, 3), + MSTPCR("veu0", "bus_clk", 2, 2), + MSTPCR("vpu0", "bus_clk", 2, 1), + MSTPCR("lcdc0", "bus_clk", 2, 0), +#endif +#if defined(CONFIG_CPU_SUBTYPE_SH7723) + /* See page 60 of Datasheet V1.0: Overview -> Block Diagram */ + MSTPCR("tlb0", "cpu_clk", 0, 31), + MSTPCR("ic0", "cpu_clk", 0, 30), + MSTPCR("oc0", "cpu_clk", 0, 29), + MSTPCR("l2c0", "sh_clk", 0, 28), + MSTPCR("ilmem0", "cpu_clk", 0, 27), + MSTPCR("fpu0", "cpu_clk", 0, 24), + MSTPCR("intc0", "cpu_clk", 0, 22), + MSTPCR("dmac0", "bus_clk", 0, 21), + MSTPCR("sh0", "sh_clk", 0, 20), + MSTPCR("hudi0", "peripheral_clk", 0, 19), + MSTPCR("ubc0", "cpu_clk", 0, 17), + MSTPCR("tmu0", "peripheral_clk", 0, 15), + MSTPCR("cmt0", "r_clk", 0, 14), + MSTPCR("rwdt0", "r_clk", 0, 13), + MSTPCR("dmac1", "bus_clk", 0, 12), + MSTPCR("tmu1", "peripheral_clk", 0, 11), + MSTPCR("flctl0", "peripheral_clk", 0, 10), + MSTPCR("scif0", "peripheral_clk", 0, 9), + MSTPCR("scif1", "peripheral_clk", 0, 8), + MSTPCR("scif2", "peripheral_clk", 0, 7), + MSTPCR("scif3", "bus_clk", 0, 6), + MSTPCR("scif4", "bus_clk", 0, 5), + MSTPCR("scif5", "bus_clk", 0, 4), + MSTPCR("msiof0", "bus_clk", 0, 2), + MSTPCR("msiof1", "bus_clk", 0, 1), + MSTPCR("meram0", "sh_clk", 0, 0), + MSTPCR("i2c0", "peripheral_clk", 1, 9), + MSTPCR("rtc0", "r_clk", 1, 8), + MSTPCR("atapi0", "sh_clk", 2, 28), + MSTPCR("adc0", 
"peripheral_clk", 2, 28), + MSTPCR("tpu0", "bus_clk", 2, 25), + MSTPCR("irda0", "peripheral_clk", 2, 24), + MSTPCR("tsif0", "bus_clk", 2, 22), + MSTPCR("icb0", "bus_clk", 2, 21), + MSTPCR("sdhi0", "bus_clk", 2, 18), + MSTPCR("sdhi1", "bus_clk", 2, 17), + MSTPCR("keysc0", "r_clk", 2, 14), + MSTPCR("usb0", "bus_clk", 2, 11), + MSTPCR("2dg0", "bus_clk", 2, 10), + MSTPCR("siu0", "bus_clk", 2, 8), + MSTPCR("veu1", "bus_clk", 2, 6), + MSTPCR("vou0", "bus_clk", 2, 5), + MSTPCR("beu0", "bus_clk", 2, 4), + MSTPCR("ceu0", "bus_clk", 2, 3), + MSTPCR("veu0", "bus_clk", 2, 2), + MSTPCR("vpu0", "bus_clk", 2, 1), + MSTPCR("lcdc0", "bus_clk", 2, 0), +#endif +#if defined(CONFIG_CPU_SUBTYPE_SH7343) + MSTPCR("uram0", "umem_clk", 0, 28), + MSTPCR("xymem0", "bus_clk", 0, 26), + MSTPCR("tmu0", "peripheral_clk", 0, 15), + MSTPCR("cmt0", "r_clk", 0, 14), + MSTPCR("rwdt0", "r_clk", 0, 13), + MSTPCR("scif0", "peripheral_clk", 0, 7), + MSTPCR("scif1", "peripheral_clk", 0, 6), + MSTPCR("scif2", "peripheral_clk", 0, 5), + MSTPCR("scif3", "peripheral_clk", 0, 4), + MSTPCR("i2c0", "peripheral_clk", 1, 9), + MSTPCR("i2c1", "peripheral_clk", 1, 8), + MSTPCR("sdhi0", "peripheral_clk", 2, 18), + MSTPCR("keysc0", "r_clk", 2, 14), + MSTPCR("usbf0", "peripheral_clk", 2, 11), + MSTPCR("siu0", "bus_clk", 2, 8), + MSTPCR("jpu0", "bus_clk", 2, 6), + MSTPCR("vou0", "bus_clk", 2, 5), + MSTPCR("beu0", "bus_clk", 2, 4), + MSTPCR("ceu0", "bus_clk", 2, 3), + MSTPCR("veu0", "bus_clk", 2, 2), + MSTPCR("vpu0", "bus_clk", 2, 1), + MSTPCR("lcdc0", "bus_clk", 2, 0), +#endif +#if defined(CONFIG_CPU_SUBTYPE_SH7366) + /* See page 52 of Datasheet V0.40: Overview -> Block Diagram */ + MSTPCR("tlb0", "cpu_clk", 0, 31), + MSTPCR("ic0", "cpu_clk", 0, 30), + MSTPCR("oc0", "cpu_clk", 0, 29), + MSTPCR("rsmem0", "sh_clk", 0, 28), + MSTPCR("xymem0", "cpu_clk", 0, 26), + MSTPCR("intc30", "peripheral_clk", 0, 23), + MSTPCR("intc0", "peripheral_clk", 0, 22), + MSTPCR("dmac0", "bus_clk", 0, 21), + MSTPCR("sh0", "sh_clk", 0, 20), + MSTPCR("hudi0", "peripheral_clk", 0, 19), + MSTPCR("ubc0", "cpu_clk", 0, 17), + MSTPCR("tmu0", "peripheral_clk", 0, 15), + MSTPCR("cmt0", "r_clk", 0, 14), + MSTPCR("rwdt0", "r_clk", 0, 13), + MSTPCR("flctl0", "peripheral_clk", 0, 10), + MSTPCR("scif0", "peripheral_clk", 0, 7), + MSTPCR("scif1", "bus_clk", 0, 6), + MSTPCR("scif2", "bus_clk", 0, 5), + MSTPCR("msiof0", "peripheral_clk", 0, 2), + MSTPCR("sbr0", "peripheral_clk", 0, 1), + MSTPCR("i2c0", "peripheral_clk", 1, 9), + MSTPCR("icb0", "bus_clk", 2, 27), + MSTPCR("meram0", "sh_clk", 2, 26), + MSTPCR("dacc0", "peripheral_clk", 2, 24), + MSTPCR("dacy0", "peripheral_clk", 2, 23), + MSTPCR("tsif0", "bus_clk", 2, 22), + MSTPCR("sdhi0", "bus_clk", 2, 18), + MSTPCR("mmcif0", "bus_clk", 2, 17), + MSTPCR("usb0", "bus_clk", 2, 11), + MSTPCR("siu0", "bus_clk", 2, 8), + MSTPCR("veu1", "bus_clk", 2, 7), + MSTPCR("vou0", "bus_clk", 2, 5), + MSTPCR("beu0", "bus_clk", 2, 4), + MSTPCR("ceu0", "bus_clk", 2, 3), + MSTPCR("veu0", "bus_clk", 2, 2), + MSTPCR("vpu0", "bus_clk", 2, 1), + MSTPCR("lcdc0", "bus_clk", 2, 0), +#endif }; static struct clk *sh7722_clocks[] = { @@ -710,21 +815,30 @@ arch_init_clk_ops(struct clk_ops **ops, int type) int __init arch_clk_init(void) { - struct clk *master; + struct clk *clk; int i; - master = clk_get(NULL, "master_clk"); + clk = clk_get(NULL, "master_clk"); for (i = 0; i < ARRAY_SIZE(sh7722_clocks); i++) { pr_debug( "Registering clock '%s'\n", sh7722_clocks[i]->name); - sh7722_clocks[i]->parent = master; + sh7722_clocks[i]->parent = clk; 
clk_register(sh7722_clocks[i]); } - clk_put(master); - - for (i = 0; i < ARRAY_SIZE(sh7722_mstpcr); i++) { - pr_debug( "Registering mstpcr '%s'\n", sh7722_mstpcr[i].name); - clk_register(&sh7722_mstpcr[i]); + clk_put(clk); + + clk_register(&sh7722_r_clock); + + for (i = 0; i < ARRAY_SIZE(sh7722_mstpcr_clocks); i++) { + pr_debug( "Registering mstpcr clock '%s'\n", + sh7722_mstpcr_clocks[i].name); + clk = clk_get(NULL, (void *) sh7722_mstpcr_clocks[i].ops); + sh7722_mstpcr_clocks[i].parent = clk; + sh7722_mstpcr_clocks[i].ops = &sh7722_mstpcr_clk_ops; + clk_register(&sh7722_mstpcr_clocks[i]); + clk_put(clk); } + clk_recalc_rate(&sh7722_r_clock); /* make sure rate gets propagated */ + return 0; } diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7343.c b/arch/sh/kernel/cpu/sh4a/setup-sh7343.c index 78881b4214da..0623e377f488 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7343.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7343.c @@ -30,6 +30,7 @@ static struct resource iic0_resources[] = { static struct platform_device iic0_device = { .name = "i2c-sh_mobile", + .id = 0, /* "i2c0" clock */ .num_resources = ARRAY_SIZE(iic0_resources), .resource = iic0_resources, }; @@ -50,6 +51,7 @@ static struct resource iic1_resources[] = { static struct platform_device iic1_device = { .name = "i2c-sh_mobile", + .id = 1, /* "i2c1" clock */ .num_resources = ARRAY_SIZE(iic1_resources), .resource = iic1_resources, }; @@ -115,7 +117,22 @@ static struct plat_sci_port sci_platform_data[] = { .mapbase = 0xffe00000, .flags = UPF_BOOT_AUTOCONF, .type = PORT_SCIF, - .irqs = { 80, 81, 83, 82 }, + .irqs = { 80, 80, 80, 80 }, + }, { + .mapbase = 0xffe10000, + .flags = UPF_BOOT_AUTOCONF, + .type = PORT_SCIF, + .irqs = { 81, 81, 81, 81 }, + }, { + .mapbase = 0xffe20000, + .flags = UPF_BOOT_AUTOCONF, + .type = PORT_SCIF, + .irqs = { 82, 82, 82, 82 }, + }, { + .mapbase = 0xffe30000, + .flags = UPF_BOOT_AUTOCONF, + .type = PORT_SCIF, + .irqs = { 83, 83, 83, 83 }, }, { .flags = 0, } @@ -139,18 +156,10 @@ static struct platform_device *sh7343_devices[] __initdata = { static int __init sh7343_devices_setup(void) { - clk_always_enable("mstp031"); /* TLB */ - clk_always_enable("mstp030"); /* IC */ - clk_always_enable("mstp029"); /* OC */ - clk_always_enable("mstp028"); /* URAM */ - clk_always_enable("mstp026"); /* XYMEM */ - clk_always_enable("mstp023"); /* INTC3 */ - clk_always_enable("mstp022"); /* INTC */ - clk_always_enable("mstp020"); /* SuperHyway */ - clk_always_enable("mstp109"); /* I2C0 */ - clk_always_enable("mstp108"); /* I2C1 */ - clk_always_enable("mstp202"); /* VEU */ - clk_always_enable("mstp201"); /* VPU */ + clk_always_enable("uram0"); /* URAM */ + clk_always_enable("xymem0"); /* XYMEM */ + clk_always_enable("veu0"); /* VEU */ + clk_always_enable("vpu0"); /* VPU */ platform_resource_setup_memory(&vpu_device, "vpu", 1 << 20); platform_resource_setup_memory(&veu_device, "veu", 2 << 20); @@ -171,7 +180,7 @@ enum { MMC_ERR, MMC_TRAN, MMC_FSTAT, MMC_FRDY, DMAC4, DMAC5, DMAC_DADERR, KEYSC, - SCIF, SCIF1, SCIF2, SCIF3, SCIF4, + SCIF, SCIF1, SCIF2, SCIF3, SIOF0, SIOF1, SIO, FLCTL_FLSTEI, FLCTL_FLENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I, I2C0_ALI, I2C0_TACKI, I2C0_WAITI, I2C0_DTEI, diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7366.c b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c index e17db39b97aa..839ae97a7fd2 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7366.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c @@ -32,6 +32,7 @@ static struct resource iic_resources[] = { static struct platform_device iic_device = { .name = "i2c-sh_mobile", + .id = 0, 
/* "i2c0" clock */ .num_resources = ARRAY_SIZE(iic_resources), .resource = iic_resources, }; @@ -176,19 +177,11 @@ static struct platform_device *sh7366_devices[] __initdata = { static int __init sh7366_devices_setup(void) { - clk_always_enable("mstp031"); /* TLB */ - clk_always_enable("mstp030"); /* IC */ - clk_always_enable("mstp029"); /* OC */ - clk_always_enable("mstp028"); /* RSMEM */ - clk_always_enable("mstp026"); /* XYMEM */ - clk_always_enable("mstp023"); /* INTC3 */ - clk_always_enable("mstp022"); /* INTC */ - clk_always_enable("mstp020"); /* SuperHyway */ - clk_always_enable("mstp109"); /* I2C */ - clk_always_enable("mstp211"); /* USB */ - clk_always_enable("mstp207"); /* VEU-2 */ - clk_always_enable("mstp202"); /* VEU-1 */ - clk_always_enable("mstp201"); /* VPU */ + clk_always_enable("rsmem0"); /* RSMEM */ + clk_always_enable("xymem0"); /* XYMEM */ + clk_always_enable("veu1"); /* VEU-2 */ + clk_always_enable("veu0"); /* VEU-1 */ + clk_always_enable("vpu0"); /* VPU */ platform_resource_setup_memory(&vpu_device, "vpu", 2 << 20); platform_resource_setup_memory(&veu0_device, "veu0", 2 << 20); diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c index ef77ee1d9f53..50cf6838ec41 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c @@ -62,7 +62,7 @@ static struct resource usbf_resources[] = { static struct platform_device usbf_device = { .name = "m66592_udc", - .id = -1, + .id = 0, /* "usbf0" clock */ .dev = { .dma_mask = NULL, .coherent_dma_mask = 0xffffffff, @@ -87,6 +87,7 @@ static struct resource iic_resources[] = { static struct platform_device iic_device = { .name = "i2c-sh_mobile", + .id = 0, /* "i2c0" clock */ .num_resources = ARRAY_SIZE(iic_resources), .resource = iic_resources, }; @@ -147,6 +148,34 @@ static struct platform_device veu_device = { .num_resources = ARRAY_SIZE(veu_resources), }; +static struct uio_info jpu_platform_data = { + .name = "JPU", + .version = "0", + .irq = 27, +}; + +static struct resource jpu_resources[] = { + [0] = { + .name = "JPU", + .start = 0xfea00000, + .end = 0xfea102d0, + .flags = IORESOURCE_MEM, + }, + [1] = { + /* place holder for contiguous memory */ + }, +}; + +static struct platform_device jpu_device = { + .name = "uio_pdrv_genirq", + .id = 2, + .dev = { + .platform_data = &jpu_platform_data, + }, + .resource = jpu_resources, + .num_resources = ARRAY_SIZE(jpu_resources), +}; + static struct plat_sci_port sci_platform_data[] = { { .mapbase = 0xffe00000, @@ -186,24 +215,21 @@ static struct platform_device *sh7722_devices[] __initdata = { &sci_device, &vpu_device, &veu_device, + &jpu_device, }; static int __init sh7722_devices_setup(void) { - clk_always_enable("mstp031"); /* TLB */ - clk_always_enable("mstp030"); /* IC */ - clk_always_enable("mstp029"); /* OC */ - clk_always_enable("mstp028"); /* URAM */ - clk_always_enable("mstp026"); /* XYMEM */ - clk_always_enable("mstp022"); /* INTC */ - clk_always_enable("mstp020"); /* SuperHyway */ - clk_always_enable("mstp109"); /* I2C */ - clk_always_enable("mstp211"); /* USB */ - clk_always_enable("mstp202"); /* VEU */ - clk_always_enable("mstp201"); /* VPU */ + clk_always_enable("uram0"); /* URAM */ + clk_always_enable("xymem0"); /* XYMEM */ + clk_always_enable("rtc0"); /* RTC */ + clk_always_enable("veu0"); /* VEU */ + clk_always_enable("vpu0"); /* VPU */ + clk_always_enable("jpu0"); /* JPU */ platform_resource_setup_memory(&vpu_device, "vpu", 1 << 20); platform_resource_setup_memory(&veu_device, "veu", 2 << 20); 
+ platform_resource_setup_memory(&jpu_device, "jpu", 2 << 20); return platform_add_devices(sh7722_devices, ARRAY_SIZE(sh7722_devices)); diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c index 6d9e6972cfc9..849770d780ae 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c @@ -215,6 +215,7 @@ static struct resource iic_resources[] = { static struct platform_device iic_device = { .name = "i2c-sh_mobile", + .id = 0, /* "i2c0" clock */ .num_resources = ARRAY_SIZE(iic_resources), .resource = iic_resources, }; @@ -231,19 +232,11 @@ static struct platform_device *sh7723_devices[] __initdata = { static int __init sh7723_devices_setup(void) { - clk_always_enable("mstp031"); /* TLB */ - clk_always_enable("mstp030"); /* IC */ - clk_always_enable("mstp029"); /* OC */ - clk_always_enable("mstp024"); /* FPU */ - clk_always_enable("mstp022"); /* INTC */ - clk_always_enable("mstp020"); /* SuperHyway */ - clk_always_enable("mstp000"); /* MERAM */ - clk_always_enable("mstp109"); /* I2C */ - clk_always_enable("mstp108"); /* RTC */ - clk_always_enable("mstp211"); /* USB */ - clk_always_enable("mstp206"); /* VEU2H1 */ - clk_always_enable("mstp202"); /* VEU2H0 */ - clk_always_enable("mstp201"); /* VPU */ + clk_always_enable("meram0"); /* MERAM */ + clk_always_enable("rtc0"); /* RTC */ + clk_always_enable("veu1"); /* VEU2H1 */ + clk_always_enable("veu0"); /* VEU2H0 */ + clk_always_enable("vpu0"); /* VPU */ platform_resource_setup_memory(&vpu_device, "vpu", 2 << 20); platform_resource_setup_memory(&veu0_device, "veu0", 2 << 20); diff --git a/arch/sh/kernel/debugtraps.S b/arch/sh/kernel/debugtraps.S index 13b66746410a..591741383ee6 100644 --- a/arch/sh/kernel/debugtraps.S +++ b/arch/sh/kernel/debugtraps.S @@ -3,7 +3,7 @@ * * Debug trap jump tables for SuperH * - * Copyright (C) 2006 Paul Mundt + * Copyright (C) 2006 - 2008 Paul Mundt * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive @@ -12,12 +12,13 @@ #include <linux/sys.h> #include <linux/linkage.h> -#if !defined(CONFIG_SH_KGDB) -#define kgdb_handle_exception debug_trap_handler +#if !defined(CONFIG_KGDB) +#define breakpoint_trap_handler debug_trap_handler +#define singlestep_trap_handler debug_trap_handler #endif #if !defined(CONFIG_SH_STANDARD_BIOS) -#define sh_bios_handler debug_trap_handler +#define sh_bios_handler debug_trap_handler #endif .data @@ -35,7 +36,7 @@ ENTRY(debug_trap_table) .long debug_trap_handler /* 0x39 */ .long debug_trap_handler /* 0x3a */ .long debug_trap_handler /* 0x3b */ - .long kgdb_handle_exception /* 0x3c */ - .long debug_trap_handler /* 0x3d */ + .long breakpoint_trap_handler /* 0x3c */ + .long singlestep_trap_handler /* 0x3d */ .long bug_trap_handler /* 0x3e */ .long sh_bios_handler /* 0x3f */ diff --git a/arch/sh/kernel/disassemble.c b/arch/sh/kernel/disassemble.c new file mode 100644 index 000000000000..64d5d8dded7c --- /dev/null +++ b/arch/sh/kernel/disassemble.c @@ -0,0 +1,573 @@ +/* + * Disassemble SuperH instructions. + * + * Copyright (C) 1999 kaz Kojima + * Copyright (C) 2008 Paul Mundt + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#include <linux/kernel.h> +#include <linux/string.h> +#include <linux/uaccess.h> + +/* + * Format of an instruction in memory. 
+ */ +typedef enum { + HEX_0, HEX_1, HEX_2, HEX_3, HEX_4, HEX_5, HEX_6, HEX_7, + HEX_8, HEX_9, HEX_A, HEX_B, HEX_C, HEX_D, HEX_E, HEX_F, + REG_N, REG_M, REG_NM, REG_B, + BRANCH_12, BRANCH_8, + DISP_8, DISP_4, + IMM_4, IMM_4BY2, IMM_4BY4, PCRELIMM_8BY2, PCRELIMM_8BY4, + IMM_8, IMM_8BY2, IMM_8BY4, +} sh_nibble_type; + +typedef enum { + A_END, A_BDISP12, A_BDISP8, + A_DEC_M, A_DEC_N, + A_DISP_GBR, A_DISP_PC, A_DISP_REG_M, A_DISP_REG_N, + A_GBR, + A_IMM, + A_INC_M, A_INC_N, + A_IND_M, A_IND_N, A_IND_R0_REG_M, A_IND_R0_REG_N, + A_MACH, A_MACL, + A_PR, A_R0, A_R0_GBR, A_REG_M, A_REG_N, A_REG_B, + A_SR, A_VBR, A_SSR, A_SPC, A_SGR, A_DBR, + F_REG_N, F_REG_M, D_REG_N, D_REG_M, + X_REG_N, /* Only used for argument parsing */ + X_REG_M, /* Only used for argument parsing */ + DX_REG_N, DX_REG_M, V_REG_N, V_REG_M, + FD_REG_N, + XMTRX_M4, + F_FR0, + FPUL_N, FPUL_M, FPSCR_N, FPSCR_M, +} sh_arg_type; + +static struct sh_opcode_info { + char *name; + sh_arg_type arg[7]; + sh_nibble_type nibbles[4]; +} sh_table[] = { + {"add",{A_IMM,A_REG_N},{HEX_7,REG_N,IMM_8}}, + {"add",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_C}}, + {"addc",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_E}}, + {"addv",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_F}}, + {"and",{A_IMM,A_R0},{HEX_C,HEX_9,IMM_8}}, + {"and",{ A_REG_M,A_REG_N},{HEX_2,REG_N,REG_M,HEX_9}}, + {"and.b",{A_IMM,A_R0_GBR},{HEX_C,HEX_D,IMM_8}}, + {"bra",{A_BDISP12},{HEX_A,BRANCH_12}}, + {"bsr",{A_BDISP12},{HEX_B,BRANCH_12}}, + {"bt",{A_BDISP8},{HEX_8,HEX_9,BRANCH_8}}, + {"bf",{A_BDISP8},{HEX_8,HEX_B,BRANCH_8}}, + {"bt.s",{A_BDISP8},{HEX_8,HEX_D,BRANCH_8}}, + {"bt/s",{A_BDISP8},{HEX_8,HEX_D,BRANCH_8}}, + {"bf.s",{A_BDISP8},{HEX_8,HEX_F,BRANCH_8}}, + {"bf/s",{A_BDISP8},{HEX_8,HEX_F,BRANCH_8}}, + {"clrmac",{0},{HEX_0,HEX_0,HEX_2,HEX_8}}, + {"clrs",{0},{HEX_0,HEX_0,HEX_4,HEX_8}}, + {"clrt",{0},{HEX_0,HEX_0,HEX_0,HEX_8}}, + {"cmp/eq",{A_IMM,A_R0},{HEX_8,HEX_8,IMM_8}}, + {"cmp/eq",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_0}}, + {"cmp/ge",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_3}}, + {"cmp/gt",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_7}}, + {"cmp/hi",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_6}}, + {"cmp/hs",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_2}}, + {"cmp/pl",{A_REG_N},{HEX_4,REG_N,HEX_1,HEX_5}}, + {"cmp/pz",{A_REG_N},{HEX_4,REG_N,HEX_1,HEX_1}}, + {"cmp/str",{ A_REG_M,A_REG_N},{HEX_2,REG_N,REG_M,HEX_C}}, + {"div0s",{ A_REG_M,A_REG_N},{HEX_2,REG_N,REG_M,HEX_7}}, + {"div0u",{0},{HEX_0,HEX_0,HEX_1,HEX_9}}, + {"div1",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_4}}, + {"exts.b",{ A_REG_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_E}}, + {"exts.w",{ A_REG_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_F}}, + {"extu.b",{ A_REG_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_C}}, + {"extu.w",{ A_REG_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_D}}, + {"jmp",{A_IND_N},{HEX_4,REG_N,HEX_2,HEX_B}}, + {"jsr",{A_IND_N},{HEX_4,REG_N,HEX_0,HEX_B}}, + {"ldc",{A_REG_N,A_SR},{HEX_4,REG_N,HEX_0,HEX_E}}, + {"ldc",{A_REG_N,A_GBR},{HEX_4,REG_N,HEX_1,HEX_E}}, + {"ldc",{A_REG_N,A_VBR},{HEX_4,REG_N,HEX_2,HEX_E}}, + {"ldc",{A_REG_N,A_SSR},{HEX_4,REG_N,HEX_3,HEX_E}}, + {"ldc",{A_REG_N,A_SPC},{HEX_4,REG_N,HEX_4,HEX_E}}, + {"ldc",{A_REG_N,A_DBR},{HEX_4,REG_N,HEX_7,HEX_E}}, + {"ldc",{A_REG_N,A_REG_B},{HEX_4,REG_N,REG_B,HEX_E}}, + {"ldc.l",{A_INC_N,A_SR},{HEX_4,REG_N,HEX_0,HEX_7}}, + {"ldc.l",{A_INC_N,A_GBR},{HEX_4,REG_N,HEX_1,HEX_7}}, + {"ldc.l",{A_INC_N,A_VBR},{HEX_4,REG_N,HEX_2,HEX_7}}, + {"ldc.l",{A_INC_N,A_SSR},{HEX_4,REG_N,HEX_3,HEX_7}}, + {"ldc.l",{A_INC_N,A_SPC},{HEX_4,REG_N,HEX_4,HEX_7}}, + {"ldc.l",{A_INC_N,A_DBR},{HEX_4,REG_N,HEX_7,HEX_7}}, 
+ {"ldc.l",{A_INC_N,A_REG_B},{HEX_4,REG_N,REG_B,HEX_7}}, + {"lds",{A_REG_N,A_MACH},{HEX_4,REG_N,HEX_0,HEX_A}}, + {"lds",{A_REG_N,A_MACL},{HEX_4,REG_N,HEX_1,HEX_A}}, + {"lds",{A_REG_N,A_PR},{HEX_4,REG_N,HEX_2,HEX_A}}, + {"lds",{A_REG_M,FPUL_N},{HEX_4,REG_M,HEX_5,HEX_A}}, + {"lds",{A_REG_M,FPSCR_N},{HEX_4,REG_M,HEX_6,HEX_A}}, + {"lds.l",{A_INC_N,A_MACH},{HEX_4,REG_N,HEX_0,HEX_6}}, + {"lds.l",{A_INC_N,A_MACL},{HEX_4,REG_N,HEX_1,HEX_6}}, + {"lds.l",{A_INC_N,A_PR},{HEX_4,REG_N,HEX_2,HEX_6}}, + {"lds.l",{A_INC_M,FPUL_N},{HEX_4,REG_M,HEX_5,HEX_6}}, + {"lds.l",{A_INC_M,FPSCR_N},{HEX_4,REG_M,HEX_6,HEX_6}}, + {"ldtlb",{0},{HEX_0,HEX_0,HEX_3,HEX_8}}, + {"mac.w",{A_INC_M,A_INC_N},{HEX_4,REG_N,REG_M,HEX_F}}, + {"mov",{A_IMM,A_REG_N},{HEX_E,REG_N,IMM_8}}, + {"mov",{ A_REG_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_3}}, + {"mov.b",{ A_REG_M,A_IND_R0_REG_N},{HEX_0,REG_N,REG_M,HEX_4}}, + {"mov.b",{ A_REG_M,A_DEC_N},{HEX_2,REG_N,REG_M,HEX_4}}, + {"mov.b",{ A_REG_M,A_IND_N},{HEX_2,REG_N,REG_M,HEX_0}}, + {"mov.b",{A_DISP_REG_M,A_R0},{HEX_8,HEX_4,REG_M,IMM_4}}, + {"mov.b",{A_DISP_GBR,A_R0},{HEX_C,HEX_4,IMM_8}}, + {"mov.b",{A_IND_R0_REG_M,A_REG_N},{HEX_0,REG_N,REG_M,HEX_C}}, + {"mov.b",{A_INC_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_4}}, + {"mov.b",{A_IND_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_0}}, + {"mov.b",{A_R0,A_DISP_REG_M},{HEX_8,HEX_0,REG_M,IMM_4}}, + {"mov.b",{A_R0,A_DISP_GBR},{HEX_C,HEX_0,IMM_8}}, + {"mov.l",{ A_REG_M,A_DISP_REG_N},{HEX_1,REG_N,REG_M,IMM_4BY4}}, + {"mov.l",{ A_REG_M,A_IND_R0_REG_N},{HEX_0,REG_N,REG_M,HEX_6}}, + {"mov.l",{ A_REG_M,A_DEC_N},{HEX_2,REG_N,REG_M,HEX_6}}, + {"mov.l",{ A_REG_M,A_IND_N},{HEX_2,REG_N,REG_M,HEX_2}}, + {"mov.l",{A_DISP_REG_M,A_REG_N},{HEX_5,REG_N,REG_M,IMM_4BY4}}, + {"mov.l",{A_DISP_GBR,A_R0},{HEX_C,HEX_6,IMM_8BY4}}, + {"mov.l",{A_DISP_PC,A_REG_N},{HEX_D,REG_N,PCRELIMM_8BY4}}, + {"mov.l",{A_IND_R0_REG_M,A_REG_N},{HEX_0,REG_N,REG_M,HEX_E}}, + {"mov.l",{A_INC_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_6}}, + {"mov.l",{A_IND_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_2}}, + {"mov.l",{A_R0,A_DISP_GBR},{HEX_C,HEX_2,IMM_8BY4}}, + {"mov.w",{ A_REG_M,A_IND_R0_REG_N},{HEX_0,REG_N,REG_M,HEX_5}}, + {"mov.w",{ A_REG_M,A_DEC_N},{HEX_2,REG_N,REG_M,HEX_5}}, + {"mov.w",{ A_REG_M,A_IND_N},{HEX_2,REG_N,REG_M,HEX_1}}, + {"mov.w",{A_DISP_REG_M,A_R0},{HEX_8,HEX_5,REG_M,IMM_4BY2}}, + {"mov.w",{A_DISP_GBR,A_R0},{HEX_C,HEX_5,IMM_8BY2}}, + {"mov.w",{A_DISP_PC,A_REG_N},{HEX_9,REG_N,PCRELIMM_8BY2}}, + {"mov.w",{A_IND_R0_REG_M,A_REG_N},{HEX_0,REG_N,REG_M,HEX_D}}, + {"mov.w",{A_INC_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_5}}, + {"mov.w",{A_IND_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_1}}, + {"mov.w",{A_R0,A_DISP_REG_M},{HEX_8,HEX_1,REG_M,IMM_4BY2}}, + {"mov.w",{A_R0,A_DISP_GBR},{HEX_C,HEX_1,IMM_8BY2}}, + {"mova",{A_DISP_PC,A_R0},{HEX_C,HEX_7,PCRELIMM_8BY4}}, + {"movca.l",{A_R0,A_IND_N},{HEX_0,REG_N,HEX_C,HEX_3}}, + {"movt",{A_REG_N},{HEX_0,REG_N,HEX_2,HEX_9}}, + {"muls",{ A_REG_M,A_REG_N},{HEX_2,REG_N,REG_M,HEX_F}}, + {"mul.l",{ A_REG_M,A_REG_N},{HEX_0,REG_N,REG_M,HEX_7}}, + {"mulu",{ A_REG_M,A_REG_N},{HEX_2,REG_N,REG_M,HEX_E}}, + {"neg",{ A_REG_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_B}}, + {"negc",{ A_REG_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_A}}, + {"nop",{0},{HEX_0,HEX_0,HEX_0,HEX_9}}, + {"not",{ A_REG_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_7}}, + {"ocbi",{A_IND_N},{HEX_0,REG_N,HEX_9,HEX_3}}, + {"ocbp",{A_IND_N},{HEX_0,REG_N,HEX_A,HEX_3}}, + {"ocbwb",{A_IND_N},{HEX_0,REG_N,HEX_B,HEX_3}}, + {"or",{A_IMM,A_R0},{HEX_C,HEX_B,IMM_8}}, + {"or",{ A_REG_M,A_REG_N},{HEX_2,REG_N,REG_M,HEX_B}}, + {"or.b",{A_IMM,A_R0_GBR},{HEX_C,HEX_F,IMM_8}}, + 
{"pref",{A_IND_N},{HEX_0,REG_N,HEX_8,HEX_3}}, + {"rotcl",{A_REG_N},{HEX_4,REG_N,HEX_2,HEX_4}}, + {"rotcr",{A_REG_N},{HEX_4,REG_N,HEX_2,HEX_5}}, + {"rotl",{A_REG_N},{HEX_4,REG_N,HEX_0,HEX_4}}, + {"rotr",{A_REG_N},{HEX_4,REG_N,HEX_0,HEX_5}}, + {"rte",{0},{HEX_0,HEX_0,HEX_2,HEX_B}}, + {"rts",{0},{HEX_0,HEX_0,HEX_0,HEX_B}}, + {"sets",{0},{HEX_0,HEX_0,HEX_5,HEX_8}}, + {"sett",{0},{HEX_0,HEX_0,HEX_1,HEX_8}}, + {"shad",{ A_REG_M,A_REG_N},{HEX_4,REG_N,REG_M,HEX_C}}, + {"shld",{ A_REG_M,A_REG_N},{HEX_4,REG_N,REG_M,HEX_D}}, + {"shal",{A_REG_N},{HEX_4,REG_N,HEX_2,HEX_0}}, + {"shar",{A_REG_N},{HEX_4,REG_N,HEX_2,HEX_1}}, + {"shll",{A_REG_N},{HEX_4,REG_N,HEX_0,HEX_0}}, + {"shll16",{A_REG_N},{HEX_4,REG_N,HEX_2,HEX_8}}, + {"shll2",{A_REG_N},{HEX_4,REG_N,HEX_0,HEX_8}}, + {"shll8",{A_REG_N},{HEX_4,REG_N,HEX_1,HEX_8}}, + {"shlr",{A_REG_N},{HEX_4,REG_N,HEX_0,HEX_1}}, + {"shlr16",{A_REG_N},{HEX_4,REG_N,HEX_2,HEX_9}}, + {"shlr2",{A_REG_N},{HEX_4,REG_N,HEX_0,HEX_9}}, + {"shlr8",{A_REG_N},{HEX_4,REG_N,HEX_1,HEX_9}}, + {"sleep",{0},{HEX_0,HEX_0,HEX_1,HEX_B}}, + {"stc",{A_SR,A_REG_N},{HEX_0,REG_N,HEX_0,HEX_2}}, + {"stc",{A_GBR,A_REG_N},{HEX_0,REG_N,HEX_1,HEX_2}}, + {"stc",{A_VBR,A_REG_N},{HEX_0,REG_N,HEX_2,HEX_2}}, + {"stc",{A_SSR,A_REG_N},{HEX_0,REG_N,HEX_3,HEX_2}}, + {"stc",{A_SPC,A_REG_N},{HEX_0,REG_N,HEX_4,HEX_2}}, + {"stc",{A_SGR,A_REG_N},{HEX_0,REG_N,HEX_6,HEX_2}}, + {"stc",{A_DBR,A_REG_N},{HEX_0,REG_N,HEX_7,HEX_2}}, + {"stc",{A_REG_B,A_REG_N},{HEX_0,REG_N,REG_B,HEX_2}}, + {"stc.l",{A_SR,A_DEC_N},{HEX_4,REG_N,HEX_0,HEX_3}}, + {"stc.l",{A_GBR,A_DEC_N},{HEX_4,REG_N,HEX_1,HEX_3}}, + {"stc.l",{A_VBR,A_DEC_N},{HEX_4,REG_N,HEX_2,HEX_3}}, + {"stc.l",{A_SSR,A_DEC_N},{HEX_4,REG_N,HEX_3,HEX_3}}, + {"stc.l",{A_SPC,A_DEC_N},{HEX_4,REG_N,HEX_4,HEX_3}}, + {"stc.l",{A_SGR,A_DEC_N},{HEX_4,REG_N,HEX_6,HEX_3}}, + {"stc.l",{A_DBR,A_DEC_N},{HEX_4,REG_N,HEX_7,HEX_3}}, + {"stc.l",{A_REG_B,A_DEC_N},{HEX_4,REG_N,REG_B,HEX_3}}, + {"sts",{A_MACH,A_REG_N},{HEX_0,REG_N,HEX_0,HEX_A}}, + {"sts",{A_MACL,A_REG_N},{HEX_0,REG_N,HEX_1,HEX_A}}, + {"sts",{A_PR,A_REG_N},{HEX_0,REG_N,HEX_2,HEX_A}}, + {"sts",{FPUL_M,A_REG_N},{HEX_0,REG_N,HEX_5,HEX_A}}, + {"sts",{FPSCR_M,A_REG_N},{HEX_0,REG_N,HEX_6,HEX_A}}, + {"sts.l",{A_MACH,A_DEC_N},{HEX_4,REG_N,HEX_0,HEX_2}}, + {"sts.l",{A_MACL,A_DEC_N},{HEX_4,REG_N,HEX_1,HEX_2}}, + {"sts.l",{A_PR,A_DEC_N},{HEX_4,REG_N,HEX_2,HEX_2}}, + {"sts.l",{FPUL_M,A_DEC_N},{HEX_4,REG_N,HEX_5,HEX_2}}, + {"sts.l",{FPSCR_M,A_DEC_N},{HEX_4,REG_N,HEX_6,HEX_2}}, + {"sub",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_8}}, + {"subc",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_A}}, + {"subv",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_B}}, + {"swap.b",{ A_REG_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_8}}, + {"swap.w",{ A_REG_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_9}}, + {"tas.b",{A_IND_N},{HEX_4,REG_N,HEX_1,HEX_B}}, + {"trapa",{A_IMM},{HEX_C,HEX_3,IMM_8}}, + {"tst",{A_IMM,A_R0},{HEX_C,HEX_8,IMM_8}}, + {"tst",{ A_REG_M,A_REG_N},{HEX_2,REG_N,REG_M,HEX_8}}, + {"tst.b",{A_IMM,A_R0_GBR},{HEX_C,HEX_C,IMM_8}}, + {"xor",{A_IMM,A_R0},{HEX_C,HEX_A,IMM_8}}, + {"xor",{ A_REG_M,A_REG_N},{HEX_2,REG_N,REG_M,HEX_A}}, + {"xor.b",{A_IMM,A_R0_GBR},{HEX_C,HEX_E,IMM_8}}, + {"xtrct",{ A_REG_M,A_REG_N},{HEX_2,REG_N,REG_M,HEX_D}}, + {"mul.l",{ A_REG_M,A_REG_N},{HEX_0,REG_N,REG_M,HEX_7}}, + {"dt",{A_REG_N},{HEX_4,REG_N,HEX_1,HEX_0}}, + {"dmuls.l",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_D}}, + {"dmulu.l",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_5}}, + {"mac.l",{A_INC_M,A_INC_N},{HEX_0,REG_N,REG_M,HEX_F}}, + {"braf",{A_REG_N},{HEX_0,REG_N,HEX_2,HEX_3}}, + 
{"bsrf",{A_REG_N},{HEX_0,REG_N,HEX_0,HEX_3}}, + {"fabs",{FD_REG_N},{HEX_F,REG_N,HEX_5,HEX_D}}, + {"fadd",{F_REG_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_0}}, + {"fadd",{D_REG_M,D_REG_N},{HEX_F,REG_N,REG_M,HEX_0}}, + {"fcmp/eq",{F_REG_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_4}}, + {"fcmp/eq",{D_REG_M,D_REG_N},{HEX_F,REG_N,REG_M,HEX_4}}, + {"fcmp/gt",{F_REG_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_5}}, + {"fcmp/gt",{D_REG_M,D_REG_N},{HEX_F,REG_N,REG_M,HEX_5}}, + {"fcnvds",{D_REG_N,FPUL_M},{HEX_F,REG_N,HEX_B,HEX_D}}, + {"fcnvsd",{FPUL_M,D_REG_N},{HEX_F,REG_N,HEX_A,HEX_D}}, + {"fdiv",{F_REG_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_3}}, + {"fdiv",{D_REG_M,D_REG_N},{HEX_F,REG_N,REG_M,HEX_3}}, + {"fipr",{V_REG_M,V_REG_N},{HEX_F,REG_NM,HEX_E,HEX_D}}, + {"fldi0",{F_REG_N},{HEX_F,REG_N,HEX_8,HEX_D}}, + {"fldi1",{F_REG_N},{HEX_F,REG_N,HEX_9,HEX_D}}, + {"flds",{F_REG_N,FPUL_M},{HEX_F,REG_N,HEX_1,HEX_D}}, + {"float",{FPUL_M,FD_REG_N},{HEX_F,REG_N,HEX_2,HEX_D}}, + {"fmac",{F_FR0,F_REG_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_E}}, + {"fmov",{F_REG_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_C}}, + {"fmov",{DX_REG_M,DX_REG_N},{HEX_F,REG_N,REG_M,HEX_C}}, + {"fmov",{A_IND_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_8}}, + {"fmov",{A_IND_M,DX_REG_N},{HEX_F,REG_N,REG_M,HEX_8}}, + {"fmov",{F_REG_M,A_IND_N},{HEX_F,REG_N,REG_M,HEX_A}}, + {"fmov",{DX_REG_M,A_IND_N},{HEX_F,REG_N,REG_M,HEX_A}}, + {"fmov",{A_INC_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_9}}, + {"fmov",{A_INC_M,DX_REG_N},{HEX_F,REG_N,REG_M,HEX_9}}, + {"fmov",{F_REG_M,A_DEC_N},{HEX_F,REG_N,REG_M,HEX_B}}, + {"fmov",{DX_REG_M,A_DEC_N},{HEX_F,REG_N,REG_M,HEX_B}}, + {"fmov",{A_IND_R0_REG_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_6}}, + {"fmov",{A_IND_R0_REG_M,DX_REG_N},{HEX_F,REG_N,REG_M,HEX_6}}, + {"fmov",{F_REG_M,A_IND_R0_REG_N},{HEX_F,REG_N,REG_M,HEX_7}}, + {"fmov",{DX_REG_M,A_IND_R0_REG_N},{HEX_F,REG_N,REG_M,HEX_7}}, + {"fmov.d",{A_IND_M,DX_REG_N},{HEX_F,REG_N,REG_M,HEX_8}}, + {"fmov.d",{DX_REG_M,A_IND_N},{HEX_F,REG_N,REG_M,HEX_A}}, + {"fmov.d",{A_INC_M,DX_REG_N},{HEX_F,REG_N,REG_M,HEX_9}}, + {"fmov.d",{DX_REG_M,A_DEC_N},{HEX_F,REG_N,REG_M,HEX_B}}, + {"fmov.d",{A_IND_R0_REG_M,DX_REG_N},{HEX_F,REG_N,REG_M,HEX_6}}, + {"fmov.d",{DX_REG_M,A_IND_R0_REG_N},{HEX_F,REG_N,REG_M,HEX_7}}, + {"fmov.s",{A_IND_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_8}}, + {"fmov.s",{F_REG_M,A_IND_N},{HEX_F,REG_N,REG_M,HEX_A}}, + {"fmov.s",{A_INC_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_9}}, + {"fmov.s",{F_REG_M,A_DEC_N},{HEX_F,REG_N,REG_M,HEX_B}}, + {"fmov.s",{A_IND_R0_REG_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_6}}, + {"fmov.s",{F_REG_M,A_IND_R0_REG_N},{HEX_F,REG_N,REG_M,HEX_7}}, + {"fmul",{F_REG_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_2}}, + {"fmul",{D_REG_M,D_REG_N},{HEX_F,REG_N,REG_M,HEX_2}}, + {"fneg",{FD_REG_N},{HEX_F,REG_N,HEX_4,HEX_D}}, + {"frchg",{0},{HEX_F,HEX_B,HEX_F,HEX_D}}, + {"fschg",{0},{HEX_F,HEX_3,HEX_F,HEX_D}}, + {"fsqrt",{FD_REG_N},{HEX_F,REG_N,HEX_6,HEX_D}}, + {"fsts",{FPUL_M,F_REG_N},{HEX_F,REG_N,HEX_0,HEX_D}}, + {"fsub",{F_REG_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_1}}, + {"fsub",{D_REG_M,D_REG_N},{HEX_F,REG_N,REG_M,HEX_1}}, + {"ftrc",{FD_REG_N,FPUL_M},{HEX_F,REG_N,HEX_3,HEX_D}}, + {"ftrv",{XMTRX_M4,V_REG_N},{HEX_F,REG_NM,HEX_F,HEX_D}}, + { 0 }, +}; + +static void print_sh_insn(u32 memaddr, u16 insn) +{ + int relmask = ~0; + int nibs[4] = { (insn >> 12) & 0xf, (insn >> 8) & 0xf, (insn >> 4) & 0xf, insn & 0xf}; + int lastsp; + struct sh_opcode_info *op = sh_table; + + for (; op->name; op++) { + int n; + int imm = 0; + int rn = 0; + int rm = 0; + int rb = 0; + int disp_pc; + int disp_pc_addr = 0; + + for (n = 0; n < 4; n++) { + int i = op->nibbles[n]; + + if 
(i < 16) { + if (nibs[n] == i) + continue; + goto fail; + } + switch (i) { + case BRANCH_8: + imm = (nibs[2] << 4) | (nibs[3]); + if (imm & 0x80) + imm |= ~0xff; + imm = ((char)imm) * 2 + 4 ; + goto ok; + case BRANCH_12: + imm = ((nibs[1]) << 8) | (nibs[2] << 4) | (nibs[3]); + if (imm & 0x800) + imm |= ~0xfff; + imm = imm * 2 + 4; + goto ok; + case IMM_4: + imm = nibs[3]; + goto ok; + case IMM_4BY2: + imm = nibs[3] <<1; + goto ok; + case IMM_4BY4: + imm = nibs[3] <<2; + goto ok; + case IMM_8: + imm = (nibs[2] << 4) | nibs[3]; + goto ok; + case PCRELIMM_8BY2: + imm = ((nibs[2] << 4) | nibs[3]) <<1; + relmask = ~1; + goto ok; + case PCRELIMM_8BY4: + imm = ((nibs[2] << 4) | nibs[3]) <<2; + relmask = ~3; + goto ok; + case IMM_8BY2: + imm = ((nibs[2] << 4) | nibs[3]) <<1; + goto ok; + case IMM_8BY4: + imm = ((nibs[2] << 4) | nibs[3]) <<2; + goto ok; + case DISP_8: + imm = (nibs[2] << 4) | (nibs[3]); + goto ok; + case DISP_4: + imm = nibs[3]; + goto ok; + case REG_N: + rn = nibs[n]; + break; + case REG_M: + rm = nibs[n]; + break; + case REG_NM: + rn = (nibs[n] & 0xc) >> 2; + rm = (nibs[n] & 0x3); + break; + case REG_B: + rb = nibs[n] & 0x07; + break; + default: + return; + } + } + + ok: + printk("%-8s ", op->name); + lastsp = (op->arg[0] == A_END); + disp_pc = 0; + for (n = 0; n < 6 && op->arg[n] != A_END; n++) { + if (n && op->arg[1] != A_END) + printk(", "); + switch (op->arg[n]) { + case A_IMM: + printk("#%d", (char)(imm)); + break; + case A_R0: + printk("r0"); + break; + case A_REG_N: + printk("r%d", rn); + break; + case A_INC_N: + printk("@r%d+", rn); + break; + case A_DEC_N: + printk("@-r%d", rn); + break; + case A_IND_N: + printk("@r%d", rn); + break; + case A_DISP_REG_N: + printk("@(%d,r%d)", imm, rn); + break; + case A_REG_M: + printk("r%d", rm); + break; + case A_INC_M: + printk("@r%d+", rm); + break; + case A_DEC_M: + printk("@-r%d", rm); + break; + case A_IND_M: + printk("@r%d", rm); + break; + case A_DISP_REG_M: + printk("@(%d,r%d)", imm, rm); + break; + case A_REG_B: + printk("r%d_bank", rb); + break; + case A_DISP_PC: + disp_pc = 1; + disp_pc_addr = imm + 4 + (memaddr & relmask); + printk("%08x <%pS>", disp_pc_addr, + (void *)disp_pc_addr); + break; + case A_IND_R0_REG_N: + printk("@(r0,r%d)", rn); + break; + case A_IND_R0_REG_M: + printk("@(r0,r%d)", rm); + break; + case A_DISP_GBR: + printk("@(%d,gbr)",imm); + break; + case A_R0_GBR: + printk("@(r0,gbr)"); + break; + case A_BDISP12: + case A_BDISP8: + printk("%08x", imm + memaddr); + break; + case A_SR: + printk("sr"); + break; + case A_GBR: + printk("gbr"); + break; + case A_VBR: + printk("vbr"); + break; + case A_SSR: + printk("ssr"); + break; + case A_SPC: + printk("spc"); + break; + case A_MACH: + printk("mach"); + break; + case A_MACL: + printk("macl"); + break; + case A_PR: + printk("pr"); + break; + case A_SGR: + printk("sgr"); + break; + case A_DBR: + printk("dbr"); + break; + case FD_REG_N: + if (0) + goto d_reg_n; + case F_REG_N: + printk("fr%d", rn); + break; + case F_REG_M: + printk("fr%d", rm); + break; + case DX_REG_N: + if (rn & 1) { + printk("xd%d", rn & ~1); + break; + } + d_reg_n: + case D_REG_N: + printk("dr%d", rn); + break; + case DX_REG_M: + if (rm & 1) { + printk("xd%d", rm & ~1); + break; + } + case D_REG_M: + printk("dr%d", rm); + break; + case FPSCR_M: + case FPSCR_N: + printk("fpscr"); + break; + case FPUL_M: + case FPUL_N: + printk("fpul"); + break; + case F_FR0: + printk("fr0"); + break; + case V_REG_N: + printk("fv%d", rn*4); + break; + case V_REG_M: + printk("fv%d", rm*4); + break; + case XMTRX_M4: 
+ printk("xmtrx"); + break; + default: + return; + } + } + + if (disp_pc && strcmp(op->name, "mova") != 0) { + u32 val; + + if (relmask == ~1) + __get_user(val, (u16 *)disp_pc_addr); + else + __get_user(val, (u32 *)disp_pc_addr); + + printk(" ! %08x <%pS>", val, (void *)val); + } + + return; + fail: + ; + + } + + printk(".word 0x%x%x%x%x", nibs[0], nibs[1], nibs[2], nibs[3]); +} + +void show_code(struct pt_regs *regs) +{ + unsigned short *pc = (unsigned short *)regs->pc; + long i; + + if (regs->pc & 0x1) + return; + + printk("Code:\n"); + + for (i = -3 ; i < 6 ; i++) { + unsigned short insn; + + if (__get_user(insn, pc + i)) { + printk(" (Bad address in pc)\n"); + break; + } + + printk("%s%08lx: ", (i ? " ": "->"), (unsigned long)(pc + i)); + print_sh_insn((unsigned long)(pc + i), insn); + printk("\n"); + } + + printk("\n"); +} diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S index 5b7efc4016fa..d62359cfbbe2 100644 --- a/arch/sh/kernel/entry-common.S +++ b/arch/sh/kernel/entry-common.S @@ -308,15 +308,19 @@ ENTRY(system_call) mov.l 1f, r9 mov.l @r9, r8 ! Read from TRA (Trap Address) Register #endif + + mov #OFF_TRA, r10 + add r15, r10 + mov.l r8, @r10 ! set TRA value to tra + /* * Check the trap type */ mov #((0x20 << 2) - 1), r9 cmp/hi r9, r8 bt/s debug_trap ! it's a debug trap.. - mov #OFF_TRA, r9 - add r15, r9 - mov.l r8, @r9 ! set TRA value to tra + nop + #ifdef CONFIG_TRACE_IRQFLAGS mov.l 5f, r10 jsr @r10 @@ -371,47 +375,3 @@ syscall_exit: #endif 7: .long do_syscall_trace_enter 8: .long do_syscall_trace_leave - -#ifdef CONFIG_FUNCTION_TRACER - .align 2 - .globl _mcount - .type _mcount,@function - .globl mcount - .type mcount,@function -_mcount: -mcount: - mov.l r4, @-r15 - mov.l r5, @-r15 - mov.l r6, @-r15 - mov.l r7, @-r15 - sts.l pr, @-r15 - - mov.l @(20,r15),r4 - sts pr, r5 - - mov.l 1f, r6 - mov.l ftrace_stub, r7 - cmp/eq r6, r7 - bt skip_trace - - mov.l @r6, r6 - jsr @r6 - nop - -skip_trace: - - lds.l @r15+, pr - mov.l @r15+, r7 - mov.l @r15+, r6 - mov.l @r15+, r5 - rts - mov.l @r15+, r4 - - .align 2 -1: .long ftrace_trace_function - - .globl ftrace_stub -ftrace_stub: - rts - nop -#endif /* CONFIG_FUNCTION_TRACER */ diff --git a/arch/sh/kernel/ftrace.c b/arch/sh/kernel/ftrace.c new file mode 100644 index 000000000000..4c3247477aa3 --- /dev/null +++ b/arch/sh/kernel/ftrace.c @@ -0,0 +1,133 @@ +/* + * Copyright (C) 2008 Matt Fleming <mjf@gentoo.org> + * Copyright (C) 2008 Paul Mundt <lethal@linux-sh.org> + * + * Code for replacing ftrace calls with jumps. + * + * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com> + * + * Thanks goes to Ingo Molnar, for suggesting the idea. + * Mathieu Desnoyers, for suggesting postponing the modifications. + * Arjan van de Ven, for keeping me straight, and explaining to me + * the dangers of modifying code on the run. + */ +#include <linux/uaccess.h> +#include <linux/ftrace.h> +#include <linux/string.h> +#include <linux/init.h> +#include <linux/io.h> +#include <asm/ftrace.h> +#include <asm/cacheflush.h> + +static unsigned char ftrace_nop[] = { + 0x09, 0x00, /* nop */ + 0x09, 0x00, /* nop */ +}; + +static unsigned char ftrace_replaced_code[MCOUNT_INSN_SIZE]; + +unsigned char *ftrace_nop_replace(void) +{ + return ftrace_nop; +} + +static int is_sh_nop(unsigned char *ip) +{ + return strncmp(ip, ftrace_nop, sizeof(ftrace_nop)); +} + +unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr) +{ + /* Place the address in the memory table. 
*/ + if (addr == CALLER_ADDR) + __raw_writel(addr + MCOUNT_INSN_OFFSET, ftrace_replaced_code); + else + __raw_writel(addr, ftrace_replaced_code); + + /* + * No locking needed, this must be called via kstop_machine + * which in essence is like running on a uniprocessor machine. + */ + return ftrace_replaced_code; +} + +int ftrace_modify_code(unsigned long ip, unsigned char *old_code, + unsigned char *new_code) +{ + unsigned char replaced[MCOUNT_INSN_SIZE]; + + /* + * Note: Due to modules and __init, code can + * disappear and change, we need to protect against faulting + * as well as code changing. We do this by using the + * probe_kernel_* functions. + * + * No real locking needed, this code is run through + * kstop_machine, or before SMP starts. + */ + + /* + * If we're trying to nop out a call to a function, we instead + * place a call to the address after the memory table. + */ + if (is_sh_nop(new_code) == 0) + __raw_writel(ip + MCOUNT_INSN_SIZE, (unsigned long)new_code); + + /* read the text we want to modify */ + if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE)) + return -EFAULT; + + /* Make sure it is what we expect it to be */ + if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0) + return -EINVAL; + + /* replace the text with the new text */ + if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE)) + return -EPERM; + + flush_icache_range(ip, ip + MCOUNT_INSN_SIZE); + + return 0; +} + +int ftrace_update_ftrace_func(ftrace_func_t func) +{ + unsigned long ip = (unsigned long)(&ftrace_call); + unsigned char old[MCOUNT_INSN_SIZE], *new; + + memcpy(old, (unsigned char *)(ip + MCOUNT_INSN_OFFSET), MCOUNT_INSN_SIZE); + new = ftrace_call_replace(ip, (unsigned long)func); + + return ftrace_modify_code(ip + MCOUNT_INSN_OFFSET, old, new); +} + +int ftrace_make_nop(struct module *mod, + struct dyn_ftrace *rec, unsigned long addr) +{ + unsigned char *new, *old; + unsigned long ip = rec->ip; + + old = ftrace_call_replace(ip, addr); + new = ftrace_nop_replace(); + + return ftrace_modify_code(rec->ip, old, new); +} + +int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) +{ + unsigned char *new, *old; + unsigned long ip = rec->ip; + + old = ftrace_nop_replace(); + new = ftrace_call_replace(ip, addr); + + return ftrace_modify_code(rec->ip, old, new); +} + +int __init ftrace_dyn_arch_init(void *data) +{ + /* The return code is retured via data */ + __raw_writel(0, (unsigned long)data); + + return 0; +} diff --git a/arch/sh/kernel/head_32.S b/arch/sh/kernel/head_32.S index ae0a382a82eb..788605ff7088 100644 --- a/arch/sh/kernel/head_32.S +++ b/arch/sh/kernel/head_32.S @@ -80,8 +80,14 @@ ENTRY(_stext) mov.l 7f, r0 ldc r0, r7_bank ! ... and initial thread_info #endif - - ! Clear BSS area + +#ifndef CONFIG_SH_NO_BSS_INIT + /* + * Don't clear BSS if running on slow platforms such as an RTL simulation, + * remote memory via SHdebug link, etc. For these the memory can be guaranteed + * to be all zero on boot anyway. + */ + ! Clear BSS area #ifdef CONFIG_SMP mov.l 3f, r0 cmp/eq #0, r0 ! skip clear if set to zero @@ -97,6 +103,8 @@ ENTRY(_stext) mov.l r0,@-r2 10: +#endif + ! Additional CPU initialization mov.l 6f, r0 jsr @r0 diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c new file mode 100644 index 000000000000..fe59ccfc1152 --- /dev/null +++ b/arch/sh/kernel/idle.c @@ -0,0 +1,81 @@ +/* + * The idle loop for all SuperH platforms. 
+ * + * Copyright (C) 2002 - 2008 Paul Mundt + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#include <linux/module.h> +#include <linux/init.h> +#include <linux/mm.h> +#include <linux/pm.h> +#include <linux/tick.h> +#include <linux/preempt.h> +#include <linux/thread_info.h> +#include <linux/irqflags.h> +#include <asm/pgalloc.h> +#include <asm/system.h> +#include <asm/atomic.h> + +static int hlt_counter; +void (*pm_idle)(void); +void (*pm_power_off)(void); +EXPORT_SYMBOL(pm_power_off); + +static int __init nohlt_setup(char *__unused) +{ + hlt_counter = 1; + return 1; +} +__setup("nohlt", nohlt_setup); + +static int __init hlt_setup(char *__unused) +{ + hlt_counter = 0; + return 1; +} +__setup("hlt", hlt_setup); + +static void default_idle(void) +{ + if (!hlt_counter) { + clear_thread_flag(TIF_POLLING_NRFLAG); + smp_mb__after_clear_bit(); + set_bl_bit(); + stop_critical_timings(); + + while (!need_resched()) + cpu_sleep(); + + start_critical_timings(); + clear_bl_bit(); + set_thread_flag(TIF_POLLING_NRFLAG); + } else + while (!need_resched()) + cpu_relax(); +} + +void cpu_idle(void) +{ + set_thread_flag(TIF_POLLING_NRFLAG); + + /* endless idle loop with no priority at all */ + while (1) { + void (*idle)(void) = pm_idle; + + if (!idle) + idle = default_idle; + + tick_nohz_stop_sched_tick(1); + while (!need_resched()) + idle(); + tick_nohz_restart_sched_tick(); + + preempt_enable_no_resched(); + schedule(); + preempt_disable(); + check_pgt_cache(); + } +} diff --git a/arch/sh/kernel/kgdb.c b/arch/sh/kernel/kgdb.c new file mode 100644 index 000000000000..7c747e7d71b8 --- /dev/null +++ b/arch/sh/kernel/kgdb.c @@ -0,0 +1,285 @@ +/* + * SuperH KGDB support + * + * Copyright (C) 2008 Paul Mundt + * + * Single stepping taken from the old stub by Henry Bell and Jeremy Siegel. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#include <linux/kgdb.h> +#include <linux/kdebug.h> +#include <linux/irq.h> +#include <linux/io.h> +#include <asm/cacheflush.h> + +char in_nmi = 0; /* Set during NMI to prevent re-entry */ + +/* Macros for single step instruction identification */ +#define OPCODE_BT(op) (((op) & 0xff00) == 0x8900) +#define OPCODE_BF(op) (((op) & 0xff00) == 0x8b00) +#define OPCODE_BTF_DISP(op) (((op) & 0x80) ? (((op) | 0xffffff80) << 1) : \ + (((op) & 0x7f ) << 1)) +#define OPCODE_BFS(op) (((op) & 0xff00) == 0x8f00) +#define OPCODE_BTS(op) (((op) & 0xff00) == 0x8d00) +#define OPCODE_BRA(op) (((op) & 0xf000) == 0xa000) +#define OPCODE_BRA_DISP(op) (((op) & 0x800) ? (((op) | 0xfffff800) << 1) : \ + (((op) & 0x7ff) << 1)) +#define OPCODE_BRAF(op) (((op) & 0xf0ff) == 0x0023) +#define OPCODE_BRAF_REG(op) (((op) & 0x0f00) >> 8) +#define OPCODE_BSR(op) (((op) & 0xf000) == 0xb000) +#define OPCODE_BSR_DISP(op) (((op) & 0x800) ? 
(((op) | 0xfffff800) << 1) : \ + (((op) & 0x7ff) << 1)) +#define OPCODE_BSRF(op) (((op) & 0xf0ff) == 0x0003) +#define OPCODE_BSRF_REG(op) (((op) >> 8) & 0xf) +#define OPCODE_JMP(op) (((op) & 0xf0ff) == 0x402b) +#define OPCODE_JMP_REG(op) (((op) >> 8) & 0xf) +#define OPCODE_JSR(op) (((op) & 0xf0ff) == 0x400b) +#define OPCODE_JSR_REG(op) (((op) >> 8) & 0xf) +#define OPCODE_RTS(op) ((op) == 0xb) +#define OPCODE_RTE(op) ((op) == 0x2b) + +#define SR_T_BIT_MASK 0x1 +#define STEP_OPCODE 0xc33d + +/* Calculate the new address for after a step */ +static short *get_step_address(struct pt_regs *linux_regs) +{ + opcode_t op = __raw_readw(linux_regs->pc); + long addr; + + /* BT */ + if (OPCODE_BT(op)) { + if (linux_regs->sr & SR_T_BIT_MASK) + addr = linux_regs->pc + 4 + OPCODE_BTF_DISP(op); + else + addr = linux_regs->pc + 2; + } + + /* BTS */ + else if (OPCODE_BTS(op)) { + if (linux_regs->sr & SR_T_BIT_MASK) + addr = linux_regs->pc + 4 + OPCODE_BTF_DISP(op); + else + addr = linux_regs->pc + 4; /* Not in delay slot */ + } + + /* BF */ + else if (OPCODE_BF(op)) { + if (!(linux_regs->sr & SR_T_BIT_MASK)) + addr = linux_regs->pc + 4 + OPCODE_BTF_DISP(op); + else + addr = linux_regs->pc + 2; + } + + /* BFS */ + else if (OPCODE_BFS(op)) { + if (!(linux_regs->sr & SR_T_BIT_MASK)) + addr = linux_regs->pc + 4 + OPCODE_BTF_DISP(op); + else + addr = linux_regs->pc + 4; /* Not in delay slot */ + } + + /* BRA */ + else if (OPCODE_BRA(op)) + addr = linux_regs->pc + 4 + OPCODE_BRA_DISP(op); + + /* BRAF */ + else if (OPCODE_BRAF(op)) + addr = linux_regs->pc + 4 + + linux_regs->regs[OPCODE_BRAF_REG(op)]; + + /* BSR */ + else if (OPCODE_BSR(op)) + addr = linux_regs->pc + 4 + OPCODE_BSR_DISP(op); + + /* BSRF */ + else if (OPCODE_BSRF(op)) + addr = linux_regs->pc + 4 + + linux_regs->regs[OPCODE_BSRF_REG(op)]; + + /* JMP */ + else if (OPCODE_JMP(op)) + addr = linux_regs->regs[OPCODE_JMP_REG(op)]; + + /* JSR */ + else if (OPCODE_JSR(op)) + addr = linux_regs->regs[OPCODE_JSR_REG(op)]; + + /* RTS */ + else if (OPCODE_RTS(op)) + addr = linux_regs->pr; + + /* RTE */ + else if (OPCODE_RTE(op)) + addr = linux_regs->regs[15]; + + /* Other */ + else + addr = linux_regs->pc + instruction_size(op); + + flush_icache_range(addr, addr + instruction_size(op)); + return (short *)addr; +} + +/* + * Replace the instruction immediately after the current instruction + * (i.e. next in the expected flow of control) with a trap instruction, + * so that returning will cause only a single instruction to be executed. + * Note that this model is slightly broken for instructions with delay + * slots (e.g. B[TF]S, BSR, BRA etc), where both the branch and the + * instruction in the delay slot will be executed. 
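Before the helpers that implement this, a standalone check of the displacement arithmetic that get_step_address() relies on for the conditional branches; the opcode and program counter values are purely illustrative:

#include <inttypes.h>
#include <stdio.h>

/* Taken bt/bf: target = pc + 4 + sign_extend8(disp) * 2, which is what
 * OPCODE_BTF_DISP() computes with masks and shifts. */
static uint32_t sh_bt_target(uint32_t pc, uint16_t op)
{
	int disp = (int8_t)(op & 0xff);	/* low byte, sign extended */
	return pc + 4 + disp * 2;	/* SH instructions are 2 bytes */
}

int main(void)
{
	/* 0x89fe is "bt" with displacement 0xfe (-2 instructions), so the
	 * taken branch lands back on the bt itself: pc + 4 - 4 = pc. */
	printf("%08" PRIx32 "\n", sh_bt_target(0x8c001000, 0x89fe));
	return 0;
}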
+ */ + +static unsigned long stepped_address; +static opcode_t stepped_opcode; + +static void do_single_step(struct pt_regs *linux_regs) +{ + /* Determine where the target instruction will send us to */ + unsigned short *addr = get_step_address(linux_regs); + + stepped_address = (int)addr; + + /* Replace it */ + stepped_opcode = __raw_readw((long)addr); + *addr = STEP_OPCODE; + + /* Flush and return */ + flush_icache_range((long)addr, (long)addr + + instruction_size(stepped_opcode)); +} + +/* Undo a single step */ +static void undo_single_step(struct pt_regs *linux_regs) +{ + /* If we have stepped, put back the old instruction */ + /* Use stepped_address in case we stopped elsewhere */ + if (stepped_opcode != 0) { + __raw_writew(stepped_opcode, stepped_address); + flush_icache_range(stepped_address, stepped_address + 2); + } + + stepped_opcode = 0; +} + +void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs) +{ + int i; + + for (i = 0; i < 16; i++) + gdb_regs[GDB_R0 + i] = regs->regs[i]; + + gdb_regs[GDB_PC] = regs->pc; + gdb_regs[GDB_PR] = regs->pr; + gdb_regs[GDB_SR] = regs->sr; + gdb_regs[GDB_GBR] = regs->gbr; + gdb_regs[GDB_MACH] = regs->mach; + gdb_regs[GDB_MACL] = regs->macl; + + __asm__ __volatile__ ("stc vbr, %0" : "=r" (gdb_regs[GDB_VBR])); +} + +void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs) +{ + int i; + + for (i = 0; i < 16; i++) + regs->regs[GDB_R0 + i] = gdb_regs[GDB_R0 + i]; + + regs->pc = gdb_regs[GDB_PC]; + regs->pr = gdb_regs[GDB_PR]; + regs->sr = gdb_regs[GDB_SR]; + regs->gbr = gdb_regs[GDB_GBR]; + regs->mach = gdb_regs[GDB_MACH]; + regs->macl = gdb_regs[GDB_MACL]; + + __asm__ __volatile__ ("ldc %0, vbr" : : "r" (gdb_regs[GDB_VBR])); +} + +void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p) +{ + gdb_regs[GDB_R15] = p->thread.sp; + gdb_regs[GDB_PC] = p->thread.pc; +} + +int kgdb_arch_handle_exception(int e_vector, int signo, int err_code, + char *remcomInBuffer, char *remcomOutBuffer, + struct pt_regs *linux_regs) +{ + unsigned long addr; + char *ptr; + + /* Undo any stepping we may have done */ + undo_single_step(linux_regs); + + switch (remcomInBuffer[0]) { + case 'c': + case 's': + /* try to read optional parameter, pc unchanged if no parm */ + ptr = &remcomInBuffer[1]; + if (kgdb_hex2long(&ptr, &addr)) + linux_regs->pc = addr; + case 'D': + case 'k': + atomic_set(&kgdb_cpu_doing_single_step, -1); + + if (remcomInBuffer[0] == 's') { + do_single_step(linux_regs); + kgdb_single_step = 1; + + atomic_set(&kgdb_cpu_doing_single_step, + raw_smp_processor_id()); + } + + return 0; + } + + /* this means that we do not want to exit from the handler: */ + return -1; +} + +/* + * The primary entry points for the kgdb debug trap table entries. 
+ */ +BUILD_TRAP_HANDLER(singlestep) +{ + unsigned long flags; + TRAP_HANDLER_DECL; + + local_irq_save(flags); + regs->pc -= instruction_size(__raw_readw(regs->pc - 4)); + kgdb_handle_exception(vec >> 2, SIGTRAP, 0, regs); + local_irq_restore(flags); +} + + +BUILD_TRAP_HANDLER(breakpoint) +{ + unsigned long flags; + TRAP_HANDLER_DECL; + + local_irq_save(flags); + kgdb_handle_exception(vec >> 2, SIGTRAP, 0, regs); + local_irq_restore(flags); +} + +int kgdb_arch_init(void) +{ + return 0; +} + +void kgdb_arch_exit(void) +{ +} + +struct kgdb_arch arch_kgdb_ops = { + /* Breakpoint instruction: trapa #0x3c */ +#ifdef CONFIG_CPU_LITTLE_ENDIAN + .gdb_bpt_instr = { 0x3c, 0xc3 }, +#else + .gdb_bpt_instr = { 0xc3, 0x3c }, +#endif +}; diff --git a/arch/sh/kernel/kgdb_jmp.S b/arch/sh/kernel/kgdb_jmp.S deleted file mode 100644 index 339bb1d7ff0b..000000000000 --- a/arch/sh/kernel/kgdb_jmp.S +++ /dev/null @@ -1,33 +0,0 @@ -#include <linux/linkage.h> - -ENTRY(setjmp) - add #(9*4), r4 - sts.l pr, @-r4 - mov.l r15, @-r4 - mov.l r14, @-r4 - mov.l r13, @-r4 - mov.l r12, @-r4 - mov.l r11, @-r4 - mov.l r10, @-r4 - mov.l r9, @-r4 - mov.l r8, @-r4 - rts - mov #0, r0 - -ENTRY(longjmp) - mov.l @r4+, r8 - mov.l @r4+, r9 - mov.l @r4+, r10 - mov.l @r4+, r11 - mov.l @r4+, r12 - mov.l @r4+, r13 - mov.l @r4+, r14 - mov.l @r4+, r15 - lds.l @r4+, pr - mov r5, r0 - tst r0, r0 - bf 1f - mov #1, r0 ! in case val==0 -1: rts - nop - diff --git a/arch/sh/kernel/kgdb_stub.c b/arch/sh/kernel/kgdb_stub.c deleted file mode 100644 index bf8ac4c71640..000000000000 --- a/arch/sh/kernel/kgdb_stub.c +++ /dev/null @@ -1,1052 +0,0 @@ -/* - * May be copied or modified under the terms of the GNU General Public - * License. See linux/COPYING for more information. - * - * Contains extracts from code by Glenn Engel, Jim Kingdon, - * David Grothe <dave@gcom.com>, Tigran Aivazian <tigran@sco.com>, - * Amit S. Kale <akale@veritas.com>, William Gatliff <bgat@open-widgets.com>, - * Ben Lee, Steve Chamberlain and Benoit Miller <fulg@iname.com>. - * - * This version by Henry Bell <henry.bell@st.com> - * Minor modifications by Jeremy Siegel <jsiegel@mvista.com> - * - * Contains low-level support for remote debug using GDB. - * - * To enable debugger support, two things need to happen. A call to - * set_debug_traps() is necessary in order to allow any breakpoints - * or error conditions to be properly intercepted and reported to gdb. - * A breakpoint also needs to be generated to begin communication. This - * is most easily accomplished by a call to breakpoint() which does - * a trapa if the initialisation phase has been successfully completed. - * - * In this case, set_debug_traps() is not used to "take over" exceptions; - * other kernel code is modified instead to enter the kgdb functions here - * when appropriate (see entry.S for breakpoint traps and NMI interrupts, - * see traps.c for kernel error exceptions). 
- * - * The following gdb commands are supported: - * - * Command Function Return value - * - * g return the value of the CPU registers hex data or ENN - * G set the value of the CPU registers OK or ENN - * - * mAA..AA,LLLL Read LLLL bytes at address AA..AA hex data or ENN - * MAA..AA,LLLL: Write LLLL bytes at address AA.AA OK or ENN - * XAA..AA,LLLL: Same, but data is binary (not hex) OK or ENN - * - * c Resume at current address SNN ( signal NN) - * cAA..AA Continue at address AA..AA SNN - * CNN; Resume at current address with signal SNN - * CNN;AA..AA Resume at address AA..AA with signal SNN - * - * s Step one instruction SNN - * sAA..AA Step one instruction from AA..AA SNN - * SNN; Step one instruction with signal SNN - * SNNAA..AA Step one instruction from AA..AA w/NN SNN - * - * k kill (Detach GDB) - * - * d Toggle debug flag - * D Detach GDB - * - * Hct Set thread t for operations, OK or ENN - * c = 'c' (step, cont), c = 'g' (other - * operations) - * - * qC Query current thread ID QCpid - * qfThreadInfo Get list of current threads (first) m<id> - * qsThreadInfo " " " " " (subsequent) - * qOffsets Get section offsets Text=x;Data=y;Bss=z - * - * TXX Find if thread XX is alive OK or ENN - * ? What was the last sigval ? SNN (signal NN) - * O Output to GDB console - * - * Remote communication protocol. - * - * A debug packet whose contents are <data> is encapsulated for - * transmission in the form: - * - * $ <data> # CSUM1 CSUM2 - * - * <data> must be ASCII alphanumeric and cannot include characters - * '$' or '#'. If <data> starts with two characters followed by - * ':', then the existing stubs interpret this as a sequence number. - * - * CSUM1 and CSUM2 are ascii hex representation of an 8-bit - * checksum of <data>, the most significant nibble is sent first. - * the hex digits 0-9,a-f are used. - * - * Receiver responds with: - * - * + - if CSUM is correct and ready for next packet - * - - if CSUM is incorrect - * - * Responses can be run-length encoded to save space. A '*' means that - * the next character is an ASCII encoding giving a repeat count which - * stands for that many repetitions of the character preceding the '*'. - * The encoding is n+29, yielding a printable character where n >=3 - * (which is where RLE starts to win). Don't use an n > 126. - * - * So "0* " means the same as "0000". 
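As a concrete check of the framing rules described above, a minimal sender sketch; put_debug_char() is the stub's output hook, stubbed out here with putchar() for illustration only:

#include <stdio.h>

static void put_debug_char(int c)
{
	putchar(c);
}

/* Emit $<data>#<csum>, where <csum> is the modulo-256 sum of <data>
 * sent as two hex digits, most significant nibble first. */
static void send_packet(const char *data)
{
	unsigned char csum = 0;
	const char *p;

	put_debug_char('$');
	for (p = data; *p; p++) {
		csum += *p;
		put_debug_char(*p);
	}
	put_debug_char('#');
	put_debug_char("0123456789abcdef"[csum >> 4]);
	put_debug_char("0123456789abcdef"[csum & 0xf]);
}

int main(void)
{
	send_packet("OK");	/* emits "$OK#9a" */
	putchar('\n');
	return 0;
}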
- */ - -#include <linux/string.h> -#include <linux/kernel.h> -#include <linux/sched.h> -#include <linux/smp.h> -#include <linux/spinlock.h> -#include <linux/delay.h> -#include <linux/linkage.h> -#include <linux/init.h> -#include <linux/console.h> -#include <linux/sysrq.h> -#include <linux/module.h> -#include <asm/system.h> -#include <asm/cacheflush.h> -#include <asm/current.h> -#include <asm/signal.h> -#include <asm/pgtable.h> -#include <asm/ptrace.h> -#include <asm/kgdb.h> -#include <asm/io.h> - -/* Function pointers for linkage */ -kgdb_debug_hook_t *kgdb_debug_hook; -kgdb_bus_error_hook_t *kgdb_bus_err_hook; - -int (*kgdb_getchar)(void); -EXPORT_SYMBOL_GPL(kgdb_getchar); -void (*kgdb_putchar)(int); -EXPORT_SYMBOL_GPL(kgdb_putchar); - -static void put_debug_char(int c) -{ - if (!kgdb_putchar) - return; - (*kgdb_putchar)(c); -} -static int get_debug_char(void) -{ - if (!kgdb_getchar) - return -1; - return (*kgdb_getchar)(); -} - -/* Num chars in in/out bound buffers, register packets need NUMREGBYTES * 2 */ -#define BUFMAX 1024 -#define NUMREGBYTES (MAXREG*4) -#define OUTBUFMAX (NUMREGBYTES*2+512) - -enum { - R0 = 0, R1, R2, R3, R4, R5, R6, R7, - R8, R9, R10, R11, R12, R13, R14, R15, - PC, PR, GBR, VBR, MACH, MACL, SR, - /* */ - MAXREG -}; - -static unsigned int registers[MAXREG]; -struct kgdb_regs trap_registers; - -char kgdb_in_gdb_mode; -char in_nmi; /* Set during NMI to prevent reentry */ -int kgdb_nofault; /* Boolean to ignore bus errs (i.e. in GDB) */ - -/* Default values for SCI (can override via kernel args in setup.c) */ -#ifndef CONFIG_KGDB_DEFPORT -#define CONFIG_KGDB_DEFPORT 1 -#endif - -#ifndef CONFIG_KGDB_DEFBAUD -#define CONFIG_KGDB_DEFBAUD 115200 -#endif - -#if defined(CONFIG_KGDB_DEFPARITY_E) -#define CONFIG_KGDB_DEFPARITY 'E' -#elif defined(CONFIG_KGDB_DEFPARITY_O) -#define CONFIG_KGDB_DEFPARITY 'O' -#else /* CONFIG_KGDB_DEFPARITY_N */ -#define CONFIG_KGDB_DEFPARITY 'N' -#endif - -#ifdef CONFIG_KGDB_DEFBITS_7 -#define CONFIG_KGDB_DEFBITS '7' -#else /* CONFIG_KGDB_DEFBITS_8 */ -#define CONFIG_KGDB_DEFBITS '8' -#endif - -/* SCI/UART settings, used in kgdb_console_setup() */ -int kgdb_portnum = CONFIG_KGDB_DEFPORT; -EXPORT_SYMBOL_GPL(kgdb_portnum); -int kgdb_baud = CONFIG_KGDB_DEFBAUD; -EXPORT_SYMBOL_GPL(kgdb_baud); -char kgdb_parity = CONFIG_KGDB_DEFPARITY; -EXPORT_SYMBOL_GPL(kgdb_parity); -char kgdb_bits = CONFIG_KGDB_DEFBITS; -EXPORT_SYMBOL_GPL(kgdb_bits); - -/* Jump buffer for setjmp/longjmp */ -static jmp_buf rem_com_env; - -/* TRA differs sh3/4 */ -#if defined(CONFIG_CPU_SH3) -#define TRA 0xffffffd0 -#elif defined(CONFIG_CPU_SH4) -#define TRA 0xff000020 -#endif - -/* Macros for single step instruction identification */ -#define OPCODE_BT(op) (((op) & 0xff00) == 0x8900) -#define OPCODE_BF(op) (((op) & 0xff00) == 0x8b00) -#define OPCODE_BTF_DISP(op) (((op) & 0x80) ? (((op) | 0xffffff80) << 1) : \ - (((op) & 0x7f ) << 1)) -#define OPCODE_BFS(op) (((op) & 0xff00) == 0x8f00) -#define OPCODE_BTS(op) (((op) & 0xff00) == 0x8d00) -#define OPCODE_BRA(op) (((op) & 0xf000) == 0xa000) -#define OPCODE_BRA_DISP(op) (((op) & 0x800) ? (((op) | 0xfffff800) << 1) : \ - (((op) & 0x7ff) << 1)) -#define OPCODE_BRAF(op) (((op) & 0xf0ff) == 0x0023) -#define OPCODE_BRAF_REG(op) (((op) & 0x0f00) >> 8) -#define OPCODE_BSR(op) (((op) & 0xf000) == 0xb000) -#define OPCODE_BSR_DISP(op) (((op) & 0x800) ? 
(((op) | 0xfffff800) << 1) : \ - (((op) & 0x7ff) << 1)) -#define OPCODE_BSRF(op) (((op) & 0xf0ff) == 0x0003) -#define OPCODE_BSRF_REG(op) (((op) >> 8) & 0xf) -#define OPCODE_JMP(op) (((op) & 0xf0ff) == 0x402b) -#define OPCODE_JMP_REG(op) (((op) >> 8) & 0xf) -#define OPCODE_JSR(op) (((op) & 0xf0ff) == 0x400b) -#define OPCODE_JSR_REG(op) (((op) >> 8) & 0xf) -#define OPCODE_RTS(op) ((op) == 0xb) -#define OPCODE_RTE(op) ((op) == 0x2b) - -#define SR_T_BIT_MASK 0x1 -#define STEP_OPCODE 0xc320 -#define BIOS_CALL_TRAP 0x3f - -/* Exception codes as per SH-4 core manual */ -#define ADDRESS_ERROR_LOAD_VEC 7 -#define ADDRESS_ERROR_STORE_VEC 8 -#define TRAP_VEC 11 -#define INVALID_INSN_VEC 12 -#define INVALID_SLOT_VEC 13 -#define NMI_VEC 14 -#define USER_BREAK_VEC 15 -#define SERIAL_BREAK_VEC 58 - -/* Misc static */ -static int stepped_address; -static short stepped_opcode; -static char in_buffer[BUFMAX]; -static char out_buffer[OUTBUFMAX]; - -static void kgdb_to_gdb(const char *s); - -/* Convert ch to hex */ -static int hex(const char ch) -{ - if ((ch >= 'a') && (ch <= 'f')) - return (ch - 'a' + 10); - if ((ch >= '0') && (ch <= '9')) - return (ch - '0'); - if ((ch >= 'A') && (ch <= 'F')) - return (ch - 'A' + 10); - return (-1); -} - -/* Convert the memory pointed to by mem into hex, placing result in buf. - Returns a pointer to the last char put in buf (null) */ -static char *mem_to_hex(const char *mem, char *buf, const int count) -{ - int i; - int ch; - unsigned short s_val; - unsigned long l_val; - - /* Check for 16 or 32 */ - if (count == 2 && ((long) mem & 1) == 0) { - s_val = *(unsigned short *) mem; - mem = (char *) &s_val; - } else if (count == 4 && ((long) mem & 3) == 0) { - l_val = *(unsigned long *) mem; - mem = (char *) &l_val; - } - for (i = 0; i < count; i++) { - ch = *mem++; - buf = pack_hex_byte(buf, ch); - } - *buf = 0; - return (buf); -} - -/* Convert the hex array pointed to by buf into binary, to be placed in mem. - Return a pointer to the character after the last byte written */ -static char *hex_to_mem(const char *buf, char *mem, const int count) -{ - int i; - unsigned char ch; - - for (i = 0; i < count; i++) { - ch = hex(*buf++) << 4; - ch = ch + hex(*buf++); - *mem++ = ch; - } - return (mem); -} - -/* While finding valid hex chars, convert to an integer, then return it */ -static int hex_to_int(char **ptr, int *int_value) -{ - int num_chars = 0; - int hex_value; - - *int_value = 0; - - while (**ptr) { - hex_value = hex(**ptr); - if (hex_value >= 0) { - *int_value = (*int_value << 4) | hex_value; - num_chars++; - } else - break; - (*ptr)++; - } - return num_chars; -} - -/* Copy the binary array pointed to by buf into mem. Fix $, #, - and 0x7d escaped with 0x7d. Return a pointer to the character - after the last byte written. 
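The decoder that follows only undoes the escaping; the sending side has to apply it first. A sketch of that counterpart, assuming the reserved bytes are '$', '#' and the 0x7d escape character itself:

#include <stddef.h>
#include <stdio.h>

/* Escape '$', '#' and 0x7d as 0x7d followed by the byte XORed with
 * 0x20, matching what ebin_to_mem() undoes. */
static size_t ebin_escape(const unsigned char *src, size_t count,
			  unsigned char *dst)
{
	size_t n = 0;

	while (count--) {
		unsigned char c = *src++;

		if (c == '$' || c == '#' || c == 0x7d) {
			dst[n++] = 0x7d;
			dst[n++] = c ^ 0x20;
		} else {
			dst[n++] = c;
		}
	}
	return n;			/* bytes placed in dst */
}

int main(void)
{
	unsigned char in[] = { 0x01, '#', 0x7d }, out[8];
	size_t i, n = ebin_escape(in, sizeof(in), out);

	for (i = 0; i < n; i++)		/* prints: 01 7d 03 7d 5d */
		printf("%02x ", out[i]);
	printf("\n");
	return 0;
}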
*/ -static char *ebin_to_mem(const char *buf, char *mem, int count) -{ - for (; count > 0; count--, buf++) { - if (*buf == 0x7d) - *mem++ = *(++buf) ^ 0x20; - else - *mem++ = *buf; - } - return mem; -} - -/* Scan for the start char '$', read the packet and check the checksum */ -static void get_packet(char *buffer, int buflen) -{ - unsigned char checksum; - unsigned char xmitcsum; - int i; - int count; - char ch; - - do { - /* Ignore everything until the start character */ - while ((ch = get_debug_char()) != '$'); - - checksum = 0; - xmitcsum = -1; - count = 0; - - /* Now, read until a # or end of buffer is found */ - while (count < (buflen - 1)) { - ch = get_debug_char(); - - if (ch == '#') - break; - - checksum = checksum + ch; - buffer[count] = ch; - count = count + 1; - } - - buffer[count] = 0; - - /* Continue to read checksum following # */ - if (ch == '#') { - xmitcsum = hex(get_debug_char()) << 4; - xmitcsum += hex(get_debug_char()); - - /* Checksum */ - if (checksum != xmitcsum) - put_debug_char('-'); /* Failed checksum */ - else { - /* Ack successful transfer */ - put_debug_char('+'); - - /* If a sequence char is present, reply - the sequence ID */ - if (buffer[2] == ':') { - put_debug_char(buffer[0]); - put_debug_char(buffer[1]); - - /* Remove sequence chars from buffer */ - count = strlen(buffer); - for (i = 3; i <= count; i++) - buffer[i - 3] = buffer[i]; - } - } - } - } - while (checksum != xmitcsum); /* Keep trying while we fail */ -} - -/* Send the packet in the buffer with run-length encoding */ -static void put_packet(char *buffer) -{ - int checksum; - char *src; - int runlen; - int encode; - - do { - src = buffer; - put_debug_char('$'); - checksum = 0; - - /* Continue while we still have chars left */ - while (*src) { - /* Check for runs up to 99 chars long */ - for (runlen = 1; runlen < 99; runlen++) { - if (src[0] != src[runlen]) - break; - } - - if (runlen > 3) { - /* Got a useful amount, send encoding */ - encode = runlen + ' ' - 4; - put_debug_char(*src); checksum += *src; - put_debug_char('*'); checksum += '*'; - put_debug_char(encode); checksum += encode; - src += runlen; - } else { - /* Otherwise just send the current char */ - put_debug_char(*src); checksum += *src; - src += 1; - } - } - - /* '#' Separator, put high and low components of checksum */ - put_debug_char('#'); - put_debug_char(hex_asc_hi(checksum)); - put_debug_char(hex_asc_lo(checksum)); - } - while ((get_debug_char()) != '+'); /* While no ack */ -} - -/* A bus error has occurred - perform a longjmp to return execution and - allow handling of the error */ -static void kgdb_handle_bus_error(void) -{ - longjmp(rem_com_env, 1); -} - -/* Translate SH-3/4 exception numbers to unix-like signal values */ -static int compute_signal(const int excep_code) -{ - int sigval; - - switch (excep_code) { - - case INVALID_INSN_VEC: - case INVALID_SLOT_VEC: - sigval = SIGILL; - break; - case ADDRESS_ERROR_LOAD_VEC: - case ADDRESS_ERROR_STORE_VEC: - sigval = SIGSEGV; - break; - - case SERIAL_BREAK_VEC: - case NMI_VEC: - sigval = SIGINT; - break; - - case USER_BREAK_VEC: - case TRAP_VEC: - sigval = SIGTRAP; - break; - - default: - sigval = SIGBUS; /* "software generated" */ - break; - } - - return (sigval); -} - -/* Make a local copy of the registers passed into the handler (bletch) */ -static void kgdb_regs_to_gdb_regs(const struct kgdb_regs *regs, - int *gdb_regs) -{ - gdb_regs[R0] = regs->regs[R0]; - gdb_regs[R1] = regs->regs[R1]; - gdb_regs[R2] = regs->regs[R2]; - gdb_regs[R3] = regs->regs[R3]; - gdb_regs[R4] = 
regs->regs[R4]; - gdb_regs[R5] = regs->regs[R5]; - gdb_regs[R6] = regs->regs[R6]; - gdb_regs[R7] = regs->regs[R7]; - gdb_regs[R8] = regs->regs[R8]; - gdb_regs[R9] = regs->regs[R9]; - gdb_regs[R10] = regs->regs[R10]; - gdb_regs[R11] = regs->regs[R11]; - gdb_regs[R12] = regs->regs[R12]; - gdb_regs[R13] = regs->regs[R13]; - gdb_regs[R14] = regs->regs[R14]; - gdb_regs[R15] = regs->regs[R15]; - gdb_regs[PC] = regs->pc; - gdb_regs[PR] = regs->pr; - gdb_regs[GBR] = regs->gbr; - gdb_regs[MACH] = regs->mach; - gdb_regs[MACL] = regs->macl; - gdb_regs[SR] = regs->sr; - gdb_regs[VBR] = regs->vbr; -} - -/* Copy local gdb registers back to kgdb regs, for later copy to kernel */ -static void gdb_regs_to_kgdb_regs(const int *gdb_regs, - struct kgdb_regs *regs) -{ - regs->regs[R0] = gdb_regs[R0]; - regs->regs[R1] = gdb_regs[R1]; - regs->regs[R2] = gdb_regs[R2]; - regs->regs[R3] = gdb_regs[R3]; - regs->regs[R4] = gdb_regs[R4]; - regs->regs[R5] = gdb_regs[R5]; - regs->regs[R6] = gdb_regs[R6]; - regs->regs[R7] = gdb_regs[R7]; - regs->regs[R8] = gdb_regs[R8]; - regs->regs[R9] = gdb_regs[R9]; - regs->regs[R10] = gdb_regs[R10]; - regs->regs[R11] = gdb_regs[R11]; - regs->regs[R12] = gdb_regs[R12]; - regs->regs[R13] = gdb_regs[R13]; - regs->regs[R14] = gdb_regs[R14]; - regs->regs[R15] = gdb_regs[R15]; - regs->pc = gdb_regs[PC]; - regs->pr = gdb_regs[PR]; - regs->gbr = gdb_regs[GBR]; - regs->mach = gdb_regs[MACH]; - regs->macl = gdb_regs[MACL]; - regs->sr = gdb_regs[SR]; - regs->vbr = gdb_regs[VBR]; -} - -/* Calculate the new address for after a step */ -static short *get_step_address(void) -{ - short op = *(short *) trap_registers.pc; - long addr; - - /* BT */ - if (OPCODE_BT(op)) { - if (trap_registers.sr & SR_T_BIT_MASK) - addr = trap_registers.pc + 4 + OPCODE_BTF_DISP(op); - else - addr = trap_registers.pc + 2; - } - - /* BTS */ - else if (OPCODE_BTS(op)) { - if (trap_registers.sr & SR_T_BIT_MASK) - addr = trap_registers.pc + 4 + OPCODE_BTF_DISP(op); - else - addr = trap_registers.pc + 4; /* Not in delay slot */ - } - - /* BF */ - else if (OPCODE_BF(op)) { - if (!(trap_registers.sr & SR_T_BIT_MASK)) - addr = trap_registers.pc + 4 + OPCODE_BTF_DISP(op); - else - addr = trap_registers.pc + 2; - } - - /* BFS */ - else if (OPCODE_BFS(op)) { - if (!(trap_registers.sr & SR_T_BIT_MASK)) - addr = trap_registers.pc + 4 + OPCODE_BTF_DISP(op); - else - addr = trap_registers.pc + 4; /* Not in delay slot */ - } - - /* BRA */ - else if (OPCODE_BRA(op)) - addr = trap_registers.pc + 4 + OPCODE_BRA_DISP(op); - - /* BRAF */ - else if (OPCODE_BRAF(op)) - addr = trap_registers.pc + 4 - + trap_registers.regs[OPCODE_BRAF_REG(op)]; - - /* BSR */ - else if (OPCODE_BSR(op)) - addr = trap_registers.pc + 4 + OPCODE_BSR_DISP(op); - - /* BSRF */ - else if (OPCODE_BSRF(op)) - addr = trap_registers.pc + 4 - + trap_registers.regs[OPCODE_BSRF_REG(op)]; - - /* JMP */ - else if (OPCODE_JMP(op)) - addr = trap_registers.regs[OPCODE_JMP_REG(op)]; - - /* JSR */ - else if (OPCODE_JSR(op)) - addr = trap_registers.regs[OPCODE_JSR_REG(op)]; - - /* RTS */ - else if (OPCODE_RTS(op)) - addr = trap_registers.pr; - - /* RTE */ - else if (OPCODE_RTE(op)) - addr = trap_registers.regs[15]; - - /* Other */ - else - addr = trap_registers.pc + 2; - - flush_icache_range(addr, addr + 2); - return (short *) addr; -} - -/* Set up a single-step. Replace the instruction immediately after the - current instruction (i.e. next in the expected flow of control) with a - trap instruction, so that returning will cause only a single instruction - to be executed. 
Note that this model is slightly broken for instructions - with delay slots (e.g. B[TF]S, BSR, BRA etc), where both the branch - and the instruction in the delay slot will be executed. */ -static void do_single_step(void) -{ - unsigned short *addr = 0; - - /* Determine where the target instruction will send us to */ - addr = get_step_address(); - stepped_address = (int)addr; - - /* Replace it */ - stepped_opcode = *(short *)addr; - *addr = STEP_OPCODE; - - /* Flush and return */ - flush_icache_range((long) addr, (long) addr + 2); -} - -/* Undo a single step */ -static void undo_single_step(void) -{ - /* If we have stepped, put back the old instruction */ - /* Use stepped_address in case we stopped elsewhere */ - if (stepped_opcode != 0) { - *(short*)stepped_address = stepped_opcode; - flush_icache_range(stepped_address, stepped_address + 2); - } - stepped_opcode = 0; -} - -/* Send a signal message */ -static void send_signal_msg(const int signum) -{ - out_buffer[0] = 'S'; - out_buffer[1] = hex_asc_hi(signum); - out_buffer[2] = hex_asc_lo(signum); - out_buffer[3] = 0; - put_packet(out_buffer); -} - -/* Reply that all was well */ -static void send_ok_msg(void) -{ - strcpy(out_buffer, "OK"); - put_packet(out_buffer); -} - -/* Reply that an error occurred */ -static void send_err_msg(void) -{ - strcpy(out_buffer, "E01"); - put_packet(out_buffer); -} - -/* Empty message indicates unrecognised command */ -static void send_empty_msg(void) -{ - put_packet(""); -} - -/* Read memory due to 'm' message */ -static void read_mem_msg(void) -{ - char *ptr; - int addr; - int length; - - /* Jmp, disable bus error handler */ - if (setjmp(rem_com_env) == 0) { - - kgdb_nofault = 1; - - /* Walk through, have m<addr>,<length> */ - ptr = &in_buffer[1]; - if (hex_to_int(&ptr, &addr) && (*ptr++ == ',')) - if (hex_to_int(&ptr, &length)) { - ptr = 0; - if (length * 2 > OUTBUFMAX) - length = OUTBUFMAX / 2; - mem_to_hex((char *) addr, out_buffer, length); - } - if (ptr) - send_err_msg(); - else - put_packet(out_buffer); - } else - send_err_msg(); - - /* Restore bus error handler */ - kgdb_nofault = 0; -} - -/* Write memory due to 'M' or 'X' message */ -static void write_mem_msg(int binary) -{ - char *ptr; - int addr; - int length; - - if (setjmp(rem_com_env) == 0) { - - kgdb_nofault = 1; - - /* Walk through, have M<addr>,<length>:<data> */ - ptr = &in_buffer[1]; - if (hex_to_int(&ptr, &addr) && (*ptr++ == ',')) - if (hex_to_int(&ptr, &length) && (*ptr++ == ':')) { - if (binary) - ebin_to_mem(ptr, (char*)addr, length); - else - hex_to_mem(ptr, (char*)addr, length); - flush_icache_range(addr, addr + length); - ptr = 0; - send_ok_msg(); - } - if (ptr) - send_err_msg(); - } else - send_err_msg(); - - /* Restore bus error handler */ - kgdb_nofault = 0; -} - -/* Continue message */ -static void continue_msg(void) -{ - /* Try to read optional parameter, PC unchanged if none */ - char *ptr = &in_buffer[1]; - int addr; - - if (hex_to_int(&ptr, &addr)) - trap_registers.pc = addr; -} - -/* Continue message with signal */ -static void continue_with_sig_msg(void) -{ - int signal; - char *ptr = &in_buffer[1]; - int addr; - - /* Report limitation */ - kgdb_to_gdb("Cannot force signal in kgdb, continuing anyway.\n"); - - /* Signal */ - hex_to_int(&ptr, &signal); - if (*ptr == ';') - ptr++; - - /* Optional address */ - if (hex_to_int(&ptr, &addr)) - trap_registers.pc = addr; -} - -/* Step message */ -static void step_msg(void) -{ - continue_msg(); - do_single_step(); -} - -/* Step message with signal */ -static void 
step_with_sig_msg(void) -{ - continue_with_sig_msg(); - do_single_step(); -} - -/* Send register contents */ -static void send_regs_msg(void) -{ - kgdb_regs_to_gdb_regs(&trap_registers, registers); - mem_to_hex((char *) registers, out_buffer, NUMREGBYTES); - put_packet(out_buffer); -} - -/* Set register contents - currently can't set other thread's registers */ -static void set_regs_msg(void) -{ - kgdb_regs_to_gdb_regs(&trap_registers, registers); - hex_to_mem(&in_buffer[1], (char *) registers, NUMREGBYTES); - gdb_regs_to_kgdb_regs(registers, &trap_registers); - send_ok_msg(); -} - -#ifdef CONFIG_SH_KGDB_CONSOLE -/* - * Bring up the ports.. - */ -static int __init kgdb_serial_setup(void) -{ - struct console dummy; - return kgdb_console_setup(&dummy, 0); -} -#else -#define kgdb_serial_setup() 0 -#endif - -/* The command loop, read and act on requests */ -static void kgdb_command_loop(const int excep_code, const int trapa_value) -{ - int sigval; - - /* Enter GDB mode (e.g. after detach) */ - if (!kgdb_in_gdb_mode) { - /* Do serial setup, notify user, issue preemptive ack */ - printk(KERN_NOTICE "KGDB: Waiting for GDB\n"); - kgdb_in_gdb_mode = 1; - put_debug_char('+'); - } - - /* Reply to host that an exception has occurred */ - sigval = compute_signal(excep_code); - send_signal_msg(sigval); - - /* TRAP_VEC exception indicates a software trap inserted in place of - code by GDB so back up PC by one instruction, as this instruction - will later be replaced by its original one. Do NOT do this for - trap 0xff, since that indicates a compiled-in breakpoint which - will not be replaced (and we would retake the trap forever) */ - if ((excep_code == TRAP_VEC) && (trapa_value != (0x3c << 2))) - trap_registers.pc -= 2; - - /* Undo any stepping we may have done */ - undo_single_step(); - - while (1) { - out_buffer[0] = 0; - get_packet(in_buffer, BUFMAX); - - /* Examine first char of buffer to see what we need to do */ - switch (in_buffer[0]) { - case '?': /* Send which signal we've received */ - send_signal_msg(sigval); - break; - - case 'g': /* Return the values of the CPU registers */ - send_regs_msg(); - break; - - case 'G': /* Set the value of the CPU registers */ - set_regs_msg(); - break; - - case 'm': /* Read LLLL bytes address AA..AA */ - read_mem_msg(); - break; - - case 'M': /* Write LLLL bytes address AA..AA, ret OK */ - write_mem_msg(0); /* 0 = data in hex */ - break; - - case 'X': /* Write LLLL bytes esc bin address AA..AA */ - if (kgdb_bits == '8') - write_mem_msg(1); /* 1 = data in binary */ - else - send_empty_msg(); - break; - - case 'C': /* Continue, signum included, we ignore it */ - continue_with_sig_msg(); - return; - - case 'c': /* Continue at address AA..AA (optional) */ - continue_msg(); - return; - - case 'S': /* Step, signum included, we ignore it */ - step_with_sig_msg(); - return; - - case 's': /* Step one instruction from AA..AA */ - step_msg(); - return; - - case 'k': /* 'Kill the program' with a kernel ? */ - break; - - case 'D': /* Detach from program, send reply OK */ - kgdb_in_gdb_mode = 0; - send_ok_msg(); - get_debug_char(); - return; - - default: - send_empty_msg(); - break; - } - } -} - -/* There has been an exception, most likely a breakpoint. 
*/ -static void handle_exception(struct pt_regs *regs) -{ - int excep_code, vbr_val; - int count; - int trapa_value = ctrl_inl(TRA); - - /* Copy kernel regs (from stack) */ - for (count = 0; count < 16; count++) - trap_registers.regs[count] = regs->regs[count]; - trap_registers.pc = regs->pc; - trap_registers.pr = regs->pr; - trap_registers.sr = regs->sr; - trap_registers.gbr = regs->gbr; - trap_registers.mach = regs->mach; - trap_registers.macl = regs->macl; - - asm("stc vbr, %0":"=r"(vbr_val)); - trap_registers.vbr = vbr_val; - - /* Get excode for command loop call, user access */ - asm("stc r2_bank, %0":"=r"(excep_code)); - - /* Act on the exception */ - kgdb_command_loop(excep_code, trapa_value); - - /* Copy back the (maybe modified) registers */ - for (count = 0; count < 16; count++) - regs->regs[count] = trap_registers.regs[count]; - regs->pc = trap_registers.pc; - regs->pr = trap_registers.pr; - regs->sr = trap_registers.sr; - regs->gbr = trap_registers.gbr; - regs->mach = trap_registers.mach; - regs->macl = trap_registers.macl; - - vbr_val = trap_registers.vbr; - asm("ldc %0, vbr": :"r"(vbr_val)); -} - -asmlinkage void kgdb_handle_exception(unsigned long r4, unsigned long r5, - unsigned long r6, unsigned long r7, - struct pt_regs __regs) -{ - struct pt_regs *regs = RELOC_HIDE(&__regs, 0); - handle_exception(regs); -} - -/* Initialise the KGDB data structures and serial configuration */ -int __init kgdb_init(void) -{ - in_nmi = 0; - kgdb_nofault = 0; - stepped_opcode = 0; - kgdb_in_gdb_mode = 0; - - if (kgdb_serial_setup() != 0) { - printk(KERN_NOTICE "KGDB: serial setup error\n"); - return -1; - } - - /* Init ptr to exception handler */ - kgdb_debug_hook = handle_exception; - kgdb_bus_err_hook = kgdb_handle_bus_error; - - /* Enter kgdb now if requested, or just report init done */ - printk(KERN_NOTICE "KGDB: stub is initialized.\n"); - - return 0; -} - -/* Make function available for "user messages"; console will use it too. */ - -char gdbmsgbuf[BUFMAX]; -#define MAXOUT ((BUFMAX-2)/2) - -static void kgdb_msg_write(const char *s, unsigned count) -{ - int i; - int wcount; - char *bufptr; - - /* 'O'utput */ - gdbmsgbuf[0] = 'O'; - - /* Fill and send buffers... */ - while (count > 0) { - bufptr = gdbmsgbuf + 1; - - /* Calculate how many this time */ - wcount = (count > MAXOUT) ? 
MAXOUT : count; - - /* Pack in hex chars */ - for (i = 0; i < wcount; i++) - bufptr = pack_hex_byte(bufptr, s[i]); - *bufptr = '\0'; - - /* Move up */ - s += wcount; - count -= wcount; - - /* Write packet */ - put_packet(gdbmsgbuf); - } -} - -static void kgdb_to_gdb(const char *s) -{ - kgdb_msg_write(s, strlen(s)); -} - -#ifdef CONFIG_SH_KGDB_CONSOLE -void kgdb_console_write(struct console *co, const char *s, unsigned count) -{ - /* Bail if we're not talking to GDB */ - if (!kgdb_in_gdb_mode) - return; - - kgdb_msg_write(s, count); -} -#endif - -#ifdef CONFIG_KGDB_SYSRQ -static void sysrq_handle_gdb(int key, struct tty_struct *tty) -{ - printk("Entering GDB stub\n"); - breakpoint(); -} - -static struct sysrq_key_op sysrq_gdb_op = { - .handler = sysrq_handle_gdb, - .help_msg = "Gdb", - .action_msg = "GDB", -}; - -static int gdb_register_sysrq(void) -{ - printk("Registering GDB sysrq handler\n"); - register_sysrq_key('g', &sysrq_gdb_op); - return 0; -} -module_init(gdb_register_sysrq); -#endif diff --git a/arch/sh/kernel/pm.c b/arch/sh/kernel/pm.c deleted file mode 100644 index 10ab62c9aede..000000000000 --- a/arch/sh/kernel/pm.c +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Generic Power Management Routine - * - * Copyright (c) 2006 Andriy Skulysh <askulsyh@gmail.com> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License. - */ -#include <linux/suspend.h> -#include <linux/delay.h> -#include <linux/gfp.h> -#include <asm/freq.h> -#include <asm/io.h> -#include <asm/watchdog.h> -#include <asm/pm.h> - -#define INTR_OFFSET 0x600 - -#define STBCR 0xffffff82 -#define STBCR2 0xffffff88 - -#define STBCR_STBY 0x80 -#define STBCR_MSTP2 0x04 - -#define MCR 0xffffff68 -#define RTCNT 0xffffff70 - -#define MCR_RMODE 2 -#define MCR_RFSH 4 - -void pm_enter(void) -{ - u8 stbcr, csr; - u16 frqcr, mcr; - u32 vbr_new, vbr_old; - - set_bl_bit(); - - /* set wdt */ - csr = sh_wdt_read_csr(); - csr &= ~WTCSR_TME; - csr |= WTCSR_CKS_4096; - sh_wdt_write_csr(csr); - csr = sh_wdt_read_csr(); - sh_wdt_write_cnt(0); - - /* disable PLL1 */ - frqcr = ctrl_inw(FRQCR); - frqcr &= ~(FRQCR_PLLEN | FRQCR_PSTBY); - ctrl_outw(frqcr, FRQCR); - - /* enable standby */ - stbcr = ctrl_inb(STBCR); - ctrl_outb(stbcr | STBCR_STBY | STBCR_MSTP2, STBCR); - - /* set self-refresh */ - mcr = ctrl_inw(MCR); - ctrl_outw(mcr & ~MCR_RFSH, MCR); - - /* set interrupt handler */ - asm volatile("stc vbr, %0" : "=r" (vbr_old)); - vbr_new = get_zeroed_page(GFP_ATOMIC); - udelay(50); - memcpy((void*)(vbr_new + INTR_OFFSET), - &wakeup_start, &wakeup_end - &wakeup_start); - asm volatile("ldc %0, vbr" : : "r" (vbr_new)); - - ctrl_outw(0, RTCNT); - ctrl_outw(mcr | MCR_RFSH | MCR_RMODE, MCR); - - cpu_sleep(); - - asm volatile("ldc %0, vbr" : : "r" (vbr_old)); - - free_page(vbr_new); - - /* enable PLL1 */ - frqcr = ctrl_inw(FRQCR); - frqcr |= FRQCR_PSTBY; - ctrl_outw(frqcr, FRQCR); - udelay(50); - frqcr |= FRQCR_PLLEN; - ctrl_outw(frqcr, FRQCR); - - ctrl_outb(stbcr, STBCR); - - clear_bl_bit(); -} diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c index b965f0282c7d..ddafbbbab2ab 100644 --- a/arch/sh/kernel/process_32.c +++ b/arch/sh/kernel/process_32.c @@ -32,65 +32,8 @@ #include <asm/fpu.h> #include <asm/syscalls.h> -static int hlt_counter; int ubc_usercnt = 0; -void (*pm_idle)(void); -void (*pm_power_off)(void); -EXPORT_SYMBOL(pm_power_off); - -static int __init nohlt_setup(char *__unused) -{ - hlt_counter = 1; - return 1; -} -__setup("nohlt", nohlt_setup); - -static 
int __init hlt_setup(char *__unused) -{ - hlt_counter = 0; - return 1; -} -__setup("hlt", hlt_setup); - -static void default_idle(void) -{ - if (!hlt_counter) { - clear_thread_flag(TIF_POLLING_NRFLAG); - smp_mb__after_clear_bit(); - set_bl_bit(); - while (!need_resched()) - cpu_sleep(); - clear_bl_bit(); - set_thread_flag(TIF_POLLING_NRFLAG); - } else - while (!need_resched()) - cpu_relax(); -} - -void cpu_idle(void) -{ - set_thread_flag(TIF_POLLING_NRFLAG); - - /* endless idle loop with no priority at all */ - while (1) { - void (*idle)(void) = pm_idle; - - if (!idle) - idle = default_idle; - - tick_nohz_stop_sched_tick(1); - while (!need_resched()) - idle(); - tick_nohz_restart_sched_tick(); - - preempt_enable_no_resched(); - schedule(); - preempt_disable(); - check_pgt_cache(); - } -} - void machine_restart(char * __unused) { /* SR.BL=1 and invoke address error to let CPU reset (manual reset) */ @@ -115,8 +58,8 @@ void machine_power_off(void) void show_regs(struct pt_regs * regs) { printk("\n"); - printk("Pid : %d, Comm: %20s\n", task_pid_nr(current), current->comm); - printk("CPU : %d %s (%s %.*s)\n", + printk("Pid : %d, Comm: \t\t%s\n", task_pid_nr(current), current->comm); + printk("CPU : %d \t\t%s (%s %.*s)\n\n", smp_processor_id(), print_tainted(), init_utsname()->release, (int)strcspn(init_utsname()->version, " "), init_utsname()->version); @@ -148,26 +91,16 @@ void show_regs(struct pt_regs * regs) regs->mach, regs->macl, regs->gbr, regs->pr); show_trace(NULL, (unsigned long *)regs->regs[15], regs); + show_code(regs); } /* * Create a kernel thread */ - -/* - * This is the mechanism for creating a new kernel thread. - * - */ -extern void kernel_thread_helper(void); -__asm__(".align 5\n" - "kernel_thread_helper:\n\t" - "jsr @r5\n\t" - " nop\n\t" - "mov.l 1f, r1\n\t" - "jsr @r1\n\t" - " mov r0, r4\n\t" - ".align 2\n\t" - "1:.long do_exit"); +ATTRIB_NORET void kernel_thread_helper(void *arg, int (*fn)(void *)) +{ + do_exit(fn(arg)); +} /* Don't use this in BL=1(cli). Or else, CPU resets! */ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c index b7aa09235b51..a7e5f2e74bac 100644 --- a/arch/sh/kernel/process_64.c +++ b/arch/sh/kernel/process_64.c @@ -23,7 +23,6 @@ #include <linux/reboot.h> #include <linux/init.h> #include <linux/module.h> -#include <linux/proc_fs.h> #include <linux/io.h> #include <asm/syscalls.h> #include <asm/uaccess.h> @@ -33,56 +32,6 @@ struct task_struct *last_task_used_math = NULL; -static int hlt_counter = 1; - -#define HARD_IDLE_TIMEOUT (HZ / 3) - -static int __init nohlt_setup(char *__unused) -{ - hlt_counter = 1; - return 1; -} - -static int __init hlt_setup(char *__unused) -{ - hlt_counter = 0; - return 1; -} - -__setup("nohlt", nohlt_setup); -__setup("hlt", hlt_setup); - -static inline void hlt(void) -{ - __asm__ __volatile__ ("sleep" : : : "memory"); -} - -/* - * The idle loop on a uniprocessor SH.. 
- */ -void cpu_idle(void) -{ - /* endless idle loop with no priority at all */ - while (1) { - if (hlt_counter) { - while (!need_resched()) - cpu_relax(); - } else { - local_irq_disable(); - while (!need_resched()) { - local_irq_enable(); - hlt(); - local_irq_disable(); - } - local_irq_enable(); - } - preempt_enable_no_resched(); - schedule(); - preempt_disable(); - } - -} - void machine_restart(char * __unused) { extern void phys_stext(void); @@ -97,13 +46,6 @@ void machine_halt(void) void machine_power_off(void) { -#if 0 - /* Disable watchdog timer */ - ctrl_outl(0xa5000000, WTCSR); - /* Configure deep standby on sleep */ - ctrl_outl(0x03, STBCR); -#endif - __asm__ __volatile__ ( "sleep\n\t" "synci\n\t" @@ -113,9 +55,6 @@ void machine_power_off(void) panic("Unexpected wakeup!\n"); } -void (*pm_power_off)(void) = machine_power_off; -EXPORT_SYMBOL(pm_power_off); - void show_regs(struct pt_regs * regs) { unsigned long long ah, al, bh, bl, ch, cl; @@ -365,18 +304,6 @@ void show_regs(struct pt_regs * regs) } } -struct task_struct * alloc_task_struct(void) -{ - /* Get task descriptor pages */ - return (struct task_struct *) - __get_free_pages(GFP_KERNEL, get_order(THREAD_SIZE)); -} - -void free_task_struct(struct task_struct *p) -{ - free_pages((unsigned long) p, get_order(THREAD_SIZE)); -} - /* * Create a kernel thread */ @@ -662,41 +589,3 @@ unsigned long get_wchan(struct task_struct *p) #endif return pc; } - -/* Provide a /proc/asids file that lists out the - ASIDs currently associated with the processes. (If the DM.PC register is - examined through the debug link, this shows ASID + PC. To make use of this, - the PID->ASID relationship needs to be known. This is primarily for - debugging.) - */ - -#if defined(CONFIG_SH64_PROC_ASIDS) -static int -asids_proc_info(char *buf, char **start, off_t fpos, int length, int *eof, void *data) -{ - int len=0; - struct task_struct *p; - read_lock(&tasklist_lock); - for_each_process(p) { - int pid = p->pid; - - if (!pid) - continue; - if (p->mm) - len += sprintf(buf+len, "%5d : %02lx\n", pid, - asid_cache(smp_processor_id())); - else - len += sprintf(buf+len, "%5d : (none)\n", pid); - } - read_unlock(&tasklist_lock); - *eof = 1; - return len; -} - -static int __init register_proc_asids(void) -{ - create_proc_read_entry("asids", 0, NULL, asids_proc_info, NULL); - return 0; -} -__initcall(register_proc_asids); -#endif diff --git a/arch/sh/kernel/ptrace_64.c b/arch/sh/kernel/ptrace_64.c index e15b099c1f06..695097438f02 100644 --- a/arch/sh/kernel/ptrace_64.c +++ b/arch/sh/kernel/ptrace_64.c @@ -2,7 +2,7 @@ * arch/sh/kernel/ptrace_64.c * * Copyright (C) 2000, 2001 Paolo Alberelli - * Copyright (C) 2003 - 2007 Paul Mundt + * Copyright (C) 2003 - 2008 Paul Mundt * * Started from SH3/4 version: * SuperH version: Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka @@ -29,6 +29,8 @@ #include <linux/audit.h> #include <linux/seccomp.h> #include <linux/tracehook.h> +#include <linux/elf.h> +#include <linux/regset.h> #include <asm/io.h> #include <asm/uaccess.h> #include <asm/pgtable.h> @@ -137,6 +139,165 @@ void user_disable_single_step(struct task_struct *child) regs->sr &= ~SR_SSTEP; } +static int genregs_get(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + const struct pt_regs *regs = task_pt_regs(target); + int ret; + + /* PC, SR, SYSCALL */ + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, + &regs->pc, + 0, 3 * sizeof(unsigned long long)); + + /* R1 -> R63 */ + if (!ret) 
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, + regs->regs, + offsetof(struct pt_regs, regs[0]), + 63 * sizeof(unsigned long long)); + /* TR0 -> TR7 */ + if (!ret) + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, + regs->tregs, + offsetof(struct pt_regs, tregs[0]), + 8 * sizeof(unsigned long long)); + + if (!ret) + ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, + sizeof(struct pt_regs), -1); + + return ret; +} + +static int genregs_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + struct pt_regs *regs = task_pt_regs(target); + int ret; + + /* PC, SR, SYSCALL */ + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &regs->pc, + 0, 3 * sizeof(unsigned long long)); + + /* R1 -> R63 */ + if (!ret && count > 0) + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + regs->regs, + offsetof(struct pt_regs, regs[0]), + 63 * sizeof(unsigned long long)); + + /* TR0 -> TR7 */ + if (!ret && count > 0) + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + regs->tregs, + offsetof(struct pt_regs, tregs[0]), + 8 * sizeof(unsigned long long)); + + if (!ret) + ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, + sizeof(struct pt_regs), -1); + + return ret; +} + +#ifdef CONFIG_SH_FPU +int fpregs_get(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + int ret; + + ret = init_fpu(target); + if (ret) + return ret; + + return user_regset_copyout(&pos, &count, &kbuf, &ubuf, + &target->thread.fpu.hard, 0, -1); +} + +static int fpregs_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + int ret; + + ret = init_fpu(target); + if (ret) + return ret; + + set_stopped_child_used_math(target); + + return user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &target->thread.fpu.hard, 0, -1); +} + +static int fpregs_active(struct task_struct *target, + const struct user_regset *regset) +{ + return tsk_used_math(target) ? regset->n : 0; +} +#endif + +/* + * These are our native regset flavours. 
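Note: the regset get/set routines above, together with the sh_regsets[] table that follows, feed both the ELF core-dump notes (NT_PRSTATUS/NT_PRFPREG) and the PTRACE_GETREGS/PTRACE_SETREGS requests wired up further below. Purely as an illustration, and not part of this patch, a tracer could read a stopped sh64 task's general registers roughly as follows; header locations and the availability of PTRACE_GETREGS vary by libc, so treat this as a sketch.

	#include <stdio.h>
	#include <sys/ptrace.h>
	#include <sys/types.h>
	#include <asm/ptrace.h>	/* sh64 struct pt_regs: pc, sr, syscall_nr, regs[], tregs[] */

	static void dump_regs(pid_t pid)
	{
		struct pt_regs regs;

		/* pid must already be ptrace-attached and stopped */
		if (ptrace(PTRACE_GETREGS, pid, NULL, &regs) == 0)
			printf("pid %d: pc=%#llx sr=%#llx\n", (int)pid,
			       (unsigned long long)regs.pc,
			       (unsigned long long)regs.sr);
	}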
+ */ +enum sh_regset { + REGSET_GENERAL, +#ifdef CONFIG_SH_FPU + REGSET_FPU, +#endif +}; + +static const struct user_regset sh_regsets[] = { + /* + * Format is: + * PC, SR, SYSCALL, + * R1 --> R63, + * TR0 --> TR7, + */ + [REGSET_GENERAL] = { + .core_note_type = NT_PRSTATUS, + .n = ELF_NGREG, + .size = sizeof(long long), + .align = sizeof(long long), + .get = genregs_get, + .set = genregs_set, + }, + +#ifdef CONFIG_SH_FPU + [REGSET_FPU] = { + .core_note_type = NT_PRFPREG, + .n = sizeof(struct user_fpu_struct) / + sizeof(long long), + .size = sizeof(long long), + .align = sizeof(long long), + .get = fpregs_get, + .set = fpregs_set, + .active = fpregs_active, + }, +#endif +}; + +static const struct user_regset_view user_sh64_native_view = { + .name = "sh64", + .e_machine = EM_SH, + .regsets = sh_regsets, + .n = ARRAY_SIZE(sh_regsets), +}; + +const struct user_regset_view *task_user_regset_view(struct task_struct *task) +{ + return &user_sh64_native_view; +} + long arch_ptrace(struct task_struct *child, long request, long addr, long data) { int ret; @@ -195,10 +356,33 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) } break; + case PTRACE_GETREGS: + return copy_regset_to_user(child, &user_sh64_native_view, + REGSET_GENERAL, + 0, sizeof(struct pt_regs), + (void __user *)data); + case PTRACE_SETREGS: + return copy_regset_from_user(child, &user_sh64_native_view, + REGSET_GENERAL, + 0, sizeof(struct pt_regs), + (const void __user *)data); +#ifdef CONFIG_SH_FPU + case PTRACE_GETFPREGS: + return copy_regset_to_user(child, &user_sh64_native_view, + REGSET_FPU, + 0, sizeof(struct user_fpu_struct), + (void __user *)data); + case PTRACE_SETFPREGS: + return copy_regset_from_user(child, &user_sh64_native_view, + REGSET_FPU, + 0, sizeof(struct user_fpu_struct), + (const void __user *)data); +#endif default: ret = ptrace_request(child, request, addr, data); break; } + return ret; } diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c index e7152cc6930e..534247508572 100644 --- a/arch/sh/kernel/setup.c +++ b/arch/sh/kernel/setup.c @@ -417,6 +417,7 @@ void __init setup_arch(char **cmdline_p) } static const char *cpu_name[] = { + [CPU_SH7201] = "SH7201", [CPU_SH7203] = "SH7203", [CPU_SH7263] = "SH7263", [CPU_SH7206] = "SH7206", [CPU_SH7619] = "SH7619", [CPU_SH7705] = "SH7705", [CPU_SH7706] = "SH7706", diff --git a/arch/sh/kernel/sh_bios.c b/arch/sh/kernel/sh_bios.c index d1bcac4fa269..c852f7805728 100644 --- a/arch/sh/kernel/sh_bios.c +++ b/arch/sh/kernel/sh_bios.c @@ -8,69 +8,50 @@ #include <linux/module.h> #include <asm/sh_bios.h> -#define BIOS_CALL_CONSOLE_WRITE 0 -#define BIOS_CALL_READ_BLOCK 1 +#define BIOS_CALL_CONSOLE_WRITE 0 #define BIOS_CALL_ETH_NODE_ADDR 10 #define BIOS_CALL_SHUTDOWN 11 -#define BIOS_CALL_CHAR_OUT 0x1f /* TODO: hack */ -#define BIOS_CALL_GDB_GET_MODE_PTR 0xfe -#define BIOS_CALL_GDB_DETACH 0xff +#define BIOS_CALL_CHAR_OUT 0x1f /* TODO: hack */ +#define BIOS_CALL_GDB_DETACH 0xff -static __inline__ long sh_bios_call(long func, long arg0, long arg1, long arg2, long arg3) +static inline long sh_bios_call(long func, long arg0, long arg1, long arg2, + long arg3) { - register long r0 __asm__("r0") = func; - register long r4 __asm__("r4") = arg0; - register long r5 __asm__("r5") = arg1; - register long r6 __asm__("r6") = arg2; - register long r7 __asm__("r7") = arg3; - __asm__ __volatile__("trapa #0x3f" - : "=z" (r0) - : "0" (r0), "r" (r4), "r" (r5), "r" (r6), "r" (r7) - : "memory"); - return r0; + register long r0 __asm__("r0") = func; + register 
long r4 __asm__("r4") = arg0; + register long r5 __asm__("r5") = arg1; + register long r6 __asm__("r6") = arg2; + register long r7 __asm__("r7") = arg3; + + __asm__ __volatile__("trapa #0x3f":"=z"(r0) + :"0"(r0), "r"(r4), "r"(r5), "r"(r6), "r"(r7) + :"memory"); + return r0; } - void sh_bios_console_write(const char *buf, unsigned int len) { - sh_bios_call(BIOS_CALL_CONSOLE_WRITE, (long)buf, (long)len, 0, 0); + sh_bios_call(BIOS_CALL_CONSOLE_WRITE, (long)buf, (long)len, 0, 0); } - void sh_bios_char_out(char ch) { - sh_bios_call(BIOS_CALL_CHAR_OUT, ch, 0, 0, 0); -} - - -int sh_bios_in_gdb_mode(void) -{ - static char queried = 0; - static char *gdb_mode_p = 0; - - if (!queried) - { - /* Query the gdb stub for address of its gdb mode variable */ - long r = sh_bios_call(BIOS_CALL_GDB_GET_MODE_PTR, 0, 0, 0, 0); - if (r != ~0) /* BIOS returns -1 for unknown function */ - gdb_mode_p = (char *)r; - queried = 1; - } - return (gdb_mode_p != 0 ? *gdb_mode_p : 0); + sh_bios_call(BIOS_CALL_CHAR_OUT, ch, 0, 0, 0); } void sh_bios_gdb_detach(void) { - sh_bios_call(BIOS_CALL_GDB_DETACH, 0, 0, 0, 0); + sh_bios_call(BIOS_CALL_GDB_DETACH, 0, 0, 0, 0); } -EXPORT_SYMBOL(sh_bios_gdb_detach); +EXPORT_SYMBOL_GPL(sh_bios_gdb_detach); -void sh_bios_get_node_addr (unsigned char *node_addr) +void sh_bios_get_node_addr(unsigned char *node_addr) { - sh_bios_call(BIOS_CALL_ETH_NODE_ADDR, 0, (long)node_addr, 0, 0); + sh_bios_call(BIOS_CALL_ETH_NODE_ADDR, 0, (long)node_addr, 0, 0); } +EXPORT_SYMBOL_GPL(sh_bios_get_node_addr); void sh_bios_shutdown(unsigned int how) { - sh_bios_call(BIOS_CALL_SHUTDOWN, how, 0, 0, 0); + sh_bios_call(BIOS_CALL_SHUTDOWN, how, 0, 0, 0); } diff --git a/arch/sh/kernel/sh_ksyms_32.c b/arch/sh/kernel/sh_ksyms_32.c index 92ae5e6c099e..528de2955c81 100644 --- a/arch/sh/kernel/sh_ksyms_32.c +++ b/arch/sh/kernel/sh_ksyms_32.c @@ -52,16 +52,12 @@ EXPORT_SYMBOL(__const_udelay); #define DECLARE_EXPORT(name) \ extern void name(void);EXPORT_SYMBOL(name) -#define MAYBE_DECLARE_EXPORT(name) \ - extern void name(void) __weak;EXPORT_SYMBOL(name) -/* These symbols are generated by the compiler itself */ DECLARE_EXPORT(__udivsi3); DECLARE_EXPORT(__sdivsi3); +DECLARE_EXPORT(__lshrsi3); DECLARE_EXPORT(__ashrsi3); DECLARE_EXPORT(__ashlsi3); -DECLARE_EXPORT(__ashrdi3); -DECLARE_EXPORT(__ashldi3); DECLARE_EXPORT(__ashiftrt_r4_6); DECLARE_EXPORT(__ashiftrt_r4_7); DECLARE_EXPORT(__ashiftrt_r4_8); @@ -79,8 +75,7 @@ DECLARE_EXPORT(__ashiftrt_r4_23); DECLARE_EXPORT(__ashiftrt_r4_24); DECLARE_EXPORT(__ashiftrt_r4_27); DECLARE_EXPORT(__ashiftrt_r4_30); -DECLARE_EXPORT(__lshrsi3); -DECLARE_EXPORT(__lshrdi3); +DECLARE_EXPORT(__movstr); DECLARE_EXPORT(__movstrSI8); DECLARE_EXPORT(__movstrSI12); DECLARE_EXPORT(__movstrSI16); @@ -95,31 +90,17 @@ DECLARE_EXPORT(__movstrSI48); DECLARE_EXPORT(__movstrSI52); DECLARE_EXPORT(__movstrSI56); DECLARE_EXPORT(__movstrSI60); -#if __GNUC__ == 4 -DECLARE_EXPORT(__movmem); -#else -DECLARE_EXPORT(__movstr); -#endif - -#if __GNUC__ == 4 +DECLARE_EXPORT(__movstr_i4_even); +DECLARE_EXPORT(__movstr_i4_odd); +DECLARE_EXPORT(__movstrSI12_i4); DECLARE_EXPORT(__movmem_i4_even); DECLARE_EXPORT(__movmem_i4_odd); DECLARE_EXPORT(__movmemSI12_i4); - -#if (__GNUC_MINOR__ >= 2 || defined(__GNUC_STM_RELEASE__)) -/* - * GCC >= 4.2 emits these for division, as do GCC 4.1.x versions of the ST - * compiler which include backported patches. 
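Note (illustrative, not part of the patch): each DECLARE_EXPORT() line in this file simply declares the libgcc routine and exports the symbol so that modules can link against it; DECLARE_EXPORT(__udivsi3), for example, expands to:

	extern void __udivsi3(void);
	EXPORT_SYMBOL(__udivsi3);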
DECLARE_EXPORT(__udiv_qrnnd_16); -MAYBE_DECLARE_EXPORT(__sdivsi3_i4i); -MAYBE_DECLARE_EXPORT(__udivsi3_i4i); -#endif -#else /* GCC 3.x */ -DECLARE_EXPORT(__movstr_i4_even); -DECLARE_EXPORT(__movstr_i4_odd); -DECLARE_EXPORT(__movstrSI12_i4); -#endif /* __GNUC__ == 4 */ +DECLARE_EXPORT(__sdivsi3_i4); +DECLARE_EXPORT(__udivsi3_i4); +DECLARE_EXPORT(__sdivsi3_i4i); +DECLARE_EXPORT(__udivsi3_i4i); #if !defined(CONFIG_CACHE_OFF) && (defined(CONFIG_CPU_SH4) || \ defined(CONFIG_SH7705_CACHE_32KB)) diff --git a/arch/sh/kernel/sh_ksyms_64.c b/arch/sh/kernel/sh_ksyms_64.c index 9324d32adacc..0d74d6b8774e 100644 --- a/arch/sh/kernel/sh_ksyms_64.c +++ b/arch/sh/kernel/sh_ksyms_64.c @@ -65,15 +65,16 @@ EXPORT_SYMBOL(copy_page); EXPORT_SYMBOL(__copy_user); EXPORT_SYMBOL(empty_zero_page); EXPORT_SYMBOL(memcpy); +EXPORT_SYMBOL(memset); EXPORT_SYMBOL(__udelay); EXPORT_SYMBOL(__ndelay); EXPORT_SYMBOL(__const_udelay); +EXPORT_SYMBOL(strlen); +EXPORT_SYMBOL(strcpy); /* Ugh. These come in from libgcc.a at link time. */ #define DECLARE_EXPORT(name) extern void name(void);EXPORT_SYMBOL(name) DECLARE_EXPORT(__sdivsi3); -DECLARE_EXPORT(__sdivsi3_2); -DECLARE_EXPORT(__muldi3); DECLARE_EXPORT(__udivsi3); DECLARE_EXPORT(__div_table); diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c index 69d09c0b3498..77c21bde376a 100644 --- a/arch/sh/kernel/signal_32.c +++ b/arch/sh/kernel/signal_32.c @@ -533,7 +533,6 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, { int ret; - /* Set up the stack frame */ if (ka->sa.sa_flags & SA_SIGINFO) ret = setup_rt_frame(sig, ka, info, oldset, regs); diff --git a/arch/sh/kernel/signal_64.c b/arch/sh/kernel/signal_64.c index ce3e851dffcb..b22fdfaaa191 100644 --- a/arch/sh/kernel/signal_64.c +++ b/arch/sh/kernel/signal_64.c @@ -2,7 +2,7 @@ * arch/sh/kernel/signal_64.c * * Copyright (C) 2000, 2001 Paolo Alberelli - * Copyright (C) 2003 Paul Mundt + * Copyright (C) 2003 - 2008 Paul Mundt * Copyright (C) 2004 Richard Curnow * * This file is subject to the terms and conditions of the GNU General Public @@ -43,10 +43,38 @@ #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) -static void +static int handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, sigset_t *oldset, struct pt_regs * regs); +static inline void +handle_syscall_restart(struct pt_regs *regs, struct sigaction *sa) +{ + /* If we're not from a syscall, bail out */ + if (regs->syscall_nr < 0) + return; + + /* check for system call restart.. */ + switch (regs->regs[REG_RET]) { + case -ERESTART_RESTARTBLOCK: + case -ERESTARTNOHAND: + no_system_call_restart: + regs->regs[REG_RET] = -EINTR; + regs->sr |= 1; + break; + + case -ERESTARTSYS: + if (!(sa->sa_flags & SA_RESTART)) + goto no_system_call_restart; + /* fallthrough */ + case -ERESTARTNOINTR: + /* Decode syscall # */ + regs->regs[REG_RET] = regs->syscall_nr; + regs->pc -= 4; + break; + } +} + /* * Note that 'init' is a special process: it doesn't get signals it doesn't * want to handle. Thus you cannot kill init even with a SIGKILL even by @@ -80,21 +108,23 @@ static int do_signal(struct pt_regs *regs, sigset_t *oldset) oldset = &current->blocked; signr = get_signal_to_deliver(&info, &ka, regs, 0); - if (signr > 0) { - /* Whee! Actually deliver the signal. */ - handle_signal(signr, &info, &ka, oldset, regs); + if (regs->sr & 1) + handle_syscall_restart(regs, &ka.sa); - /* - * If a signal was successfully delivered, the saved sigmask - * is in its frame, and we can clear the TIF_RESTORE_SIGMASK - * flag. 
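Note: the restart policy in handle_syscall_restart() above is what user space sees as the difference between a transparent restart and EINTR: for -ERESTARTSYS with SA_RESTART set (and always for -ERESTARTNOINTR) the syscall number is restored and the PC wound back over the trapa so the call is re-issued, otherwise the call completes with -EINTR and the application has to retry. A typical retry loop, shown only as an illustration of that contract and not taken from this patch:

	#include <errno.h>
	#include <unistd.h>

	/* Retry a read() interrupted by a handler installed without SA_RESTART. */
	static ssize_t read_retry(int fd, void *buf, size_t len)
	{
		ssize_t n;

		do {
			n = read(fd, buf, len);
		} while (n < 0 && errno == EINTR);

		return n;
	}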
- */ - if (test_thread_flag(TIF_RESTORE_SIGMASK)) - clear_thread_flag(TIF_RESTORE_SIGMASK); - - tracehook_signal_handler(signr, &info, &ka, regs, 0); - return 1; + /* Whee! Actually deliver the signal. */ + if (handle_signal(signr, &info, &ka, oldset, regs) == 0) { + /* + * If a signal was successfully delivered, the + * saved sigmask is in its frame, and we can + * clear the TIF_RESTORE_SIGMASK flag. + */ + if (test_thread_flag(TIF_RESTORE_SIGMASK)) + clear_thread_flag(TIF_RESTORE_SIGMASK); + + tracehook_signal_handler(signr, &info, &ka, regs, 0); + return 1; + } } no_signal: @@ -129,7 +159,6 @@ no_signal: /* * Atomically swap in the new signal mask, and wait for a signal. */ - asmlinkage int sys_sigsuspend(old_sigset_t mask, unsigned long r3, unsigned long r4, unsigned long r5, @@ -235,20 +264,16 @@ sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, return do_sigaltstack(uss, uoss, REF_REG_SP); } - /* * Do a signal return; undo the signal stack. */ - -struct sigframe -{ +struct sigframe { struct sigcontext sc; unsigned long extramask[_NSIG_WORDS-1]; long long retcode[2]; }; -struct rt_sigframe -{ +struct rt_sigframe { struct siginfo __user *pinfo; void *puc; struct siginfo info; @@ -450,7 +475,6 @@ badframe: /* * Set up a signal frame. */ - static int setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, unsigned long mask) @@ -504,8 +528,8 @@ get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size) void sa_default_restorer(void); /* See comments below */ void sa_default_rt_restorer(void); /* See comments below */ -static void setup_frame(int sig, struct k_sigaction *ka, - sigset_t *set, struct pt_regs *regs) +static int setup_frame(int sig, struct k_sigaction *ka, + sigset_t *set, struct pt_regs *regs) { struct sigframe __user *frame; int err = 0; @@ -596,23 +620,21 @@ static void setup_frame(int sig, struct k_sigaction *ka, set_fs(USER_DS); -#if DEBUG_SIG /* Broken %016Lx */ - printk("SIG deliver (#%d,%s:%d): sp=%p pc=%08Lx%08Lx link=%08Lx%08Lx\n", - signal, - current->comm, current->pid, frame, - regs->pc >> 32, regs->pc & 0xffffffff, - DEREF_REG_PR >> 32, DEREF_REG_PR & 0xffffffff); -#endif + pr_debug("SIG deliver (#%d,%s:%d): sp=%p pc=%08Lx%08Lx link=%08Lx%08Lx\n", + signal, current->comm, current->pid, frame, + regs->pc >> 32, regs->pc & 0xffffffff, + DEREF_REG_PR >> 32, DEREF_REG_PR & 0xffffffff); - return; + return 0; give_sigsegv: force_sigsegv(sig, current); + return -EFAULT; } -static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, - sigset_t *set, struct pt_regs *regs) +static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, + sigset_t *set, struct pt_regs *regs) { struct rt_sigframe __user *frame; int err = 0; @@ -702,62 +724,46 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, set_fs(USER_DS); -#if DEBUG_SIG - /* Broken %016Lx */ - printk("SIG deliver (#%d,%s:%d): sp=%p pc=%08Lx%08Lx link=%08Lx%08Lx\n", - signal, - current->comm, current->pid, frame, - regs->pc >> 32, regs->pc & 0xffffffff, - DEREF_REG_PR >> 32, DEREF_REG_PR & 0xffffffff); -#endif + pr_debug("SIG deliver (#%d,%s:%d): sp=%p pc=%08Lx%08Lx link=%08Lx%08Lx\n", + signal, current->comm, current->pid, frame, + regs->pc >> 32, regs->pc & 0xffffffff, + DEREF_REG_PR >> 32, DEREF_REG_PR & 0xffffffff); - return; + return 0; give_sigsegv: force_sigsegv(sig, current); + return -EFAULT; } /* * OK, we're invoking a handler */ - -static void +static int handle_signal(unsigned long sig, siginfo_t *info, 
struct k_sigaction *ka, sigset_t *oldset, struct pt_regs * regs) { - /* Are we from a system call? */ - if (regs->syscall_nr >= 0) { - /* If so, check system call restarting.. */ - switch (regs->regs[REG_RET]) { - case -ERESTART_RESTARTBLOCK: - case -ERESTARTNOHAND: - no_system_call_restart: - regs->regs[REG_RET] = -EINTR; - break; - - case -ERESTARTSYS: - if (!(ka->sa.sa_flags & SA_RESTART)) - goto no_system_call_restart; - /* fallthrough */ - case -ERESTARTNOINTR: - /* Decode syscall # */ - regs->regs[REG_RET] = regs->syscall_nr; - regs->pc -= 4; - } - } + int ret; /* Set up the stack frame */ if (ka->sa.sa_flags & SA_SIGINFO) - setup_rt_frame(sig, ka, info, oldset, regs); + ret = setup_rt_frame(sig, ka, info, oldset, regs); else - setup_frame(sig, ka, oldset, regs); + ret = setup_frame(sig, ka, oldset, regs); + + if (ka->sa.sa_flags & SA_ONESHOT) + ka->sa.sa_handler = SIG_DFL; + + if (ret == 0) { + spin_lock_irq(&current->sighand->siglock); + sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); + if (!(ka->sa.sa_flags & SA_NODEFER)) + sigaddset(&current->blocked,sig); + recalc_sigpending(); + spin_unlock_irq(&current->sighand->siglock); + } - spin_lock_irq(&current->sighand->siglock); - sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); - if (!(ka->sa.sa_flags & SA_NODEFER)) - sigaddset(&current->blocked,sig); - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); + return ret; } asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags) diff --git a/arch/sh/kernel/sys_sh.c b/arch/sh/kernel/sys_sh.c index 38f098c9c72d..58dfc02c7af1 100644 --- a/arch/sh/kernel/sys_sh.c +++ b/arch/sh/kernel/sys_sh.c @@ -22,102 +22,10 @@ #include <linux/module.h> #include <linux/fs.h> #include <linux/ipc.h> -#include <asm/cacheflush.h> #include <asm/syscalls.h> #include <asm/uaccess.h> #include <asm/unistd.h> -unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */ -EXPORT_SYMBOL(shm_align_mask); - -#ifdef CONFIG_MMU -/* - * To avoid cache aliases, we map the shared page with same color. - */ -#define COLOUR_ALIGN(addr, pgoff) \ - ((((addr) + shm_align_mask) & ~shm_align_mask) + \ - (((pgoff) << PAGE_SHIFT) & shm_align_mask)) - -unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, - unsigned long len, unsigned long pgoff, unsigned long flags) -{ - struct mm_struct *mm = current->mm; - struct vm_area_struct *vma; - unsigned long start_addr; - int do_colour_align; - - if (flags & MAP_FIXED) { - /* We do not accept a shared mapping if it would violate - * cache aliasing constraints. - */ - if ((flags & MAP_SHARED) && (addr & shm_align_mask)) - return -EINVAL; - return addr; - } - - if (unlikely(len > TASK_SIZE)) - return -ENOMEM; - - do_colour_align = 0; - if (filp || (flags & MAP_SHARED)) - do_colour_align = 1; - - if (addr) { - if (do_colour_align) - addr = COLOUR_ALIGN(addr, pgoff); - else - addr = PAGE_ALIGN(addr); - - vma = find_vma(mm, addr); - if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) - return addr; - } - - if (len > mm->cached_hole_size) { - start_addr = addr = mm->free_area_cache; - } else { - mm->cached_hole_size = 0; - start_addr = addr = TASK_UNMAPPED_BASE; - } - -full_search: - if (do_colour_align) - addr = COLOUR_ALIGN(addr, pgoff); - else - addr = PAGE_ALIGN(mm->free_area_cache); - - for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { - /* At this point: (!vma || addr < vma->vm_end). */ - if (unlikely(TASK_SIZE - len < addr)) { - /* - * Start a new search - just in case we missed - * some holes. 
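Note: the COLOUR_ALIGN() rule in the code removed above keeps shared mappings alias-safe by forcing the low alias bits of the chosen address to match the file offset, so every mapping of a given page lands on the same cache colour. The worked example below is illustrative only; the 16KB alias granularity (shm_align_mask = 0x3fff), the 4KB page size and the addresses are assumed values, not taken from this patch.

	#include <stdio.h>

	int main(void)
	{
		unsigned long shm_align_mask = 0x3fff;	/* assumed 16KB alias size */
		unsigned long page_shift = 12;		/* assumed 4KB pages */
		unsigned long addr = 0x4000a000, pgoff = 3;

		unsigned long aligned = ((addr + shm_align_mask) & ~shm_align_mask) +
					((pgoff << page_shift) & shm_align_mask);

		/* 0x4000a000 rounds up to 0x4000c000, then +0x3000 for the offset's
		 * colour, giving 0x4000f000. */
		printf("%#lx\n", aligned);
		return 0;
	}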
- */ - if (start_addr != TASK_UNMAPPED_BASE) { - start_addr = addr = TASK_UNMAPPED_BASE; - mm->cached_hole_size = 0; - goto full_search; - } - return -ENOMEM; - } - if (likely(!vma || addr + len <= vma->vm_start)) { - /* - * Remember the place where we stopped the search: - */ - mm->free_area_cache = addr + len; - return addr; - } - if (addr + mm->cached_hole_size < vma->vm_start) - mm->cached_hole_size = vma->vm_start - addr; - - addr = vma->vm_end; - if (do_colour_align) - addr = COLOUR_ALIGN(addr, pgoff); - } -} -#endif /* CONFIG_MMU */ - static inline long do_mmap2(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, int fd, unsigned long pgoff) diff --git a/arch/sh/kernel/time_32.c b/arch/sh/kernel/time_32.c index 23ca711c27d2..8457f83242c5 100644 --- a/arch/sh/kernel/time_32.c +++ b/arch/sh/kernel/time_32.c @@ -125,11 +125,6 @@ void handle_timer_tick(void) if (current->pid) profile_tick(CPU_PROFILING); -#ifdef CONFIG_HEARTBEAT - if (sh_mv.mv_heartbeat != NULL) - sh_mv.mv_heartbeat(); -#endif - /* * Here we are in the timer irq handler. We just have irqs locally * disabled but we don't know if the timer_bh is running on the other @@ -277,11 +272,4 @@ void __init time_init(void) ((sh_hpt_frequency + 500) / 1000) / 1000, ((sh_hpt_frequency + 500) / 1000) % 1000); -#if defined(CONFIG_SH_KGDB) - /* - * Set up kgdb as requested. We do it here because the serial - * init uses the timer vars we just set up for figuring baud. - */ - kgdb_init(); -#endif } diff --git a/arch/sh/kernel/time_64.c b/arch/sh/kernel/time_64.c index bbb2af1004d9..59d2a03e8b3c 100644 --- a/arch/sh/kernel/time_64.c +++ b/arch/sh/kernel/time_64.c @@ -240,11 +240,6 @@ static inline void do_timer_interrupt(void) do_timer(1); -#ifdef CONFIG_HEARTBEAT - if (sh_mv.mv_heartbeat != NULL) - sh_mv.mv_heartbeat(); -#endif - /* * If we have an externally synchronized Linux clock, then update * RTC clock accordingly every ~11 minutes. 
Set_rtc_mmss() has to be diff --git a/arch/sh/kernel/timers/timer-mtu2.c b/arch/sh/kernel/timers/timer-mtu2.c index fe453c01f9c9..c3d237e1d566 100644 --- a/arch/sh/kernel/timers/timer-mtu2.c +++ b/arch/sh/kernel/timers/timer-mtu2.c @@ -34,7 +34,12 @@ #define MTU2_TIER_1 0xfffe4384 #define MTU2_TSR_1 0xfffe4385 #define MTU2_TCNT_1 0xfffe4386 /* 16-bit counter */ + +#if defined(CONFIG_CPU_SUBTYPE_SH7201) +#define MTU2_TGRA_1 0xfffe4388 +#else #define MTU2_TGRA_1 0xfffe438a +#endif #define STBCR3 0xfffe0408 diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c index 1e5c74efbacc..88807a2aacc3 100644 --- a/arch/sh/kernel/traps_32.c +++ b/arch/sh/kernel/traps_32.c @@ -28,17 +28,6 @@ #include <asm/fpu.h> #include <asm/kprobes.h> -#ifdef CONFIG_SH_KGDB -#include <asm/kgdb.h> -#define CHK_REMOTE_DEBUG(regs) \ -{ \ - if (kgdb_debug_hook && !user_mode(regs))\ - (*kgdb_debug_hook)(regs); \ -} -#else -#define CHK_REMOTE_DEBUG(regs) -#endif - #ifdef CONFIG_CPU_SH2 # define TRAP_RESERVED_INST 4 # define TRAP_ILLEGAL_SLOT_INST 6 @@ -94,7 +83,6 @@ void die(const char * str, struct pt_regs * regs, long err) printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter); - CHK_REMOTE_DEBUG(regs); print_modules(); show_regs(regs); @@ -683,13 +671,12 @@ asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5, error_code = lookup_exception_vector(); local_irq_enable(); - CHK_REMOTE_DEBUG(regs); force_sig(SIGILL, tsk); die_if_no_fixup("reserved instruction", regs, error_code); } #ifdef CONFIG_SH_FPU_EMU -static int emulate_branch(unsigned short inst, struct pt_regs* regs) +static int emulate_branch(unsigned short inst, struct pt_regs *regs) { /* * bfs: 8fxx: PC+=d*2+4; @@ -702,27 +689,32 @@ static int emulate_branch(unsigned short inst, struct pt_regs* regs) * jsr: 4x0b: PC=Rn after PR=PC+4; * rts: 000b: PC=PR; */ - if ((inst & 0xfd00) == 0x8d00) { + if (((inst & 0xf000) == 0xb000) || /* bsr */ + ((inst & 0xf0ff) == 0x0003) || /* bsrf */ + ((inst & 0xf0ff) == 0x400b)) /* jsr */ + regs->pr = regs->pc + 4; + + if ((inst & 0xfd00) == 0x8d00) { /* bfs, bts */ regs->pc += SH_PC_8BIT_OFFSET(inst); return 0; } - if ((inst & 0xe000) == 0xa000) { + if ((inst & 0xe000) == 0xa000) { /* bra, bsr */ regs->pc += SH_PC_12BIT_OFFSET(inst); return 0; } - if ((inst & 0xf0df) == 0x0003) { + if ((inst & 0xf0df) == 0x0003) { /* braf, bsrf */ regs->pc += regs->regs[(inst & 0x0f00) >> 8] + 4; return 0; } - if ((inst & 0xf0df) == 0x400b) { + if ((inst & 0xf0df) == 0x400b) { /* jmp, jsr */ regs->pc = regs->regs[(inst & 0x0f00) >> 8]; return 0; } - if ((inst & 0xffff) == 0x000b) { + if ((inst & 0xffff) == 0x000b) { /* rts */ regs->pc = regs->pr; return 0; } @@ -756,7 +748,6 @@ asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5, inst = lookup_exception_vector(); local_irq_enable(); - CHK_REMOTE_DEBUG(regs); force_sig(SIGILL, tsk); die_if_no_fixup("illegal slot instruction", regs, inst); } @@ -868,10 +859,7 @@ void show_trace(struct task_struct *tsk, unsigned long *sp, if (regs && user_mode(regs)) return; - printk("\nCall trace: "); -#ifdef CONFIG_KALLSYMS - printk("\n"); -#endif + printk("\nCall trace:\n"); while (!kstack_end(sp)) { addr = *sp++; diff --git a/arch/sh/kernel/vsyscall/vsyscall.c b/arch/sh/kernel/vsyscall/vsyscall.c index 95f4de0800ec..3f7e415be86a 100644 --- a/arch/sh/kernel/vsyscall/vsyscall.c +++ b/arch/sh/kernel/vsyscall/vsyscall.c @@ -59,8 +59,7 @@ int __init vsyscall_init(void) } /* Setup a VMA at program startup for the vsyscall page */ -int 
arch_setup_additional_pages(struct linux_binprm *bprm, - int executable_stack) +int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) { struct mm_struct *mm = current->mm; unsigned long addr; diff --git a/arch/sh/lib/Makefile b/arch/sh/lib/Makefile index 8596cc78e18d..aaea580b65bb 100644 --- a/arch/sh/lib/Makefile +++ b/arch/sh/lib/Makefile @@ -5,12 +5,26 @@ lib-y = delay.o memset.o memmove.o memchr.o \ checksum.o strlen.o div64.o div64-generic.o +# Extracted from libgcc +lib-y += movmem.o ashldi3.o ashrdi3.o lshrdi3.o \ + ashlsi3.o ashrsi3.o ashiftrt.o lshrsi3.o \ + udiv_qrnnd.o + +udivsi3-y := udivsi3_i4i-Os.o + +ifneq ($(CONFIG_CC_OPTIMIZE_FOR_SIZE),y) +udivsi3-$(CONFIG_CPU_SH3) := udivsi3_i4i.o +udivsi3-$(CONFIG_CPU_SH4) := udivsi3_i4i.o +endif +udivsi3-y += udivsi3.o + obj-y += io.o memcpy-y := memcpy.o memcpy-$(CONFIG_CPU_SH4) := memcpy-sh4.o lib-$(CONFIG_MMU) += copy_page.o clear_page.o -lib-y += $(memcpy-y) +lib-$(CONFIG_FUNCTION_TRACER) += mcount.o +lib-y += $(memcpy-y) $(udivsi3-y) EXTRA_CFLAGS += -Werror diff --git a/arch/sh/lib/ashiftrt.S b/arch/sh/lib/ashiftrt.S new file mode 100644 index 000000000000..45ce86558f46 --- /dev/null +++ b/arch/sh/lib/ashiftrt.S @@ -0,0 +1,149 @@ +/* Copyright (C) 1994, 1995, 1997, 1998, 1999, 2000, 2001, 2002, 2003, + 2004, 2005, 2006 + Free Software Foundation, Inc. + +This file is free software; you can redistribute it and/or modify it +under the terms of the GNU General Public License as published by the +Free Software Foundation; either version 2, or (at your option) any +later version. + +In addition to the permissions in the GNU General Public License, the +Free Software Foundation gives you unlimited permission to link the +compiled version of this file into combinations with other programs, +and to distribute those combinations without any restriction coming +from the use of this file. (The General Public License restrictions +do apply in other respects; for example, they cover modification of +the file, and distribution when not linked into a combine +executable.) + +This file is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; see the file COPYING. If not, write to +the Free Software Foundation, 51 Franklin Street, Fifth Floor, +Boston, MA 02110-1301, USA. */ + +!! libgcc routines for the Renesas / SuperH SH CPUs. +!! Contributed by Steve Chamberlain. +!! sac@cygnus.com + +!! ashiftrt_r4_x, ___ashrsi3, ___ashlsi3, ___lshrsi3 routines +!! recoded in assembly by Toshiyasu Morita +!! 
tm@netcom.com + +/* SH2 optimizations for ___ashrsi3, ___ashlsi3, ___lshrsi3 and + ELF local label prefixes by J"orn Rennecke + amylaar@cygnus.com */ + + .global __ashiftrt_r4_0 + .global __ashiftrt_r4_1 + .global __ashiftrt_r4_2 + .global __ashiftrt_r4_3 + .global __ashiftrt_r4_4 + .global __ashiftrt_r4_5 + .global __ashiftrt_r4_6 + .global __ashiftrt_r4_7 + .global __ashiftrt_r4_8 + .global __ashiftrt_r4_9 + .global __ashiftrt_r4_10 + .global __ashiftrt_r4_11 + .global __ashiftrt_r4_12 + .global __ashiftrt_r4_13 + .global __ashiftrt_r4_14 + .global __ashiftrt_r4_15 + .global __ashiftrt_r4_16 + .global __ashiftrt_r4_17 + .global __ashiftrt_r4_18 + .global __ashiftrt_r4_19 + .global __ashiftrt_r4_20 + .global __ashiftrt_r4_21 + .global __ashiftrt_r4_22 + .global __ashiftrt_r4_23 + .global __ashiftrt_r4_24 + .global __ashiftrt_r4_25 + .global __ashiftrt_r4_26 + .global __ashiftrt_r4_27 + .global __ashiftrt_r4_28 + .global __ashiftrt_r4_29 + .global __ashiftrt_r4_30 + .global __ashiftrt_r4_31 + .global __ashiftrt_r4_32 + + .align 1 +__ashiftrt_r4_32: +__ashiftrt_r4_31: + rotcl r4 + rts + subc r4,r4 +__ashiftrt_r4_30: + shar r4 +__ashiftrt_r4_29: + shar r4 +__ashiftrt_r4_28: + shar r4 +__ashiftrt_r4_27: + shar r4 +__ashiftrt_r4_26: + shar r4 +__ashiftrt_r4_25: + shar r4 +__ashiftrt_r4_24: + shlr16 r4 + shlr8 r4 + rts + exts.b r4,r4 +__ashiftrt_r4_23: + shar r4 +__ashiftrt_r4_22: + shar r4 +__ashiftrt_r4_21: + shar r4 +__ashiftrt_r4_20: + shar r4 +__ashiftrt_r4_19: + shar r4 +__ashiftrt_r4_18: + shar r4 +__ashiftrt_r4_17: + shar r4 +__ashiftrt_r4_16: + shlr16 r4 + rts + exts.w r4,r4 +__ashiftrt_r4_15: + shar r4 +__ashiftrt_r4_14: + shar r4 +__ashiftrt_r4_13: + shar r4 +__ashiftrt_r4_12: + shar r4 +__ashiftrt_r4_11: + shar r4 +__ashiftrt_r4_10: + shar r4 +__ashiftrt_r4_9: + shar r4 +__ashiftrt_r4_8: + shar r4 +__ashiftrt_r4_7: + shar r4 +__ashiftrt_r4_6: + shar r4 +__ashiftrt_r4_5: + shar r4 +__ashiftrt_r4_4: + shar r4 +__ashiftrt_r4_3: + shar r4 +__ashiftrt_r4_2: + shar r4 +__ashiftrt_r4_1: + rts + shar r4 +__ashiftrt_r4_0: + rts + nop diff --git a/arch/sh/lib/ashldi3.c b/arch/sh/lib/ashldi3.c new file mode 100644 index 000000000000..beb80f316095 --- /dev/null +++ b/arch/sh/lib/ashldi3.c @@ -0,0 +1,29 @@ +#include <linux/module.h> + +#include "libgcc.h" + +long long __ashldi3(long long u, word_type b) +{ + DWunion uu, w; + word_type bm; + + if (b == 0) + return u; + + uu.ll = u; + bm = 32 - b; + + if (bm <= 0) { + w.s.low = 0; + w.s.high = (unsigned int) uu.s.low << -bm; + } else { + const unsigned int carries = (unsigned int) uu.s.low >> bm; + + w.s.low = (unsigned int) uu.s.low << b; + w.s.high = ((unsigned int) uu.s.high << b) | carries; + } + + return w.ll; +} + +EXPORT_SYMBOL(__ashldi3); diff --git a/arch/sh/lib/ashlsi3.S b/arch/sh/lib/ashlsi3.S new file mode 100644 index 000000000000..bd47e9b403a5 --- /dev/null +++ b/arch/sh/lib/ashlsi3.S @@ -0,0 +1,193 @@ +/* Copyright (C) 1994, 1995, 1997, 1998, 1999, 2000, 2001, 2002, 2003, + 2004, 2005, 2006 + Free Software Foundation, Inc. + +This file is free software; you can redistribute it and/or modify it +under the terms of the GNU General Public License as published by the +Free Software Foundation; either version 2, or (at your option) any +later version. 
+ +In addition to the permissions in the GNU General Public License, the +Free Software Foundation gives you unlimited permission to link the +compiled version of this file into combinations with other programs, +and to distribute those combinations without any restriction coming +from the use of this file. (The General Public License restrictions +do apply in other respects; for example, they cover modification of +the file, and distribution when not linked into a combine +executable.) + +This file is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; see the file COPYING. If not, write to +the Free Software Foundation, 51 Franklin Street, Fifth Floor, +Boston, MA 02110-1301, USA. */ + +!! libgcc routines for the Renesas / SuperH SH CPUs. +!! Contributed by Steve Chamberlain. +!! sac@cygnus.com + +!! ashiftrt_r4_x, ___ashrsi3, ___ashlsi3, ___lshrsi3 routines +!! recoded in assembly by Toshiyasu Morita +!! tm@netcom.com + +/* SH2 optimizations for ___ashrsi3, ___ashlsi3, ___lshrsi3 and + ELF local label prefixes by J"orn Rennecke + amylaar@cygnus.com */ + +! +! __ashlsi3 +! +! Entry: +! +! r4: Value to shift +! r5: Shifts +! +! Exit: +! +! r0: Result +! +! Destroys: +! +! (none) +! + .global __ashlsi3 + + .align 2 +__ashlsi3: + mov #31,r0 + and r0,r5 + mova ashlsi3_table,r0 + mov.b @(r0,r5),r5 +#ifdef __sh1__ + add r5,r0 + jmp @r0 +#else + braf r5 +#endif + mov r4,r0 + + .align 2 +ashlsi3_table: + .byte ashlsi3_0-ashlsi3_table + .byte ashlsi3_1-ashlsi3_table + .byte ashlsi3_2-ashlsi3_table + .byte ashlsi3_3-ashlsi3_table + .byte ashlsi3_4-ashlsi3_table + .byte ashlsi3_5-ashlsi3_table + .byte ashlsi3_6-ashlsi3_table + .byte ashlsi3_7-ashlsi3_table + .byte ashlsi3_8-ashlsi3_table + .byte ashlsi3_9-ashlsi3_table + .byte ashlsi3_10-ashlsi3_table + .byte ashlsi3_11-ashlsi3_table + .byte ashlsi3_12-ashlsi3_table + .byte ashlsi3_13-ashlsi3_table + .byte ashlsi3_14-ashlsi3_table + .byte ashlsi3_15-ashlsi3_table + .byte ashlsi3_16-ashlsi3_table + .byte ashlsi3_17-ashlsi3_table + .byte ashlsi3_18-ashlsi3_table + .byte ashlsi3_19-ashlsi3_table + .byte ashlsi3_20-ashlsi3_table + .byte ashlsi3_21-ashlsi3_table + .byte ashlsi3_22-ashlsi3_table + .byte ashlsi3_23-ashlsi3_table + .byte ashlsi3_24-ashlsi3_table + .byte ashlsi3_25-ashlsi3_table + .byte ashlsi3_26-ashlsi3_table + .byte ashlsi3_27-ashlsi3_table + .byte ashlsi3_28-ashlsi3_table + .byte ashlsi3_29-ashlsi3_table + .byte ashlsi3_30-ashlsi3_table + .byte ashlsi3_31-ashlsi3_table + +ashlsi3_6: + shll2 r0 +ashlsi3_4: + shll2 r0 +ashlsi3_2: + rts + shll2 r0 + +ashlsi3_7: + shll2 r0 +ashlsi3_5: + shll2 r0 +ashlsi3_3: + shll2 r0 +ashlsi3_1: + rts + shll r0 + +ashlsi3_14: + shll2 r0 +ashlsi3_12: + shll2 r0 +ashlsi3_10: + shll2 r0 +ashlsi3_8: + rts + shll8 r0 + +ashlsi3_15: + shll2 r0 +ashlsi3_13: + shll2 r0 +ashlsi3_11: + shll2 r0 +ashlsi3_9: + shll8 r0 + rts + shll r0 + +ashlsi3_22: + shll2 r0 +ashlsi3_20: + shll2 r0 +ashlsi3_18: + shll2 r0 +ashlsi3_16: + rts + shll16 r0 + +ashlsi3_23: + shll2 r0 +ashlsi3_21: + shll2 r0 +ashlsi3_19: + shll2 r0 +ashlsi3_17: + shll16 r0 + rts + shll r0 + +ashlsi3_30: + shll2 r0 +ashlsi3_28: + shll2 r0 +ashlsi3_26: + shll2 r0 +ashlsi3_24: + shll16 r0 + rts + shll8 r0 + +ashlsi3_31: + shll2 r0 +ashlsi3_29: + shll2 r0 +ashlsi3_27: + shll2 r0 +ashlsi3_25: + shll16 
r0 + shll8 r0 + rts + shll r0 + +ashlsi3_0: + rts + nop diff --git a/arch/sh/lib/ashrdi3.c b/arch/sh/lib/ashrdi3.c new file mode 100644 index 000000000000..c884a912b660 --- /dev/null +++ b/arch/sh/lib/ashrdi3.c @@ -0,0 +1,31 @@ +#include <linux/module.h> + +#include "libgcc.h" + +long long __ashrdi3(long long u, word_type b) +{ + DWunion uu, w; + word_type bm; + + if (b == 0) + return u; + + uu.ll = u; + bm = 32 - b; + + if (bm <= 0) { + /* w.s.high = 1..1 or 0..0 */ + w.s.high = + uu.s.high >> 31; + w.s.low = uu.s.high >> -bm; + } else { + const unsigned int carries = (unsigned int) uu.s.high << bm; + + w.s.high = uu.s.high >> b; + w.s.low = ((unsigned int) uu.s.low >> b) | carries; + } + + return w.ll; +} + +EXPORT_SYMBOL(__ashrdi3); diff --git a/arch/sh/lib/ashrsi3.S b/arch/sh/lib/ashrsi3.S new file mode 100644 index 000000000000..6f3cf46b77c2 --- /dev/null +++ b/arch/sh/lib/ashrsi3.S @@ -0,0 +1,185 @@ +/* Copyright (C) 1994, 1995, 1997, 1998, 1999, 2000, 2001, 2002, 2003, + 2004, 2005, 2006 + Free Software Foundation, Inc. + +This file is free software; you can redistribute it and/or modify it +under the terms of the GNU General Public License as published by the +Free Software Foundation; either version 2, or (at your option) any +later version. + +In addition to the permissions in the GNU General Public License, the +Free Software Foundation gives you unlimited permission to link the +compiled version of this file into combinations with other programs, +and to distribute those combinations without any restriction coming +from the use of this file. (The General Public License restrictions +do apply in other respects; for example, they cover modification of +the file, and distribution when not linked into a combine +executable.) + +This file is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; see the file COPYING. If not, write to +the Free Software Foundation, 51 Franklin Street, Fifth Floor, +Boston, MA 02110-1301, USA. */ + +!! libgcc routines for the Renesas / SuperH SH CPUs. +!! Contributed by Steve Chamberlain. +!! sac@cygnus.com + +!! ashiftrt_r4_x, ___ashrsi3, ___ashlsi3, ___lshrsi3 routines +!! recoded in assembly by Toshiyasu Morita +!! tm@netcom.com + +/* SH2 optimizations for ___ashrsi3, ___ashlsi3, ___lshrsi3 and + ELF local label prefixes by J"orn Rennecke + amylaar@cygnus.com */ + +! +! __ashrsi3 +! +! Entry: +! +! r4: Value to shift +! r5: Shifts +! +! Exit: +! +! r0: Result +! +! Destroys: +! +! (none) +! 
+ + .global __ashrsi3 + + .align 2 +__ashrsi3: + mov #31,r0 + and r0,r5 + mova ashrsi3_table,r0 + mov.b @(r0,r5),r5 +#ifdef __sh1__ + add r5,r0 + jmp @r0 +#else + braf r5 +#endif + mov r4,r0 + + .align 2 +ashrsi3_table: + .byte ashrsi3_0-ashrsi3_table + .byte ashrsi3_1-ashrsi3_table + .byte ashrsi3_2-ashrsi3_table + .byte ashrsi3_3-ashrsi3_table + .byte ashrsi3_4-ashrsi3_table + .byte ashrsi3_5-ashrsi3_table + .byte ashrsi3_6-ashrsi3_table + .byte ashrsi3_7-ashrsi3_table + .byte ashrsi3_8-ashrsi3_table + .byte ashrsi3_9-ashrsi3_table + .byte ashrsi3_10-ashrsi3_table + .byte ashrsi3_11-ashrsi3_table + .byte ashrsi3_12-ashrsi3_table + .byte ashrsi3_13-ashrsi3_table + .byte ashrsi3_14-ashrsi3_table + .byte ashrsi3_15-ashrsi3_table + .byte ashrsi3_16-ashrsi3_table + .byte ashrsi3_17-ashrsi3_table + .byte ashrsi3_18-ashrsi3_table + .byte ashrsi3_19-ashrsi3_table + .byte ashrsi3_20-ashrsi3_table + .byte ashrsi3_21-ashrsi3_table + .byte ashrsi3_22-ashrsi3_table + .byte ashrsi3_23-ashrsi3_table + .byte ashrsi3_24-ashrsi3_table + .byte ashrsi3_25-ashrsi3_table + .byte ashrsi3_26-ashrsi3_table + .byte ashrsi3_27-ashrsi3_table + .byte ashrsi3_28-ashrsi3_table + .byte ashrsi3_29-ashrsi3_table + .byte ashrsi3_30-ashrsi3_table + .byte ashrsi3_31-ashrsi3_table + +ashrsi3_31: + rotcl r0 + rts + subc r0,r0 + +ashrsi3_30: + shar r0 +ashrsi3_29: + shar r0 +ashrsi3_28: + shar r0 +ashrsi3_27: + shar r0 +ashrsi3_26: + shar r0 +ashrsi3_25: + shar r0 +ashrsi3_24: + shlr16 r0 + shlr8 r0 + rts + exts.b r0,r0 + +ashrsi3_23: + shar r0 +ashrsi3_22: + shar r0 +ashrsi3_21: + shar r0 +ashrsi3_20: + shar r0 +ashrsi3_19: + shar r0 +ashrsi3_18: + shar r0 +ashrsi3_17: + shar r0 +ashrsi3_16: + shlr16 r0 + rts + exts.w r0,r0 + +ashrsi3_15: + shar r0 +ashrsi3_14: + shar r0 +ashrsi3_13: + shar r0 +ashrsi3_12: + shar r0 +ashrsi3_11: + shar r0 +ashrsi3_10: + shar r0 +ashrsi3_9: + shar r0 +ashrsi3_8: + shar r0 +ashrsi3_7: + shar r0 +ashrsi3_6: + shar r0 +ashrsi3_5: + shar r0 +ashrsi3_4: + shar r0 +ashrsi3_3: + shar r0 +ashrsi3_2: + shar r0 +ashrsi3_1: + rts + shar r0 + +ashrsi3_0: + rts + nop diff --git a/arch/sh/lib/libgcc.h b/arch/sh/lib/libgcc.h new file mode 100644 index 000000000000..3f19d1c5d942 --- /dev/null +++ b/arch/sh/lib/libgcc.h @@ -0,0 +1,26 @@ +#ifndef __ASM_LIBGCC_H +#define __ASM_LIBGCC_H + +#include <asm/byteorder.h> + +typedef int word_type __attribute__ ((mode (__word__))); + +#ifdef __BIG_ENDIAN +struct DWstruct { + int high, low; +}; +#elif defined(__LITTLE_ENDIAN) +struct DWstruct { + int low, high; +}; +#else +#error I feel sick. 
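Note: the DWstruct layout above, combined with the DWunion defined just below it, is the usual libgcc device behind the new __ashldi3/__ashrdi3/__lshrdi3 helpers: a long long is overlaid with two ints so each 32-bit half can be shifted separately and the carried-out bits moved between halves. A stand-alone illustration, not from the patch:

	#include <stdio.h>

	struct dwstruct { int low, high; };	/* little-endian layout */
	union dwunion { struct dwstruct s; long long ll; };

	int main(void)
	{
		union dwunion u = { .ll = 0x0123456789abcdefLL };

		/* On a little-endian build this prints high=0x1234567 low=0x89abcdef */
		printf("high=%#x low=%#x\n", (unsigned)u.s.high, (unsigned)u.s.low);
		return 0;
	}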
+#endif + +typedef union +{ + struct DWstruct s; + long long ll; +} DWunion; + +#endif /* __ASM_LIBGCC_H */ diff --git a/arch/sh/lib/lshrdi3.c b/arch/sh/lib/lshrdi3.c new file mode 100644 index 000000000000..dcf8d6810b7c --- /dev/null +++ b/arch/sh/lib/lshrdi3.c @@ -0,0 +1,29 @@ +#include <linux/module.h> + +#include "libgcc.h" + +long long __lshrdi3(long long u, word_type b) +{ + DWunion uu, w; + word_type bm; + + if (b == 0) + return u; + + uu.ll = u; + bm = 32 - b; + + if (bm <= 0) { + w.s.high = 0; + w.s.low = (unsigned int) uu.s.high >> -bm; + } else { + const unsigned int carries = (unsigned int) uu.s.high << bm; + + w.s.high = (unsigned int) uu.s.high >> b; + w.s.low = ((unsigned int) uu.s.low >> b) | carries; + } + + return w.ll; +} + +EXPORT_SYMBOL(__lshrdi3); diff --git a/arch/sh/lib/lshrsi3.S b/arch/sh/lib/lshrsi3.S new file mode 100644 index 000000000000..1e7aaa557130 --- /dev/null +++ b/arch/sh/lib/lshrsi3.S @@ -0,0 +1,193 @@ +/* Copyright (C) 1994, 1995, 1997, 1998, 1999, 2000, 2001, 2002, 2003, + 2004, 2005, 2006 + Free Software Foundation, Inc. + +This file is free software; you can redistribute it and/or modify it +under the terms of the GNU General Public License as published by the +Free Software Foundation; either version 2, or (at your option) any +later version. + +In addition to the permissions in the GNU General Public License, the +Free Software Foundation gives you unlimited permission to link the +compiled version of this file into combinations with other programs, +and to distribute those combinations without any restriction coming +from the use of this file. (The General Public License restrictions +do apply in other respects; for example, they cover modification of +the file, and distribution when not linked into a combine +executable.) + +This file is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; see the file COPYING. If not, write to +the Free Software Foundation, 51 Franklin Street, Fifth Floor, +Boston, MA 02110-1301, USA. */ + +!! libgcc routines for the Renesas / SuperH SH CPUs. +!! Contributed by Steve Chamberlain. +!! sac@cygnus.com + +!! ashiftrt_r4_x, ___ashrsi3, ___ashlsi3, ___lshrsi3 routines +!! recoded in assembly by Toshiyasu Morita +!! tm@netcom.com + +/* SH2 optimizations for ___ashrsi3, ___ashlsi3, ___lshrsi3 and + ELF local label prefixes by J"orn Rennecke + amylaar@cygnus.com */ + +! +! __lshrsi3 +! +! Entry: +! +! r4: Value to shift +! r5: Shifts +! +! Exit: +! +! r0: Result +! +! Destroys: +! +! (none) +! 
+ .global __lshrsi3 + + .align 2 +__lshrsi3: + mov #31,r0 + and r0,r5 + mova lshrsi3_table,r0 + mov.b @(r0,r5),r5 +#ifdef __sh1__ + add r5,r0 + jmp @r0 +#else + braf r5 +#endif + mov r4,r0 + + .align 2 +lshrsi3_table: + .byte lshrsi3_0-lshrsi3_table + .byte lshrsi3_1-lshrsi3_table + .byte lshrsi3_2-lshrsi3_table + .byte lshrsi3_3-lshrsi3_table + .byte lshrsi3_4-lshrsi3_table + .byte lshrsi3_5-lshrsi3_table + .byte lshrsi3_6-lshrsi3_table + .byte lshrsi3_7-lshrsi3_table + .byte lshrsi3_8-lshrsi3_table + .byte lshrsi3_9-lshrsi3_table + .byte lshrsi3_10-lshrsi3_table + .byte lshrsi3_11-lshrsi3_table + .byte lshrsi3_12-lshrsi3_table + .byte lshrsi3_13-lshrsi3_table + .byte lshrsi3_14-lshrsi3_table + .byte lshrsi3_15-lshrsi3_table + .byte lshrsi3_16-lshrsi3_table + .byte lshrsi3_17-lshrsi3_table + .byte lshrsi3_18-lshrsi3_table + .byte lshrsi3_19-lshrsi3_table + .byte lshrsi3_20-lshrsi3_table + .byte lshrsi3_21-lshrsi3_table + .byte lshrsi3_22-lshrsi3_table + .byte lshrsi3_23-lshrsi3_table + .byte lshrsi3_24-lshrsi3_table + .byte lshrsi3_25-lshrsi3_table + .byte lshrsi3_26-lshrsi3_table + .byte lshrsi3_27-lshrsi3_table + .byte lshrsi3_28-lshrsi3_table + .byte lshrsi3_29-lshrsi3_table + .byte lshrsi3_30-lshrsi3_table + .byte lshrsi3_31-lshrsi3_table + +lshrsi3_6: + shlr2 r0 +lshrsi3_4: + shlr2 r0 +lshrsi3_2: + rts + shlr2 r0 + +lshrsi3_7: + shlr2 r0 +lshrsi3_5: + shlr2 r0 +lshrsi3_3: + shlr2 r0 +lshrsi3_1: + rts + shlr r0 + +lshrsi3_14: + shlr2 r0 +lshrsi3_12: + shlr2 r0 +lshrsi3_10: + shlr2 r0 +lshrsi3_8: + rts + shlr8 r0 + +lshrsi3_15: + shlr2 r0 +lshrsi3_13: + shlr2 r0 +lshrsi3_11: + shlr2 r0 +lshrsi3_9: + shlr8 r0 + rts + shlr r0 + +lshrsi3_22: + shlr2 r0 +lshrsi3_20: + shlr2 r0 +lshrsi3_18: + shlr2 r0 +lshrsi3_16: + rts + shlr16 r0 + +lshrsi3_23: + shlr2 r0 +lshrsi3_21: + shlr2 r0 +lshrsi3_19: + shlr2 r0 +lshrsi3_17: + shlr16 r0 + rts + shlr r0 + +lshrsi3_30: + shlr2 r0 +lshrsi3_28: + shlr2 r0 +lshrsi3_26: + shlr2 r0 +lshrsi3_24: + shlr16 r0 + rts + shlr8 r0 + +lshrsi3_31: + shlr2 r0 +lshrsi3_29: + shlr2 r0 +lshrsi3_27: + shlr2 r0 +lshrsi3_25: + shlr16 r0 + shlr8 r0 + rts + shlr r0 + +lshrsi3_0: + rts + nop diff --git a/arch/sh/lib/mcount.S b/arch/sh/lib/mcount.S new file mode 100644 index 000000000000..110fbfe1831f --- /dev/null +++ b/arch/sh/lib/mcount.S @@ -0,0 +1,90 @@ +/* + * arch/sh/lib/mcount.S + * + * Copyright (C) 2008 Paul Mundt + * Copyright (C) 2008 Matt Fleming + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ */ +#include <asm/ftrace.h> + +#define MCOUNT_ENTER() \ + mov.l r4, @-r15; \ + mov.l r5, @-r15; \ + mov.l r6, @-r15; \ + mov.l r7, @-r15; \ + sts.l pr, @-r15; \ + \ + mov.l @(20,r15),r4; \ + sts pr, r5 + +#define MCOUNT_LEAVE() \ + lds.l @r15+, pr; \ + mov.l @r15+, r7; \ + mov.l @r15+, r6; \ + mov.l @r15+, r5; \ + rts; \ + mov.l @r15+, r4 + + .align 2 + .globl _mcount + .type _mcount,@function + .globl mcount + .type mcount,@function +_mcount: +mcount: + MCOUNT_ENTER() + +#ifdef CONFIG_DYNAMIC_FTRACE + .globl mcount_call +mcount_call: + mov.l .Lftrace_stub, r6 +#else + mov.l .Lftrace_trace_function, r6 + mov.l ftrace_stub, r7 + cmp/eq r6, r7 + bt skip_trace + mov.l @r6, r6 +#endif + + jsr @r6 + nop + +skip_trace: + MCOUNT_LEAVE() + + .align 2 +.Lftrace_trace_function: + .long ftrace_trace_function + +#ifdef CONFIG_DYNAMIC_FTRACE + .globl ftrace_caller +ftrace_caller: + MCOUNT_ENTER() + + .globl ftrace_call +ftrace_call: + mov.l .Lftrace_stub, r6 + jsr @r6 + nop + + MCOUNT_LEAVE() +#endif /* CONFIG_DYNAMIC_FTRACE */ + +/* + * NOTE: From here on the locations of the .Lftrace_stub label and + * ftrace_stub itself are fixed. Adding additional data here will skew + * the displacement for the memory table and break the block replacement. + * Place new labels either after the ftrace_stub body, or before + * ftrace_caller. You have been warned. + */ + .align 2 +.Lftrace_stub: + .long ftrace_stub + + .globl ftrace_stub +ftrace_stub: + rts + nop diff --git a/arch/sh/lib/movmem.S b/arch/sh/lib/movmem.S new file mode 100644 index 000000000000..62075f6bc67c --- /dev/null +++ b/arch/sh/lib/movmem.S @@ -0,0 +1,238 @@ +/* Copyright (C) 1994, 1995, 1997, 1998, 1999, 2000, 2001, 2002, 2003, + 2004, 2005, 2006 + Free Software Foundation, Inc. + +This file is free software; you can redistribute it and/or modify it +under the terms of the GNU General Public License as published by the +Free Software Foundation; either version 2, or (at your option) any +later version. + +In addition to the permissions in the GNU General Public License, the +Free Software Foundation gives you unlimited permission to link the +compiled version of this file into combinations with other programs, +and to distribute those combinations without any restriction coming +from the use of this file. (The General Public License restrictions +do apply in other respects; for example, they cover modification of +the file, and distribution when not linked into a combine +executable.) + +This file is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; see the file COPYING. If not, write to +the Free Software Foundation, 51 Franklin Street, Fifth Floor, +Boston, MA 02110-1301, USA. */ + +!! libgcc routines for the Renesas / SuperH SH CPUs. +!! Contributed by Steve Chamberlain. +!! sac@cygnus.com + +!! ashiftrt_r4_x, ___ashrsi3, ___ashlsi3, ___lshrsi3 routines +!! recoded in assembly by Toshiyasu Morita +!! tm@netcom.com + +/* SH2 optimizations for ___ashrsi3, ___ashlsi3, ___lshrsi3 and + ELF local label prefixes by J"orn Rennecke + amylaar@cygnus.com */ + + .text + .balign 4 + .global __movmem + .global __movstr + .set __movstr, __movmem + /* This would be a lot simpler if r6 contained the byte count + minus 64, and we wouldn't be called here for a byte count of 64. 
*/ +__movmem: + sts.l pr,@-r15 + shll2 r6 + bsr __movmemSI52+2 + mov.l @(48,r5),r0 + .balign 4 +movmem_loop: /* Reached with rts */ + mov.l @(60,r5),r0 + add #-64,r6 + mov.l r0,@(60,r4) + tst r6,r6 + mov.l @(56,r5),r0 + bt movmem_done + mov.l r0,@(56,r4) + cmp/pl r6 + mov.l @(52,r5),r0 + add #64,r5 + mov.l r0,@(52,r4) + add #64,r4 + bt __movmemSI52 +! done all the large groups, do the remainder +! jump to movmem+ + mova __movmemSI4+4,r0 + add r6,r0 + jmp @r0 +movmem_done: ! share slot insn, works out aligned. + lds.l @r15+,pr + mov.l r0,@(56,r4) + mov.l @(52,r5),r0 + rts + mov.l r0,@(52,r4) + .balign 4 + + .global __movmemSI64 + .global __movstrSI64 + .set __movstrSI64, __movmemSI64 +__movmemSI64: + mov.l @(60,r5),r0 + mov.l r0,@(60,r4) + .global __movmemSI60 + .global __movstrSI60 + .set __movstrSI60, __movmemSI60 +__movmemSI60: + mov.l @(56,r5),r0 + mov.l r0,@(56,r4) + .global __movmemSI56 + .global __movstrSI56 + .set __movstrSI56, __movmemSI56 +__movmemSI56: + mov.l @(52,r5),r0 + mov.l r0,@(52,r4) + .global __movmemSI52 + .global __movstrSI52 + .set __movstrSI52, __movmemSI52 +__movmemSI52: + mov.l @(48,r5),r0 + mov.l r0,@(48,r4) + .global __movmemSI48 + .global __movstrSI48 + .set __movstrSI48, __movmemSI48 +__movmemSI48: + mov.l @(44,r5),r0 + mov.l r0,@(44,r4) + .global __movmemSI44 + .global __movstrSI44 + .set __movstrSI44, __movmemSI44 +__movmemSI44: + mov.l @(40,r5),r0 + mov.l r0,@(40,r4) + .global __movmemSI40 + .global __movstrSI40 + .set __movstrSI40, __movmemSI40 +__movmemSI40: + mov.l @(36,r5),r0 + mov.l r0,@(36,r4) + .global __movmemSI36 + .global __movstrSI36 + .set __movstrSI36, __movmemSI36 +__movmemSI36: + mov.l @(32,r5),r0 + mov.l r0,@(32,r4) + .global __movmemSI32 + .global __movstrSI32 + .set __movstrSI32, __movmemSI32 +__movmemSI32: + mov.l @(28,r5),r0 + mov.l r0,@(28,r4) + .global __movmemSI28 + .global __movstrSI28 + .set __movstrSI28, __movmemSI28 +__movmemSI28: + mov.l @(24,r5),r0 + mov.l r0,@(24,r4) + .global __movmemSI24 + .global __movstrSI24 + .set __movstrSI24, __movmemSI24 +__movmemSI24: + mov.l @(20,r5),r0 + mov.l r0,@(20,r4) + .global __movmemSI20 + .global __movstrSI20 + .set __movstrSI20, __movmemSI20 +__movmemSI20: + mov.l @(16,r5),r0 + mov.l r0,@(16,r4) + .global __movmemSI16 + .global __movstrSI16 + .set __movstrSI16, __movmemSI16 +__movmemSI16: + mov.l @(12,r5),r0 + mov.l r0,@(12,r4) + .global __movmemSI12 + .global __movstrSI12 + .set __movstrSI12, __movmemSI12 +__movmemSI12: + mov.l @(8,r5),r0 + mov.l r0,@(8,r4) + .global __movmemSI8 + .global __movstrSI8 + .set __movstrSI8, __movmemSI8 +__movmemSI8: + mov.l @(4,r5),r0 + mov.l r0,@(4,r4) + .global __movmemSI4 + .global __movstrSI4 + .set __movstrSI4, __movmemSI4 +__movmemSI4: + mov.l @(0,r5),r0 + rts + mov.l r0,@(0,r4) + + .global __movmem_i4_even + .global __movstr_i4_even + .set __movstr_i4_even, __movmem_i4_even + + .global __movmem_i4_odd + .global __movstr_i4_odd + .set __movstr_i4_odd, __movmem_i4_odd + + .global __movmemSI12_i4 + .global __movstrSI12_i4 + .set __movstrSI12_i4, __movmemSI12_i4 + + .p2align 5 +L_movmem_2mod4_end: + mov.l r0,@(16,r4) + rts + mov.l r1,@(20,r4) + + .p2align 2 + +__movmem_i4_even: + mov.l @r5+,r0 + bra L_movmem_start_even + mov.l @r5+,r1 + +__movmem_i4_odd: + mov.l @r5+,r1 + add #-4,r4 + mov.l @r5+,r2 + mov.l @r5+,r3 + mov.l r1,@(4,r4) + mov.l r2,@(8,r4) + +L_movmem_loop: + mov.l r3,@(12,r4) + dt r6 + mov.l @r5+,r0 + bt/s L_movmem_2mod4_end + mov.l @r5+,r1 + add #16,r4 +L_movmem_start_even: + mov.l @r5+,r2 + mov.l @r5+,r3 + mov.l r0,@r4 + dt r6 + mov.l 
r1,@(4,r4) + bf/s L_movmem_loop + mov.l r2,@(8,r4) + rts + mov.l r3,@(12,r4) + + .p2align 4 +__movmemSI12_i4: + mov.l @r5,r0 + mov.l @(4,r5),r1 + mov.l @(8,r5),r2 + mov.l r0,@r4 + mov.l r1,@(4,r4) + rts + mov.l r2,@(8,r4) diff --git a/arch/sh/lib/udiv_qrnnd.S b/arch/sh/lib/udiv_qrnnd.S new file mode 100644 index 000000000000..32b9a36de943 --- /dev/null +++ b/arch/sh/lib/udiv_qrnnd.S @@ -0,0 +1,81 @@ +/* Copyright (C) 1994, 1995, 1997, 1998, 1999, 2000, 2001, 2002, 2003, + 2004, 2005, 2006 + Free Software Foundation, Inc. + +This file is free software; you can redistribute it and/or modify it +under the terms of the GNU General Public License as published by the +Free Software Foundation; either version 2, or (at your option) any +later version. + +In addition to the permissions in the GNU General Public License, the +Free Software Foundation gives you unlimited permission to link the +compiled version of this file into combinations with other programs, +and to distribute those combinations without any restriction coming +from the use of this file. (The General Public License restrictions +do apply in other respects; for example, they cover modification of +the file, and distribution when not linked into a combine +executable.) + +This file is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; see the file COPYING. If not, write to +the Free Software Foundation, 51 Franklin Street, Fifth Floor, +Boston, MA 02110-1301, USA. */ + +!! libgcc routines for the Renesas / SuperH SH CPUs. +!! Contributed by Steve Chamberlain. +!! sac@cygnus.com + +!! ashiftrt_r4_x, ___ashrsi3, ___ashlsi3, ___lshrsi3 routines +!! recoded in assembly by Toshiyasu Morita +!! tm@netcom.com + +/* SH2 optimizations for ___ashrsi3, ___ashlsi3, ___lshrsi3 and + ELF local label prefixes by J"orn Rennecke + amylaar@cygnus.com */ + + /* r0: rn r1: qn */ /* r0: n1 r4: n0 r5: d r6: d1 */ /* r2: __m */ + /* n1 < d, but n1 might be larger than d1. */ + .global __udiv_qrnnd_16 + .balign 8 +__udiv_qrnnd_16: + div0u + cmp/hi r6,r0 + bt .Lots + .rept 16 + div1 r6,r0 + .endr + extu.w r0,r1 + bt 0f + add r6,r0 +0: rotcl r1 + mulu.w r1,r5 + xtrct r4,r0 + swap.w r0,r0 + sts macl,r2 + cmp/hs r2,r0 + sub r2,r0 + bt 0f + addc r5,r0 + add #-1,r1 + bt 0f +1: add #-1,r1 + rts + add r5,r0 + .balign 8 +.Lots: + sub r5,r0 + swap.w r4,r1 + xtrct r0,r1 + clrt + mov r1,r0 + addc r5,r0 + mov #-1,r1 + bf/s 1b + shlr16 r1 +0: rts + nop diff --git a/arch/sh/lib/udivsi3.S b/arch/sh/lib/udivsi3.S new file mode 100644 index 000000000000..72157ab5c314 --- /dev/null +++ b/arch/sh/lib/udivsi3.S @@ -0,0 +1,87 @@ +/* Copyright (C) 1994, 1995, 1997, 1998, 1999, 2000, 2001, 2002, 2003, + 2004, 2005 + Free Software Foundation, Inc. + +This file is free software; you can redistribute it and/or modify it +under the terms of the GNU General Public License as published by the +Free Software Foundation; either version 2, or (at your option) any +later version. + +In addition to the permissions in the GNU General Public License, the +Free Software Foundation gives you unlimited permission to link the +compiled version of this file into combinations with other programs, +and to distribute those combinations without any restriction coming +from the use of this file. 
(The General Public License restrictions +do apply in other respects; for example, they cover modification of +the file, and distribution when not linked into a combine +executable.) + +This file is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; see the file COPYING. If not, write to +the Free Software Foundation, 51 Franklin Street, Fifth Floor, +Boston, MA 02110-1301, USA. */ + +!! libgcc routines for the Renesas / SuperH SH CPUs. +!! Contributed by Steve Chamberlain. +!! sac@cygnus.com + + .balign 4 + .global __udivsi3 + .type __udivsi3, @function +div8: + div1 r5,r4 +div7: + div1 r5,r4; div1 r5,r4; div1 r5,r4 + div1 r5,r4; div1 r5,r4; div1 r5,r4; rts; div1 r5,r4 + +divx4: + div1 r5,r4; rotcl r0 + div1 r5,r4; rotcl r0 + div1 r5,r4; rotcl r0 + rts; div1 r5,r4 + +__udivsi3: + sts.l pr,@-r15 + extu.w r5,r0 + cmp/eq r5,r0 + bf/s large_divisor + div0u + swap.w r4,r0 + shlr16 r4 + bsr div8 + shll16 r5 + bsr div7 + div1 r5,r4 + xtrct r4,r0 + xtrct r0,r4 + bsr div8 + swap.w r4,r4 + bsr div7 + div1 r5,r4 + lds.l @r15+,pr + xtrct r4,r0 + swap.w r0,r0 + rotcl r0 + rts + shlr16 r5 + +large_divisor: + mov #0,r0 + xtrct r4,r0 + xtrct r0,r4 + bsr divx4 + rotcl r0 + bsr divx4 + rotcl r0 + bsr divx4 + rotcl r0 + bsr divx4 + rotcl r0 + lds.l @r15+,pr + rts + rotcl r0 diff --git a/arch/sh/lib/udivsi3_i4i-Os.S b/arch/sh/lib/udivsi3_i4i-Os.S new file mode 100644 index 000000000000..4835553e1ea9 --- /dev/null +++ b/arch/sh/lib/udivsi3_i4i-Os.S @@ -0,0 +1,149 @@ +/* Copyright (C) 2006 Free Software Foundation, Inc. + +This file is free software; you can redistribute it and/or modify it +under the terms of the GNU General Public License as published by the +Free Software Foundation; either version 2, or (at your option) any +later version. + +In addition to the permissions in the GNU General Public License, the +Free Software Foundation gives you unlimited permission to link the +compiled version of this file into combinations with other programs, +and to distribute those combinations without any restriction coming +from the use of this file. (The General Public License restrictions +do apply in other respects; for example, they cover modification of +the file, and distribution when not linked into a combine +executable.) + +This file is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; see the file COPYING. If not, write to +the Free Software Foundation, 51 Franklin Street, Fifth Floor, +Boston, MA 02110-1301, USA. */ + +/* Moderately Space-optimized libgcc routines for the Renesas SH / + STMicroelectronics ST40 CPUs. + Contributed by J"orn Rennecke joern.rennecke@st.com. 
*/ + +/* Size: 186 bytes jointly for udivsi3_i4i and sdivsi3_i4i + sh4-200 run times: + udiv small divisor: 55 cycles + udiv large divisor: 52 cycles + sdiv small divisor, positive result: 59 cycles + sdiv large divisor, positive result: 56 cycles + sdiv small divisor, negative result: 65 cycles (*) + sdiv large divisor, negative result: 62 cycles (*) + (*): r2 is restored in the rts delay slot and has a lingering latency + of two more cycles. */ + .balign 4 + .global __udivsi3_i4i + .global __udivsi3_i4 + .set __udivsi3_i4, __udivsi3_i4i + .type __udivsi3_i4i, @function + .type __sdivsi3_i4i, @function +__udivsi3_i4i: + sts pr,r1 + mov.l r4,@-r15 + extu.w r5,r0 + cmp/eq r5,r0 + swap.w r4,r0 + shlr16 r4 + bf/s large_divisor + div0u + mov.l r5,@-r15 + shll16 r5 +sdiv_small_divisor: + div1 r5,r4 + bsr div6 + div1 r5,r4 + div1 r5,r4 + bsr div6 + div1 r5,r4 + xtrct r4,r0 + xtrct r0,r4 + bsr div7 + swap.w r4,r4 + div1 r5,r4 + bsr div7 + div1 r5,r4 + xtrct r4,r0 + mov.l @r15+,r5 + swap.w r0,r0 + mov.l @r15+,r4 + jmp @r1 + rotcl r0 +div7: + div1 r5,r4 +div6: + div1 r5,r4; div1 r5,r4; div1 r5,r4 + div1 r5,r4; div1 r5,r4; rts; div1 r5,r4 + +divx3: + rotcl r0 + div1 r5,r4 + rotcl r0 + div1 r5,r4 + rotcl r0 + rts + div1 r5,r4 + +large_divisor: + mov.l r5,@-r15 +sdiv_large_divisor: + xor r4,r0 + .rept 4 + rotcl r0 + bsr divx3 + div1 r5,r4 + .endr + mov.l @r15+,r5 + mov.l @r15+,r4 + jmp @r1 + rotcl r0 + + .global __sdivsi3_i4i + .global __sdivsi3_i4 + .global __sdivsi3 + .set __sdivsi3_i4, __sdivsi3_i4i + .set __sdivsi3, __sdivsi3_i4i +__sdivsi3_i4i: + mov.l r4,@-r15 + cmp/pz r5 + mov.l r5,@-r15 + bt/s pos_divisor + cmp/pz r4 + neg r5,r5 + extu.w r5,r0 + bt/s neg_result + cmp/eq r5,r0 + neg r4,r4 +pos_result: + swap.w r4,r0 + bra sdiv_check_divisor + sts pr,r1 +pos_divisor: + extu.w r5,r0 + bt/s pos_result + cmp/eq r5,r0 + neg r4,r4 +neg_result: + mova negate_result,r0 + ; + mov r0,r1 + swap.w r4,r0 + lds r2,macl + sts pr,r2 +sdiv_check_divisor: + shlr16 r4 + bf/s sdiv_large_divisor + div0u + bra sdiv_small_divisor + shll16 r5 + .balign 4 +negate_result: + neg r0,r0 + jmp @r2 + sts macl,r2 diff --git a/arch/sh/lib/udivsi3_i4i.S b/arch/sh/lib/udivsi3_i4i.S new file mode 100644 index 000000000000..f1a79d9c5015 --- /dev/null +++ b/arch/sh/lib/udivsi3_i4i.S @@ -0,0 +1,666 @@ +/* Copyright (C) 1994, 1995, 1997, 1998, 1999, 2000, 2001, 2002, 2003, + 2004, 2005, 2006 + Free Software Foundation, Inc. + +This file is free software; you can redistribute it and/or modify it +under the terms of the GNU General Public License as published by the +Free Software Foundation; either version 2, or (at your option) any +later version. + +In addition to the permissions in the GNU General Public License, the +Free Software Foundation gives you unlimited permission to link the +compiled version of this file into combinations with other programs, +and to distribute those combinations without any restriction coming +from the use of this file. (The General Public License restrictions +do apply in other respects; for example, they cover modification of +the file, and distribution when not linked into a combine +executable.) + +This file is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; see the file COPYING. 
If not, write to +the Free Software Foundation, 51 Franklin Street, Fifth Floor, +Boston, MA 02110-1301, USA. */ + +!! libgcc routines for the Renesas / SuperH SH CPUs. +!! Contributed by Steve Chamberlain. +!! sac@cygnus.com + +!! ashiftrt_r4_x, ___ashrsi3, ___ashlsi3, ___lshrsi3 routines +!! recoded in assembly by Toshiyasu Morita +!! tm@netcom.com + +/* SH2 optimizations for ___ashrsi3, ___ashlsi3, ___lshrsi3 and + ELF local label prefixes by J"orn Rennecke + amylaar@cygnus.com */ + +/* This code used shld, thus is not suitable for SH1 / SH2. */ + +/* Signed / unsigned division without use of FPU, optimized for SH4. + Uses a lookup table for divisors in the range -128 .. +128, and + div1 with case distinction for larger divisors in three more ranges. + The code is lumped together with the table to allow the use of mova. */ +#ifdef CONFIG_CPU_LITTLE_ENDIAN +#define L_LSB 0 +#define L_LSWMSB 1 +#define L_MSWLSB 2 +#else +#define L_LSB 3 +#define L_LSWMSB 2 +#define L_MSWLSB 1 +#endif + + .balign 4 + .global __udivsi3_i4i + .global __udivsi3_i4 + .set __udivsi3_i4, __udivsi3_i4i + .type __udivsi3_i4i, @function +__udivsi3_i4i: + mov.w c128_w, r1 + div0u + mov r4,r0 + shlr8 r0 + cmp/hi r1,r5 + extu.w r5,r1 + bf udiv_le128 + cmp/eq r5,r1 + bf udiv_ge64k + shlr r0 + mov r5,r1 + shll16 r5 + mov.l r4,@-r15 + div1 r5,r0 + mov.l r1,@-r15 + div1 r5,r0 + div1 r5,r0 + bra udiv_25 + div1 r5,r0 + +div_le128: + mova div_table_ix,r0 + bra div_le128_2 + mov.b @(r0,r5),r1 +udiv_le128: + mov.l r4,@-r15 + mova div_table_ix,r0 + mov.b @(r0,r5),r1 + mov.l r5,@-r15 +div_le128_2: + mova div_table_inv,r0 + mov.l @(r0,r1),r1 + mov r5,r0 + tst #0xfe,r0 + mova div_table_clz,r0 + dmulu.l r1,r4 + mov.b @(r0,r5),r1 + bt/s div_by_1 + mov r4,r0 + mov.l @r15+,r5 + sts mach,r0 + /* clrt */ + addc r4,r0 + mov.l @r15+,r4 + rotcr r0 + rts + shld r1,r0 + +div_by_1_neg: + neg r4,r0 +div_by_1: + mov.l @r15+,r5 + rts + mov.l @r15+,r4 + +div_ge64k: + bt/s div_r8 + div0u + shll8 r5 + bra div_ge64k_2 + div1 r5,r0 +udiv_ge64k: + cmp/hi r0,r5 + mov r5,r1 + bt udiv_r8 + shll8 r5 + mov.l r4,@-r15 + div1 r5,r0 + mov.l r1,@-r15 +div_ge64k_2: + div1 r5,r0 + mov.l zero_l,r1 + .rept 4 + div1 r5,r0 + .endr + mov.l r1,@-r15 + div1 r5,r0 + mov.w m256_w,r1 + div1 r5,r0 + mov.b r0,@(L_LSWMSB,r15) + xor r4,r0 + and r1,r0 + bra div_ge64k_end + xor r4,r0 + +div_r8: + shll16 r4 + bra div_r8_2 + shll8 r4 +udiv_r8: + mov.l r4,@-r15 + shll16 r4 + clrt + shll8 r4 + mov.l r5,@-r15 +div_r8_2: + rotcl r4 + mov r0,r1 + div1 r5,r1 + mov r4,r0 + rotcl r0 + mov r5,r4 + div1 r5,r1 + .rept 5 + rotcl r0; div1 r5,r1 + .endr + rotcl r0 + mov.l @r15+,r5 + div1 r4,r1 + mov.l @r15+,r4 + rts + rotcl r0 + + .global __sdivsi3_i4i + .global __sdivsi3_i4 + .global __sdivsi3 + .set __sdivsi3_i4, __sdivsi3_i4i + .set __sdivsi3, __sdivsi3_i4i + .type __sdivsi3_i4i, @function + /* This is link-compatible with a __sdivsi3 call, + but we effectively clobber only r1. 
*/ +__sdivsi3_i4i: + mov.l r4,@-r15 + cmp/pz r5 + mov.w c128_w, r1 + bt/s pos_divisor + cmp/pz r4 + mov.l r5,@-r15 + neg r5,r5 + bt/s neg_result + cmp/hi r1,r5 + neg r4,r4 +pos_result: + extu.w r5,r0 + bf div_le128 + cmp/eq r5,r0 + mov r4,r0 + shlr8 r0 + bf/s div_ge64k + cmp/hi r0,r5 + div0u + shll16 r5 + div1 r5,r0 + div1 r5,r0 + div1 r5,r0 +udiv_25: + mov.l zero_l,r1 + div1 r5,r0 + div1 r5,r0 + mov.l r1,@-r15 + .rept 3 + div1 r5,r0 + .endr + mov.b r0,@(L_MSWLSB,r15) + xtrct r4,r0 + swap.w r0,r0 + .rept 8 + div1 r5,r0 + .endr + mov.b r0,@(L_LSWMSB,r15) +div_ge64k_end: + .rept 8 + div1 r5,r0 + .endr + mov.l @r15+,r4 ! zero-extension and swap using LS unit. + extu.b r0,r0 + mov.l @r15+,r5 + or r4,r0 + mov.l @r15+,r4 + rts + rotcl r0 + +div_le128_neg: + tst #0xfe,r0 + mova div_table_ix,r0 + mov.b @(r0,r5),r1 + mova div_table_inv,r0 + bt/s div_by_1_neg + mov.l @(r0,r1),r1 + mova div_table_clz,r0 + dmulu.l r1,r4 + mov.b @(r0,r5),r1 + mov.l @r15+,r5 + sts mach,r0 + /* clrt */ + addc r4,r0 + mov.l @r15+,r4 + rotcr r0 + shld r1,r0 + rts + neg r0,r0 + +pos_divisor: + mov.l r5,@-r15 + bt/s pos_result + cmp/hi r1,r5 + neg r4,r4 +neg_result: + extu.w r5,r0 + bf div_le128_neg + cmp/eq r5,r0 + mov r4,r0 + shlr8 r0 + bf/s div_ge64k_neg + cmp/hi r0,r5 + div0u + mov.l zero_l,r1 + shll16 r5 + div1 r5,r0 + mov.l r1,@-r15 + .rept 7 + div1 r5,r0 + .endr + mov.b r0,@(L_MSWLSB,r15) + xtrct r4,r0 + swap.w r0,r0 + .rept 8 + div1 r5,r0 + .endr + mov.b r0,@(L_LSWMSB,r15) +div_ge64k_neg_end: + .rept 8 + div1 r5,r0 + .endr + mov.l @r15+,r4 ! zero-extension and swap using LS unit. + extu.b r0,r1 + mov.l @r15+,r5 + or r4,r1 +div_r8_neg_end: + mov.l @r15+,r4 + rotcl r1 + rts + neg r1,r0 + +div_ge64k_neg: + bt/s div_r8_neg + div0u + shll8 r5 + mov.l zero_l,r1 + .rept 6 + div1 r5,r0 + .endr + mov.l r1,@-r15 + div1 r5,r0 + mov.w m256_w,r1 + div1 r5,r0 + mov.b r0,@(L_LSWMSB,r15) + xor r4,r0 + and r1,r0 + bra div_ge64k_neg_end + xor r4,r0 + +c128_w: + .word 128 + +div_r8_neg: + clrt + shll16 r4 + mov r4,r1 + shll8 r1 + mov r5,r4 + .rept 7 + rotcl r1; div1 r5,r0 + .endr + mov.l @r15+,r5 + rotcl r1 + bra div_r8_neg_end + div1 r4,r0 + +m256_w: + .word 0xff00 +/* This table has been generated by divtab-sh4.c. 
*/ + .balign 4 +div_table_clz: + .byte 0 + .byte 1 + .byte 0 + .byte -1 + .byte -1 + .byte -2 + .byte -2 + .byte -2 + .byte -2 + .byte -3 + .byte -3 + .byte -3 + .byte -3 + .byte -3 + .byte -3 + .byte -3 + .byte -3 + .byte -4 + .byte -4 + .byte -4 + .byte -4 + .byte -4 + .byte -4 + .byte -4 + .byte -4 + .byte -4 + .byte -4 + .byte -4 + .byte -4 + .byte -4 + .byte -4 + .byte -4 + .byte -4 + .byte -5 + .byte -5 + .byte -5 + .byte -5 + .byte -5 + .byte -5 + .byte -5 + .byte -5 + .byte -5 + .byte -5 + .byte -5 + .byte -5 + .byte -5 + .byte -5 + .byte -5 + .byte -5 + .byte -5 + .byte -5 + .byte -5 + .byte -5 + .byte -5 + .byte -5 + .byte -5 + .byte -5 + .byte -5 + .byte -5 + .byte -5 + .byte -5 + .byte -5 + .byte -5 + .byte -5 + .byte -5 + .byte -6 + .byte -6 + .byte -6 + .byte -6 + .byte -6 + .byte -6 + .byte -6 + .byte -6 + .byte -6 + .byte -6 + .byte -6 + .byte -6 + .byte -6 + .byte -6 + .byte -6 + .byte -6 + .byte -6 + .byte -6 + .byte -6 + .byte -6 + .byte -6 + .byte -6 + .byte -6 + .byte -6 + .byte -6 + .byte -6 + .byte -6 + .byte -6 + .byte -6 + .byte -6 + .byte -6 + .byte -6 + .byte -6 + .byte -6 + .byte -6 + .byte -6 + .byte -6 + .byte -6 + .byte -6 + .byte -6 + .byte -6 + .byte -6 + .byte -6 + .byte -6 + .byte -6 + .byte -6 + .byte -6 + .byte -6 + .byte -6 + .byte -6 + .byte -6 + .byte -6 + .byte -6 + .byte -6 + .byte -6 + .byte -6 + .byte -6 + .byte -6 + .byte -6 + .byte -6 + .byte -6 + .byte -6 + .byte -6 +/* Lookup table translating positive divisor to index into table of + normalized inverse. N.B. the '0' entry is also the last entry of the + previous table, and causes an unaligned access for division by zero. */ +div_table_ix: + .byte -6 + .byte -128 + .byte -128 + .byte 0 + .byte -128 + .byte -64 + .byte 0 + .byte 64 + .byte -128 + .byte -96 + .byte -64 + .byte -32 + .byte 0 + .byte 32 + .byte 64 + .byte 96 + .byte -128 + .byte -112 + .byte -96 + .byte -80 + .byte -64 + .byte -48 + .byte -32 + .byte -16 + .byte 0 + .byte 16 + .byte 32 + .byte 48 + .byte 64 + .byte 80 + .byte 96 + .byte 112 + .byte -128 + .byte -120 + .byte -112 + .byte -104 + .byte -96 + .byte -88 + .byte -80 + .byte -72 + .byte -64 + .byte -56 + .byte -48 + .byte -40 + .byte -32 + .byte -24 + .byte -16 + .byte -8 + .byte 0 + .byte 8 + .byte 16 + .byte 24 + .byte 32 + .byte 40 + .byte 48 + .byte 56 + .byte 64 + .byte 72 + .byte 80 + .byte 88 + .byte 96 + .byte 104 + .byte 112 + .byte 120 + .byte -128 + .byte -124 + .byte -120 + .byte -116 + .byte -112 + .byte -108 + .byte -104 + .byte -100 + .byte -96 + .byte -92 + .byte -88 + .byte -84 + .byte -80 + .byte -76 + .byte -72 + .byte -68 + .byte -64 + .byte -60 + .byte -56 + .byte -52 + .byte -48 + .byte -44 + .byte -40 + .byte -36 + .byte -32 + .byte -28 + .byte -24 + .byte -20 + .byte -16 + .byte -12 + .byte -8 + .byte -4 + .byte 0 + .byte 4 + .byte 8 + .byte 12 + .byte 16 + .byte 20 + .byte 24 + .byte 28 + .byte 32 + .byte 36 + .byte 40 + .byte 44 + .byte 48 + .byte 52 + .byte 56 + .byte 60 + .byte 64 + .byte 68 + .byte 72 + .byte 76 + .byte 80 + .byte 84 + .byte 88 + .byte 92 + .byte 96 + .byte 100 + .byte 104 + .byte 108 + .byte 112 + .byte 116 + .byte 120 + .byte 124 + .byte -128 +/* 1/64 .. 1/127, normalized. There is an implicit leading 1 in bit 32. 
*/ + .balign 4 +zero_l: + .long 0x0 + .long 0xF81F81F9 + .long 0xF07C1F08 + .long 0xE9131AC0 + .long 0xE1E1E1E2 + .long 0xDAE6076C + .long 0xD41D41D5 + .long 0xCD856891 + .long 0xC71C71C8 + .long 0xC0E07039 + .long 0xBACF914D + .long 0xB4E81B4F + .long 0xAF286BCB + .long 0xA98EF607 + .long 0xA41A41A5 + .long 0x9EC8E952 + .long 0x9999999A + .long 0x948B0FCE + .long 0x8F9C18FA + .long 0x8ACB90F7 + .long 0x86186187 + .long 0x81818182 + .long 0x7D05F418 + .long 0x78A4C818 + .long 0x745D1746 + .long 0x702E05C1 + .long 0x6C16C16D + .long 0x68168169 + .long 0x642C8591 + .long 0x60581606 + .long 0x5C9882BA + .long 0x58ED2309 +div_table_inv: + .long 0x55555556 + .long 0x51D07EAF + .long 0x4E5E0A73 + .long 0x4AFD6A06 + .long 0x47AE147B + .long 0x446F8657 + .long 0x41414142 + .long 0x3E22CBCF + .long 0x3B13B13C + .long 0x38138139 + .long 0x3521CFB3 + .long 0x323E34A3 + .long 0x2F684BDB + .long 0x2C9FB4D9 + .long 0x29E4129F + .long 0x27350B89 + .long 0x24924925 + .long 0x21FB7813 + .long 0x1F7047DD + .long 0x1CF06ADB + .long 0x1A7B9612 + .long 0x18118119 + .long 0x15B1E5F8 + .long 0x135C8114 + .long 0x11111112 + .long 0xECF56BF + .long 0xC9714FC + .long 0xA6810A7 + .long 0x8421085 + .long 0x624DD30 + .long 0x4104105 + .long 0x2040811 + /* maximum error: 0.987342 scaled: 0.921875*/ diff --git a/arch/sh/lib64/Makefile b/arch/sh/lib64/Makefile index 9950966923a0..4bacb9e83478 100644 --- a/arch/sh/lib64/Makefile +++ b/arch/sh/lib64/Makefile @@ -2,7 +2,7 @@ # Makefile for the SH-5 specific library files.. # # Copyright (C) 2000, 2001 Paolo Alberelli -# Copyright (C) 2003 Paul Mundt +# Copyright (C) 2003 - 2008 Paul Mundt # # This file is subject to the terms and conditions of the GNU General Public # License. See the file "COPYING" in the main directory of this archive @@ -10,6 +10,8 @@ # # Panic should really be compiled as PIC -lib-y := udelay.o c-checksum.o dbg.o panic.o memcpy.o copy_user_memcpy.o \ - copy_page.o clear_page.o +lib-y := udelay.o c-checksum.o dbg.o panic.o memcpy.o memset.o \ + copy_user_memcpy.o copy_page.o clear_page.o strcpy.o strlen.o +# Extracted from libgcc +lib-y += udivsi3.o udivdi3.o sdivsi3.o diff --git a/arch/sh/lib64/c-checksum.c b/arch/sh/lib64/c-checksum.c index 5c284e0cff9c..73c0877e3a29 100644 --- a/arch/sh/lib64/c-checksum.c +++ b/arch/sh/lib64/c-checksum.c @@ -35,7 +35,7 @@ static inline unsigned short foldto16(unsigned long x) static inline unsigned short myfoldto16(unsigned long long x) { - /* Fold down to 32-bits so we don't loose in the typedef-less + /* Fold down to 32-bits so we don't lose in the typedef-less network stack. */ /* 64 to 33 */ x = (x & 0xffffffff) + (x >> 32); @@ -199,7 +199,7 @@ __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, result = (__force u64) saddr + (__force u64) daddr + (__force u64) sum + ((len + proto) << 8); - /* Fold down to 32-bits so we don't loose in the typedef-less + /* Fold down to 32-bits so we don't lose in the typedef-less network stack. */ /* 64 to 33 */ result = (result & 0xffffffff) + (result >> 32); diff --git a/arch/sh/lib64/memcpy.S b/arch/sh/lib64/memcpy.S new file mode 100644 index 000000000000..dd300c372ce1 --- /dev/null +++ b/arch/sh/lib64/memcpy.S @@ -0,0 +1,201 @@ +/* Cloned and hacked for uClibc by Paul Mundt, December 2003 */ +/* Modified by SuperH, Inc. September 2003 */ +! +! Fast SH memcpy +! +! by Toshiyasu Morita (tm@netcom.com) +! hacked by J"orn Rernnecke (joern.rennecke@superh.com) ("o for o-umlaut) +! SH5 code Copyright 2002 SuperH Ltd. +! +! Entry: ARG0: destination pointer +! 
ARG1: source pointer +! ARG2: byte count +! +! Exit: RESULT: destination pointer +! any other registers in the range r0-r7: trashed +! +! Notes: Usually one wants to do small reads and write a longword, but +! unfortunately it is difficult in some cases to concatanate bytes +! into a longword on the SH, so this does a longword read and small +! writes. +! +! This implementation makes two assumptions about how it is called: +! +! 1.: If the byte count is nonzero, the address of the last byte to be +! copied is unsigned greater than the address of the first byte to +! be copied. This could be easily swapped for a signed comparison, +! but the algorithm used needs some comparison. +! +! 2.: When there are two or three bytes in the last word of an 11-or-more +! bytes memory chunk to b copied, the rest of the word can be read +! without side effects. +! This could be easily changed by increasing the minumum size of +! a fast memcpy and the amount subtracted from r7 before L_2l_loop be 2, +! however, this would cost a few extra cyles on average. +! For SHmedia, the assumption is that any quadword can be read in its +! enirety if at least one byte is included in the copy. +! + + .section .text..SHmedia32,"ax" + .globl memcpy + .type memcpy, @function + .align 5 + +memcpy: + +#define LDUAQ(P,O,D0,D1) ldlo.q P,O,D0; ldhi.q P,O+7,D1 +#define STUAQ(P,O,D0,D1) stlo.q P,O,D0; sthi.q P,O+7,D1 +#define LDUAL(P,O,D0,D1) ldlo.l P,O,D0; ldhi.l P,O+3,D1 +#define STUAL(P,O,D0,D1) stlo.l P,O,D0; sthi.l P,O+3,D1 + + ld.b r3,0,r63 + pta/l Large,tr0 + movi 25,r0 + bgeu/u r4,r0,tr0 + nsb r4,r0 + shlli r0,5,r0 + movi (L1-L0+63*32 + 1) & 0xffff,r1 + sub r1, r0, r0 +L0: ptrel r0,tr0 + add r2,r4,r5 + ptabs r18,tr1 + add r3,r4,r6 + blink tr0,r63 + +/* Rearranged to make cut2 safe */ + .balign 8 +L4_7: /* 4..7 byte memcpy cntd. */ + stlo.l r2, 0, r0 + or r6, r7, r6 + sthi.l r5, -1, r6 + stlo.l r5, -4, r6 + blink tr1,r63 + + .balign 8 +L1: /* 0 byte memcpy */ + nop + blink tr1,r63 + nop + nop + nop + nop + +L2_3: /* 2 or 3 byte memcpy cntd. */ + st.b r5,-1,r6 + blink tr1,r63 + + /* 1 byte memcpy */ + ld.b r3,0,r0 + st.b r2,0,r0 + blink tr1,r63 + +L8_15: /* 8..15 byte memcpy cntd. */ + stlo.q r2, 0, r0 + or r6, r7, r6 + sthi.q r5, -1, r6 + stlo.q r5, -8, r6 + blink tr1,r63 + + /* 2 or 3 byte memcpy */ + ld.b r3,0,r0 + ld.b r2,0,r63 + ld.b r3,1,r1 + st.b r2,0,r0 + pta/l L2_3,tr0 + ld.b r6,-1,r6 + st.b r2,1,r1 + blink tr0, r63 + + /* 4 .. 7 byte memcpy */ + LDUAL (r3, 0, r0, r1) + pta L4_7, tr0 + ldlo.l r6, -4, r7 + or r0, r1, r0 + sthi.l r2, 3, r0 + ldhi.l r6, -1, r6 + blink tr0, r63 + + /* 8 .. 15 byte memcpy */ + LDUAQ (r3, 0, r0, r1) + pta L8_15, tr0 + ldlo.q r6, -8, r7 + or r0, r1, r0 + sthi.q r2, 7, r0 + ldhi.q r6, -1, r6 + blink tr0, r63 + + /* 16 .. 24 byte memcpy */ + LDUAQ (r3, 0, r0, r1) + LDUAQ (r3, 8, r8, r9) + or r0, r1, r0 + sthi.q r2, 7, r0 + or r8, r9, r8 + sthi.q r2, 15, r8 + ldlo.q r6, -8, r7 + ldhi.q r6, -1, r6 + stlo.q r2, 8, r8 + stlo.q r2, 0, r0 + or r6, r7, r6 + sthi.q r5, -1, r6 + stlo.q r5, -8, r6 + blink tr1,r63 + +Large: + ld.b r2, 0, r63 + pta/l Loop_ua, tr1 + ori r3, -8, r7 + sub r2, r7, r22 + sub r3, r2, r6 + add r2, r4, r5 + ldlo.q r3, 0, r0 + addi r5, -16, r5 + movi 64+8, r27 // could subtract r7 from that. 
+ stlo.q r2, 0, r0 + sthi.q r2, 7, r0 + ldx.q r22, r6, r0 + bgtu/l r27, r4, tr1 + + addi r5, -48, r27 + pta/l Loop_line, tr0 + addi r6, 64, r36 + addi r6, -24, r19 + addi r6, -16, r20 + addi r6, -8, r21 + +Loop_line: + ldx.q r22, r36, r63 + alloco r22, 32 + addi r22, 32, r22 + ldx.q r22, r19, r23 + sthi.q r22, -25, r0 + ldx.q r22, r20, r24 + ldx.q r22, r21, r25 + stlo.q r22, -32, r0 + ldx.q r22, r6, r0 + sthi.q r22, -17, r23 + sthi.q r22, -9, r24 + sthi.q r22, -1, r25 + stlo.q r22, -24, r23 + stlo.q r22, -16, r24 + stlo.q r22, -8, r25 + bgeu r27, r22, tr0 + +Loop_ua: + addi r22, 8, r22 + sthi.q r22, -1, r0 + stlo.q r22, -8, r0 + ldx.q r22, r6, r0 + bgtu/l r5, r22, tr1 + + add r3, r4, r7 + ldlo.q r7, -8, r1 + sthi.q r22, 7, r0 + ldhi.q r7, -1, r7 + ptabs r18,tr1 + stlo.q r22, 0, r0 + or r1, r7, r1 + sthi.q r5, 15, r1 + stlo.q r5, 8, r1 + blink tr1, r63 + + .size memcpy,.-memcpy diff --git a/arch/sh/lib64/memcpy.c b/arch/sh/lib64/memcpy.c deleted file mode 100644 index fba436a92bfa..000000000000 --- a/arch/sh/lib64/memcpy.c +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Copyright (C) 2002 Mark Debbage (Mark.Debbage@superh.com) - * - * May be copied or modified under the terms of the GNU General Public - * License. See linux/COPYING for more information. - * - */ - -#include <linux/types.h> -#include <asm/string.h> - -// This is a simplistic optimization of memcpy to increase the -// granularity of access beyond one byte using aligned -// loads and stores. This is not an optimal implementation -// for SH-5 (especially with regard to prefetching and the cache), -// and a better version should be provided later ... - -void *memcpy(void *dest, const void *src, size_t count) -{ - char *d = (char *) dest, *s = (char *) src; - - if (count >= 32) { - int i = 8 - (((unsigned long) d) & 0x7); - - if (i != 8) - while (i-- && count--) { - *d++ = *s++; - } - - if (((((unsigned long) d) & 0x7) == 0) && - ((((unsigned long) s) & 0x7) == 0)) { - while (count >= 32) { - unsigned long long t1, t2, t3, t4; - t1 = *(unsigned long long *) (s); - t2 = *(unsigned long long *) (s + 8); - t3 = *(unsigned long long *) (s + 16); - t4 = *(unsigned long long *) (s + 24); - *(unsigned long long *) (d) = t1; - *(unsigned long long *) (d + 8) = t2; - *(unsigned long long *) (d + 16) = t3; - *(unsigned long long *) (d + 24) = t4; - d += 32; - s += 32; - count -= 32; - } - while (count >= 8) { - *(unsigned long long *) d = - *(unsigned long long *) s; - d += 8; - s += 8; - count -= 8; - } - } - - if (((((unsigned long) d) & 0x3) == 0) && - ((((unsigned long) s) & 0x3) == 0)) { - while (count >= 4) { - *(unsigned long *) d = *(unsigned long *) s; - d += 4; - s += 4; - count -= 4; - } - } - - if (((((unsigned long) d) & 0x1) == 0) && - ((((unsigned long) s) & 0x1) == 0)) { - while (count >= 2) { - *(unsigned short *) d = *(unsigned short *) s; - d += 2; - s += 2; - count -= 2; - } - } - } - - while (count--) { - *d++ = *s++; - } - - return d; -} diff --git a/arch/sh/lib64/memset.S b/arch/sh/lib64/memset.S new file mode 100644 index 000000000000..2d37b0488552 --- /dev/null +++ b/arch/sh/lib64/memset.S @@ -0,0 +1,91 @@ +/* Cloned and hacked for uClibc by Paul Mundt, December 2003 */ +/* Modified by SuperH, Inc. September 2003 */ +! +! Fast SH memset +! +! by Toshiyasu Morita (tm@netcom.com) +! +! SH5 code by J"orn Rennecke (joern.rennecke@superh.com) +! Copyright 2002 SuperH Ltd. +! 
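A side note, not part of the patch itself: like the SHmedia memcpy above, memset works a quadword at a time. The SHHI/SHLO macros defined just below only select the shift direction (shlld versus shlrd) used when building byte masks for partial quadwords on little- versus big-endian configurations, and one of the first things the routine does is replicate the fill byte into every byte of a 64-bit register with mshflo.b and mperm.w. A portable C illustration of that replication step, offered purely as a sketch (replicate_byte is a made-up name, not something this file defines):

#include <stdint.h>
#include <stdio.h>

/* Spread the low byte of c into all eight bytes of a 64-bit word; the
 * mshflo.b/mperm.w pair at the top of memset has the same effect. */
static uint64_t replicate_byte(unsigned char c)
{
	return (uint64_t)c * 0x0101010101010101ULL;
}

int main(void)
{
	/* 0xab becomes 0xabababababababab */
	printf("%016llx\n", (unsigned long long)replicate_byte(0xab));
	return 0;
}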
+ +#if __BYTE_ORDER == __LITTLE_ENDIAN +#define SHHI shlld +#define SHLO shlrd +#else +#define SHHI shlrd +#define SHLO shlld +#endif + + .section .text..SHmedia32,"ax" + .globl memset + .type memset, @function + + .align 5 + +memset: + pta/l multiquad, tr0 + andi r2, 7, r22 + ptabs r18, tr2 + mshflo.b r3,r3,r3 + add r4, r22, r23 + mperm.w r3, r63, r3 // Fill pattern now in every byte of r3 + + movi 8, r9 + bgtu/u r23, r9, tr0 // multiquad + + beqi/u r4, 0, tr2 // Return with size 0 - ensures no mem accesses + ldlo.q r2, 0, r7 + shlli r4, 2, r4 + movi -1, r8 + SHHI r8, r4, r8 + SHHI r8, r4, r8 + mcmv r7, r8, r3 + stlo.q r2, 0, r3 + blink tr2, r63 + +multiquad: + pta/l lastquad, tr0 + stlo.q r2, 0, r3 + shlri r23, 3, r24 + add r2, r4, r5 + beqi/u r24, 1, tr0 // lastquad + pta/l loop, tr1 + sub r2, r22, r25 + andi r5, -8, r20 // calculate end address and + addi r20, -7*8, r8 // loop end address; This might overflow, so we need + // to use a different test before we start the loop + bge/u r24, r9, tr1 // loop + st.q r25, 8, r3 + st.q r20, -8, r3 + shlri r24, 1, r24 + beqi/u r24, 1, tr0 // lastquad + st.q r25, 16, r3 + st.q r20, -16, r3 + beqi/u r24, 2, tr0 // lastquad + st.q r25, 24, r3 + st.q r20, -24, r3 +lastquad: + sthi.q r5, -1, r3 + blink tr2,r63 + +loop: +!!! alloco r25, 32 // QQQ comment out for short-term fix to SHUK #3895. + // QQQ commenting out is locically correct, but sub-optimal + // QQQ Sean McGoogan - 4th April 2003. + st.q r25, 8, r3 + st.q r25, 16, r3 + st.q r25, 24, r3 + st.q r25, 32, r3 + addi r25, 32, r25 + bgeu/l r8, r25, tr1 // loop + + st.q r20, -40, r3 + st.q r20, -32, r3 + st.q r20, -24, r3 + st.q r20, -16, r3 + st.q r20, -8, r3 + sthi.q r5, -1, r3 + blink tr2,r63 + + .size memset,.-memset diff --git a/arch/sh/lib64/sdivsi3.S b/arch/sh/lib64/sdivsi3.S new file mode 100644 index 000000000000..6a800c6a4904 --- /dev/null +++ b/arch/sh/lib64/sdivsi3.S @@ -0,0 +1,131 @@ + .global __sdivsi3 + .section .text..SHmedia32,"ax" + .align 2 + + /* inputs: r4,r5 */ + /* clobbered: r1,r18,r19,r20,r21,r25,tr0 */ + /* result in r0 */ +__sdivsi3: + ptb __div_table,tr0 + + nsb r5, r1 + shlld r5, r1, r25 /* normalize; [-2 ..1, 1..2) in s2.62 */ + shari r25, 58, r21 /* extract 5(6) bit index (s2.4 with hole -1..1) */ + /* bubble */ + gettr tr0,r20 + ldx.ub r20, r21, r19 /* u0.8 */ + shari r25, 32, r25 /* normalize to s2.30 */ + shlli r21, 1, r21 + muls.l r25, r19, r19 /* s2.38 */ + ldx.w r20, r21, r21 /* s2.14 */ + ptabs r18, tr0 + shari r19, 24, r19 /* truncate to s2.14 */ + sub r21, r19, r19 /* some 11 bit inverse in s1.14 */ + muls.l r19, r19, r21 /* u0.28 */ + sub r63, r1, r1 + addi r1, 92, r1 + muls.l r25, r21, r18 /* s2.58 */ + shlli r19, 45, r19 /* multiply by two and convert to s2.58 */ + /* bubble */ + sub r19, r18, r18 + shari r18, 28, r18 /* some 22 bit inverse in s1.30 */ + muls.l r18, r25, r0 /* s2.60 */ + muls.l r18, r4, r25 /* s32.30 */ + /* bubble */ + shari r0, 16, r19 /* s-16.44 */ + muls.l r19, r18, r19 /* s-16.74 */ + shari r25, 63, r0 + shari r4, 14, r18 /* s19.-14 */ + shari r19, 30, r19 /* s-16.44 */ + muls.l r19, r18, r19 /* s15.30 */ + xor r21, r0, r21 /* You could also use the constant 1 << 27. */ + add r21, r25, r21 + sub r21, r19, r21 + shard r21, r1, r21 + sub r21, r0, r0 + blink tr0, r63 + +/* This table has been generated by divtab.c . 
+Defects for bias -330: + Max defect: 6.081536e-07 at -1.000000e+00 + Min defect: 2.849516e-08 at 1.030651e+00 + Max 2nd step defect: 9.606539e-12 at -1.000000e+00 + Min 2nd step defect: 0.000000e+00 at 0.000000e+00 + Defect at 1: 1.238659e-07 + Defect at -2: 1.061708e-07 */ + + .balign 2 + .type __div_table,@object + .size __div_table,128 +/* negative division constants */ + .word -16638 + .word -17135 + .word -17737 + .word -18433 + .word -19103 + .word -19751 + .word -20583 + .word -21383 + .word -22343 + .word -23353 + .word -24407 + .word -25582 + .word -26863 + .word -28382 + .word -29965 + .word -31800 +/* negative division factors */ + .byte 66 + .byte 70 + .byte 75 + .byte 81 + .byte 87 + .byte 93 + .byte 101 + .byte 109 + .byte 119 + .byte 130 + .byte 142 + .byte 156 + .byte 172 + .byte 192 + .byte 214 + .byte 241 + .skip 16 + .global __div_table +__div_table: + .skip 16 +/* positive division factors */ + .byte 241 + .byte 214 + .byte 192 + .byte 172 + .byte 156 + .byte 142 + .byte 130 + .byte 119 + .byte 109 + .byte 101 + .byte 93 + .byte 87 + .byte 81 + .byte 75 + .byte 70 + .byte 66 +/* positive division constants */ + .word 31801 + .word 29966 + .word 28383 + .word 26864 + .word 25583 + .word 24408 + .word 23354 + .word 22344 + .word 21384 + .word 20584 + .word 19752 + .word 19104 + .word 18434 + .word 17738 + .word 17136 + .word 16639 diff --git a/arch/sh/lib64/strcpy.S b/arch/sh/lib64/strcpy.S new file mode 100644 index 000000000000..ea7c9c533eea --- /dev/null +++ b/arch/sh/lib64/strcpy.S @@ -0,0 +1,97 @@ +/* Cloned and hacked for uClibc by Paul Mundt, December 2003 */ +/* Modified by SuperH, Inc. September 2003 */ +! Entry: arg0: destination +! arg1: source +! Exit: result: destination +! +! SH5 code Copyright 2002 SuperH Ltd. + +#if __BYTE_ORDER == __LITTLE_ENDIAN +#define SHHI shlld +#define SHLO shlrd +#else +#define SHHI shlrd +#define SHLO shlld +#endif + + .section .text..SHmedia32,"ax" + .globl strcpy + .type strcpy, @function + .align 5 + +strcpy: + + pta/l shortstring,tr1 + ldlo.q r3,0,r4 + ptabs r18,tr4 + shlli r3,3,r7 + addi r2, 8, r0 + mcmpeq.b r4,r63,r6 + SHHI r6,r7,r6 + bnei/u r6,0,tr1 // shortstring + pta/l no_lddst, tr2 + ori r3,-8,r23 + sub r2, r23, r0 + sub r3, r2, r21 + addi r21, 8, r20 + ldx.q r0, r21, r5 + pta/l loop, tr0 + ori r2,-8,r22 + mcmpeq.b r5, r63, r6 + bgt/u r22, r23, tr2 // no_lddst + + // r22 < r23 : Need to do a load from the destination. + // r22 == r23 : Doesn't actually need to load from destination, + // but still can be handled here. + ldlo.q r2, 0, r9 + movi -1, r8 + SHLO r8, r7, r8 + mcmv r4, r8, r9 + stlo.q r2, 0, r9 + beqi/l r6, 0, tr0 // loop + + add r5, r63, r4 + addi r0, 8, r0 + blink tr1, r63 // shortstring +no_lddst: + // r22 > r23: note that for r22 == r23 the sthi.q would clobber + // bytes before the destination region. 
+ stlo.q r2, 0, r4 + SHHI r4, r7, r4 + sthi.q r0, -1, r4 + beqi/l r6, 0, tr0 // loop + + add r5, r63, r4 + addi r0, 8, r0 +shortstring: +#if __BYTE_ORDER != __LITTLE_ENDIAN + pta/l shortstring2,tr1 + byterev r4,r4 +#endif +shortstring2: + st.b r0,-8,r4 + andi r4,0xff,r5 + shlri r4,8,r4 + addi r0,1,r0 + bnei/l r5,0,tr1 + blink tr4,r63 // return + + .balign 8 +loop: + stlo.q r0, 0, r5 + ldx.q r0, r20, r4 + addi r0, 16, r0 + sthi.q r0, -9, r5 + mcmpeq.b r4, r63, r6 + bnei/u r6, 0, tr1 // shortstring + ldx.q r0, r21, r5 + stlo.q r0, -8, r4 + sthi.q r0, -1, r4 + mcmpeq.b r5, r63, r6 + beqi/l r6, 0, tr0 // loop + + add r5, r63, r4 + addi r0, 8, r0 + blink tr1, r63 // shortstring + + .size strcpy,.-strcpy diff --git a/arch/sh/lib64/strlen.S b/arch/sh/lib64/strlen.S new file mode 100644 index 000000000000..cbc0d912e5f3 --- /dev/null +++ b/arch/sh/lib64/strlen.S @@ -0,0 +1,33 @@ +/* + * Simplistic strlen() implementation for SHmedia. + * + * Copyright (C) 2003 Paul Mundt <lethal@linux-sh.org> + */ + + .section .text..SHmedia32,"ax" + .globl strlen + .type strlen,@function + + .balign 16 +strlen: + ptabs r18, tr4 + + /* + * Note: We could easily deal with the NULL case here with a simple + * sanity check, though it seems that the behavior we want is to fault + * in the event that r2 == NULL, so we don't bother. + */ +/* beqi r2, 0, tr4 */ ! Sanity check + + movi -1, r0 + pta/l loop, tr0 +loop: + ld.b r2, 0, r1 + addi r2, 1, r2 + addi r0, 1, r0 + bnei/l r1, 0, tr0 + + or r0, r63, r2 + blink tr4, r63 + + .size strlen,.-strlen diff --git a/arch/sh/lib64/udivdi3.S b/arch/sh/lib64/udivdi3.S new file mode 100644 index 000000000000..6895c0225b85 --- /dev/null +++ b/arch/sh/lib64/udivdi3.S @@ -0,0 +1,120 @@ + .section .text..SHmedia32,"ax" + .align 2 + .global __udivdi3 +__udivdi3: + shlri r3,1,r4 + nsb r4,r22 + shlld r3,r22,r6 + shlri r6,49,r5 + movi 0xffffffffffffbaf1,r21 /* .l shift count 17. */ + sub r21,r5,r1 + mmulfx.w r1,r1,r4 + mshflo.w r1,r63,r1 + sub r63,r22,r20 // r63 == 64 % 64 + mmulfx.w r5,r4,r4 + pta large_divisor,tr0 + addi r20,32,r9 + msub.w r1,r4,r1 + madd.w r1,r1,r1 + mmulfx.w r1,r1,r4 + shlri r6,32,r7 + bgt/u r9,r63,tr0 // large_divisor + mmulfx.w r5,r4,r4 + shlri r2,32+14,r19 + addi r22,-31,r0 + msub.w r1,r4,r1 + + mulu.l r1,r7,r4 + addi r1,-3,r5 + mulu.l r5,r19,r5 + sub r63,r4,r4 // Negate to make sure r1 ends up <= 1/r2 + shlri r4,2,r4 /* chop off leading %0000000000000000 001.00000000000 - or, as + the case may be, %0000000000000000 000.11111111111, still */ + muls.l r1,r4,r4 /* leaving at least one sign bit. */ + mulu.l r5,r3,r8 + mshalds.l r1,r21,r1 + shari r4,26,r4 + shlld r8,r0,r8 + add r1,r4,r1 // 31 bit unsigned reciprocal now in r1 (msb equiv. 0.5) + sub r2,r8,r2 + /* Can do second step of 64 : 32 div now, using r1 and the rest in r2. */ + + shlri r2,22,r21 + mulu.l r21,r1,r21 + shlld r5,r0,r8 + addi r20,30-22,r0 + shlrd r21,r0,r21 + mulu.l r21,r3,r5 + add r8,r21,r8 + mcmpgt.l r21,r63,r21 // See Note 1 + addi r20,30,r0 + mshfhi.l r63,r21,r21 + sub r2,r5,r2 + andc r2,r21,r2 + + /* small divisor: need a third divide step */ + mulu.l r2,r1,r7 + ptabs r18,tr0 + addi r2,1,r2 + shlrd r7,r0,r7 + mulu.l r7,r3,r5 + add r8,r7,r8 + sub r2,r3,r2 + cmpgt r2,r5,r5 + add r8,r5,r2 + /* could test r3 here to check for divide by zero. 
*/ + blink tr0,r63 + +large_divisor: + mmulfx.w r5,r4,r4 + shlrd r2,r9,r25 + shlri r25,32,r8 + msub.w r1,r4,r1 + + mulu.l r1,r7,r4 + addi r1,-3,r5 + mulu.l r5,r8,r5 + sub r63,r4,r4 // Negate to make sure r1 ends up <= 1/r2 + shlri r4,2,r4 /* chop off leading %0000000000000000 001.00000000000 - or, as + the case may be, %0000000000000000 000.11111111111, still */ + muls.l r1,r4,r4 /* leaving at least one sign bit. */ + shlri r5,14-1,r8 + mulu.l r8,r7,r5 + mshalds.l r1,r21,r1 + shari r4,26,r4 + add r1,r4,r1 // 31 bit unsigned reciprocal now in r1 (msb equiv. 0.5) + sub r25,r5,r25 + /* Can do second step of 64 : 32 div now, using r1 and the rest in r25. */ + + shlri r25,22,r21 + mulu.l r21,r1,r21 + pta no_lo_adj,tr0 + addi r22,32,r0 + shlri r21,40,r21 + mulu.l r21,r7,r5 + add r8,r21,r8 + shlld r2,r0,r2 + sub r25,r5,r25 + bgtu/u r7,r25,tr0 // no_lo_adj + addi r8,1,r8 + sub r25,r7,r25 +no_lo_adj: + mextr4 r2,r25,r2 + + /* large_divisor: only needs a few adjustments. */ + mulu.l r8,r6,r5 + ptabs r18,tr0 + /* bubble */ + cmpgtu r5,r2,r5 + sub r8,r5,r2 + blink tr0,r63 + +/* Note 1: To shift the result of the second divide stage so that the result + always fits into 32 bits, yet we still reduce the rest sufficiently + would require a lot of instructions to do the shifts just right. Using + the full 64 bit shift result to multiply with the divisor would require + four extra instructions for the upper 32 bits (shift / mulu / shift / sub). + Fortunately, if the upper 32 bits of the shift result are nonzero, we + know that the rest after taking this partial result into account will + fit into 32 bits. So we just clear the upper 32 bits of the rest if the + upper 32 bits of the partial result are nonzero. */ diff --git a/arch/sh/lib64/udivsi3.S b/arch/sh/lib64/udivsi3.S new file mode 100644 index 000000000000..e68120e4b847 --- /dev/null +++ b/arch/sh/lib64/udivsi3.S @@ -0,0 +1,59 @@ + .global __udivsi3 + .section .text..SHmedia32,"ax" + .align 2 + +/* + inputs: r4,r5 + clobbered: r18,r19,r20,r21,r22,r25,tr0 + result in r0. + */ +__udivsi3: + addz.l r5,r63,r22 + nsb r22,r0 + shlld r22,r0,r25 + shlri r25,48,r25 + movi 0xffffffffffffbb0c,r20 /* shift count eqiv 76 */ + sub r20,r25,r21 + mmulfx.w r21,r21,r19 + mshflo.w r21,r63,r21 + ptabs r18,tr0 + mmulfx.w r25,r19,r19 + sub r20,r0,r0 + /* bubble */ + msub.w r21,r19,r19 + + /* + * It would be nice for scheduling to do this add to r21 before + * the msub.w, but we need a different value for r19 to keep + * errors under control. 
+ */ + addi r19,-2,r21 + mulu.l r4,r21,r18 + mmulfx.w r19,r19,r19 + shlli r21,15,r21 + shlrd r18,r0,r18 + mulu.l r18,r22,r20 + mmacnfx.wl r25,r19,r21 + /* bubble */ + sub r4,r20,r25 + + mulu.l r25,r21,r19 + addi r0,14,r0 + /* bubble */ + shlrd r19,r0,r19 + mulu.l r19,r22,r20 + add r18,r19,r18 + /* bubble */ + sub.l r25,r20,r25 + + mulu.l r25,r21,r19 + addz.l r25,r63,r25 + sub r25,r22,r25 + shlrd r19,r0,r19 + mulu.l r19,r22,r20 + addi r25,1,r25 + add r18,r19,r18 + + cmpgt r25,r20,r25 + add.l r18,r25,r0 + blink tr0,r63 diff --git a/arch/sh/mm/Makefile_32 b/arch/sh/mm/Makefile_32 index f066e76da204..cb2f3f299591 100644 --- a/arch/sh/mm/Makefile_32 +++ b/arch/sh/mm/Makefile_32 @@ -18,6 +18,7 @@ mmu-y := tlb-nommu.o pg-nommu.o mmu-$(CONFIG_MMU) := fault_32.o tlbflush_32.o ioremap_32.o obj-y += $(mmu-y) +obj-$(CONFIG_DEBUG_FS) += asids-debugfs.o ifdef CONFIG_DEBUG_FS obj-$(CONFIG_CPU_SH4) += cache-debugfs.o diff --git a/arch/sh/mm/Makefile_64 b/arch/sh/mm/Makefile_64 index 9481d0f54efd..2863ffb7006d 100644 --- a/arch/sh/mm/Makefile_64 +++ b/arch/sh/mm/Makefile_64 @@ -13,6 +13,7 @@ obj-y += cache-sh5.o endif obj-y += $(mmu-y) +obj-$(CONFIG_DEBUG_FS) += asids-debugfs.o obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o obj-$(CONFIG_NUMA) += numa.o diff --git a/arch/sh/mm/asids-debugfs.c b/arch/sh/mm/asids-debugfs.c new file mode 100644 index 000000000000..8e912a15e94f --- /dev/null +++ b/arch/sh/mm/asids-debugfs.c @@ -0,0 +1,79 @@ +/* + * debugfs ops for process ASIDs + * + * Copyright (C) 2000, 2001 Paolo Alberelli + * Copyright (C) 2003 - 2008 Paul Mundt + * Copyright (C) 2003, 2004 Richard Curnow + * + * Provides a debugfs file that lists out the ASIDs currently associated + * with the processes. + * + * In the SH-5 case, if the DM.PC register is examined through the debug + * link, this shows ASID + PC. To make use of this, the PID->ASID + * relationship needs to be known. This is primarily for debugging. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ */ +#include <linux/init.h> +#include <linux/module.h> +#include <linux/debugfs.h> +#include <linux/seq_file.h> +#include <linux/spinlock.h> +#include <asm/processor.h> +#include <asm/mmu_context.h> + +static int asids_seq_show(struct seq_file *file, void *iter) +{ + struct task_struct *p; + + read_lock(&tasklist_lock); + + for_each_process(p) { + int pid = p->pid; + + if (unlikely(!pid)) + continue; + + if (p->mm) + seq_printf(file, "%5d : %02lx\n", pid, + cpu_asid(smp_processor_id(), p->mm)); + else + seq_printf(file, "%5d : (none)\n", pid); + } + + read_unlock(&tasklist_lock); + + return 0; +} + +static int asids_debugfs_open(struct inode *inode, struct file *file) +{ + return single_open(file, asids_seq_show, inode->i_private); +} + +static const struct file_operations asids_debugfs_fops = { + .owner = THIS_MODULE, + .open = asids_debugfs_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int __init asids_debugfs_init(void) +{ + struct dentry *asids_dentry; + + asids_dentry = debugfs_create_file("asids", S_IRUSR, sh_debugfs_root, + NULL, &asids_debugfs_fops); + if (!asids_dentry) + return -ENOMEM; + if (IS_ERR(asids_dentry)) + return PTR_ERR(asids_dentry); + + return 0; +} +module_init(asids_debugfs_init); + +MODULE_LICENSE("GPL v2"); diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c index 9f8ea3ada4db..edcd5fbf9651 100644 --- a/arch/sh/mm/consistent.c +++ b/arch/sh/mm/consistent.c @@ -42,6 +42,8 @@ void *dma_alloc_coherent(struct device *dev, size_t size, return NULL; } + split_page(pfn_to_page(virt_to_phys(ret) >> PAGE_SHIFT), order); + *dma_handle = virt_to_phys(ret); return ret_nocache; } @@ -51,10 +53,13 @@ void dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle) { int order = get_order(size); + unsigned long pfn = dma_handle >> PAGE_SHIFT; + int k; if (!dma_release_from_coherent(dev, order, vaddr)) { WARN_ON(irqs_disabled()); /* for portability */ - free_pages((unsigned long)phys_to_virt(dma_handle), order); + for (k = 0; k < (1 << order); k++) + __free_pages(pfn_to_page(pfn + k), 0); iounmap(vaddr); } } diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c index 898d477e47c1..31a33ebdef6f 100644 --- a/arch/sh/mm/fault_32.c +++ b/arch/sh/mm/fault_32.c @@ -20,7 +20,6 @@ #include <asm/system.h> #include <asm/mmu_context.h> #include <asm/tlbflush.h> -#include <asm/kgdb.h> /* * This routine handles page faults. It determines the address, @@ -265,17 +264,6 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap) return ret; } -#ifdef CONFIG_SH_STORE_QUEUES -/* - * This is a special case for the SH-4 store queues, as pages for this - * space still need to be faulted in before it's possible to flush the - * store queue cache for writeout to the remapped region. - */ -#define P3_ADDR_MAX (P4SEG_STORE_QUE + 0x04000000) -#else -#define P3_ADDR_MAX P4SEG -#endif - /* * Called with interrupts disabled. 
*/ @@ -293,11 +281,6 @@ asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs, if (notify_page_fault(regs, lookup_exception_vector())) goto out; -#ifdef CONFIG_SH_KGDB - if (kgdb_nofault && kgdb_bus_err_hook) - kgdb_bus_err_hook(); -#endif - ret = 1; /* diff --git a/arch/sh/mm/ioremap_32.c b/arch/sh/mm/ioremap_32.c index 882a32ebc6b7..32946fba123e 100644 --- a/arch/sh/mm/ioremap_32.c +++ b/arch/sh/mm/ioremap_32.c @@ -116,9 +116,10 @@ EXPORT_SYMBOL(__ioremap); void __iounmap(void __iomem *addr) { unsigned long vaddr = (unsigned long __force)addr; + unsigned long seg = PXSEG(vaddr); struct vm_struct *p; - if (PXSEG(vaddr) < P3SEG || is_pci_memaddr(vaddr)) + if (seg < P3SEG || seg >= P3_ADDR_MAX || is_pci_memaddr(vaddr)) return; #ifdef CONFIG_32BIT diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c index 8837d511710a..931f4d003fa0 100644 --- a/arch/sh/mm/mmap.c +++ b/arch/sh/mm/mmap.c @@ -9,7 +9,101 @@ */ #include <linux/io.h> #include <linux/mm.h> +#include <linux/mman.h> +#include <linux/module.h> #include <asm/page.h> +#include <asm/processor.h> + +#ifdef CONFIG_MMU +unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */ +EXPORT_SYMBOL(shm_align_mask); + +/* + * To avoid cache aliases, we map the shared page with same color. + */ +#define COLOUR_ALIGN(addr, pgoff) \ + ((((addr) + shm_align_mask) & ~shm_align_mask) + \ + (((pgoff) << PAGE_SHIFT) & shm_align_mask)) + +unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, + unsigned long len, unsigned long pgoff, unsigned long flags) +{ + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + unsigned long start_addr; + int do_colour_align; + + if (flags & MAP_FIXED) { + /* We do not accept a shared mapping if it would violate + * cache aliasing constraints. + */ + if ((flags & MAP_SHARED) && (addr & shm_align_mask)) + return -EINVAL; + return addr; + } + + if (unlikely(len > TASK_SIZE)) + return -ENOMEM; + + do_colour_align = 0; + if (filp || (flags & MAP_SHARED)) + do_colour_align = 1; + + if (addr) { + if (do_colour_align) + addr = COLOUR_ALIGN(addr, pgoff); + else + addr = PAGE_ALIGN(addr); + + vma = find_vma(mm, addr); + if (TASK_SIZE - len >= addr && + (!vma || addr + len <= vma->vm_start)) + return addr; + } + + if (len > mm->cached_hole_size) { + start_addr = addr = mm->free_area_cache; + } else { + mm->cached_hole_size = 0; + start_addr = addr = TASK_UNMAPPED_BASE; + } + +full_search: + if (do_colour_align) + addr = COLOUR_ALIGN(addr, pgoff); + else + addr = PAGE_ALIGN(mm->free_area_cache); + + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { + /* At this point: (!vma || addr < vma->vm_end). */ + if (unlikely(TASK_SIZE - len < addr)) { + /* + * Start a new search - just in case we missed + * some holes. + */ + if (start_addr != TASK_UNMAPPED_BASE) { + start_addr = addr = TASK_UNMAPPED_BASE; + mm->cached_hole_size = 0; + goto full_search; + } + return -ENOMEM; + } + if (likely(!vma || addr + len <= vma->vm_start)) { + /* + * Remember the place where we stopped the search: + */ + mm->free_area_cache = addr + len; + return addr; + } + if (addr + mm->cached_hole_size < vma->vm_start) + mm->cached_hole_size = vma->vm_start - addr; + + addr = vma->vm_end; + if (do_colour_align) + addr = COLOUR_ALIGN(addr, pgoff); + } +} +#endif /* CONFIG_MMU */ /* * You really shouldn't be using read() or write() on /dev/mem. 
This diff --git a/arch/sh/oprofile/Makefile b/arch/sh/oprofile/Makefile index 2efc2e79fd29..8e6eec91c14c 100644 --- a/arch/sh/oprofile/Makefile +++ b/arch/sh/oprofile/Makefile @@ -6,13 +6,8 @@ DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \ oprofilefs.o oprofile_stats.o \ timer_int.o ) -profdrvr-y := op_model_null.o +oprofile-y := $(DRIVER_OBJS) common.o backtrace.o -# SH7750-style performance counters exist across 7750/7750S and 7091. -profdrvr-$(CONFIG_CPU_SUBTYPE_SH7750S) := op_model_sh7750.o -profdrvr-$(CONFIG_CPU_SUBTYPE_SH7750) := op_model_sh7750.o -profdrvr-$(CONFIG_CPU_SUBTYPE_SH7091) := op_model_sh7750.o - -oprofile-y := $(DRIVER_OBJS) $(profdrvr-y) - -EXTRA_CFLAGS += -Werror +oprofile-$(CONFIG_CPU_SUBTYPE_SH7750S) += op_model_sh7750.o +oprofile-$(CONFIG_CPU_SUBTYPE_SH7750) += op_model_sh7750.o +oprofile-$(CONFIG_CPU_SUBTYPE_SH7091) += op_model_sh7750.o diff --git a/arch/sh/oprofile/backtrace.c b/arch/sh/oprofile/backtrace.c new file mode 100644 index 000000000000..9499a2914f89 --- /dev/null +++ b/arch/sh/oprofile/backtrace.c @@ -0,0 +1,114 @@ +/* + * SH specific backtracing code for oprofile + * + * Copyright 2007 STMicroelectronics Ltd. + * + * Author: Dave Peverley <dpeverley@mpc-data.co.uk> + * + * Based on ARM oprofile backtrace code by Richard Purdie and in turn, i386 + * oprofile backtrace code by John Levon, David Smith + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ +#include <linux/oprofile.h> +#include <linux/sched.h> +#include <linux/kallsyms.h> +#include <linux/mm.h> +#include <asm/ptrace.h> +#include <asm/uaccess.h> +#include <asm/sections.h> + +/* Limit to stop backtracing too far. */ +static int backtrace_limit = 20; + +static unsigned long * +user_backtrace(unsigned long *stackaddr, struct pt_regs *regs) +{ + unsigned long buf_stack; + + /* Also check accessibility of address */ + if (!access_ok(VERIFY_READ, stackaddr, sizeof(unsigned long))) + return NULL; + + if (__copy_from_user_inatomic(&buf_stack, stackaddr, sizeof(unsigned long))) + return NULL; + + /* Quick paranoia check */ + if (buf_stack & 3) + return NULL; + + oprofile_add_trace(buf_stack); + + stackaddr++; + + return stackaddr; +} + +/* + * | | /\ Higher addresses + * | | + * --------------- stack base (address of current_thread_info) + * | thread info | + * . . + * | stack | + * --------------- saved regs->regs[15] value if valid + * . . + * --------------- struct pt_regs stored on stack (struct pt_regs *) + * | | + * . . + * | | + * --------------- ??? + * | | + * | | \/ Lower addresses + * + * Thus, &pt_regs <-> stack base restricts the valid(ish) fp values + */ +static int valid_kernel_stack(unsigned long *stackaddr, struct pt_regs *regs) +{ + unsigned long stack = (unsigned long)regs; + unsigned long stack_base = (stack & ~(THREAD_SIZE - 1)) + THREAD_SIZE; + + return ((unsigned long)stackaddr > stack) && ((unsigned long)stackaddr < stack_base); +} + +static unsigned long * +kernel_backtrace(unsigned long *stackaddr, struct pt_regs *regs) +{ + unsigned long addr; + + /* + * If not a valid kernel address, keep going till we find one + * or the SP stops being a valid address. 
+ */ + do { + addr = *stackaddr++; + oprofile_add_trace(addr); + } while (valid_kernel_stack(stackaddr, regs)); + + return stackaddr; +} + +void sh_backtrace(struct pt_regs * const regs, unsigned int depth) +{ + unsigned long *stackaddr; + + /* + * Paranoia - clip max depth as we could get lost in the weeds. + */ + if (depth > backtrace_limit) + depth = backtrace_limit; + + stackaddr = (unsigned long *)regs->regs[15]; + if (!user_mode(regs)) { + while (depth-- && valid_kernel_stack(stackaddr, regs)) + stackaddr = kernel_backtrace(stackaddr, regs); + + return; + } + + while (depth-- && (stackaddr != NULL)) + stackaddr = user_backtrace(stackaddr, regs); +} diff --git a/arch/sh/oprofile/common.c b/arch/sh/oprofile/common.c new file mode 100644 index 000000000000..1d97d64cb95f --- /dev/null +++ b/arch/sh/oprofile/common.c @@ -0,0 +1,150 @@ +/* + * arch/sh/oprofile/init.c + * + * Copyright (C) 2003 - 2008 Paul Mundt + * + * Based on arch/mips/oprofile/common.c: + * + * Copyright (C) 2004, 2005 Ralf Baechle + * Copyright (C) 2005 MIPS Technologies, Inc. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#include <linux/kernel.h> +#include <linux/oprofile.h> +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/smp.h> +#include <asm/processor.h> +#include "op_impl.h" + +extern struct op_sh_model op_model_sh7750_ops __weak; +extern struct op_sh_model op_model_sh4a_ops __weak; + +static struct op_sh_model *model; + +static struct op_counter_config ctr[20]; + +extern void sh_backtrace(struct pt_regs * const regs, unsigned int depth); + +static int op_sh_setup(void) +{ + /* Pre-compute the values to stuff in the hardware registers. */ + model->reg_setup(ctr); + + /* Configure the registers on all cpus. */ + on_each_cpu(model->cpu_setup, NULL, 1); + + return 0; +} + +static int op_sh_create_files(struct super_block *sb, struct dentry *root) +{ + int i, ret = 0; + + for (i = 0; i < model->num_counters; i++) { + struct dentry *dir; + char buf[4]; + + snprintf(buf, sizeof(buf), "%d", i); + dir = oprofilefs_mkdir(sb, root, buf); + + ret |= oprofilefs_create_ulong(sb, dir, "enabled", &ctr[i].enabled); + ret |= oprofilefs_create_ulong(sb, dir, "event", &ctr[i].event); + ret |= oprofilefs_create_ulong(sb, dir, "kernel", &ctr[i].kernel); + ret |= oprofilefs_create_ulong(sb, dir, "user", &ctr[i].user); + + if (model->create_files) + ret |= model->create_files(sb, dir); + else + ret |= oprofilefs_create_ulong(sb, dir, "count", &ctr[i].count); + + /* Dummy entries */ + ret |= oprofilefs_create_ulong(sb, dir, "unit_mask", &ctr[i].unit_mask); + } + + return ret; +} + +static int op_sh_start(void) +{ + /* Enable performance monitoring for all counters. */ + on_each_cpu(model->cpu_start, NULL, 1); + + return 0; +} + +static void op_sh_stop(void) +{ + /* Disable performance monitoring for all counters. */ + on_each_cpu(model->cpu_stop, NULL, 1); +} + +int __init oprofile_arch_init(struct oprofile_operations *ops) +{ + struct op_sh_model *lmodel = NULL; + int ret; + + /* + * Always assign the backtrace op. If the counter initialization + * fails, we fall back to the timer which will still make use of + * this. 
+ */ + ops->backtrace = sh_backtrace; + + switch (current_cpu_data.type) { + /* SH-4 types */ + case CPU_SH7750: + case CPU_SH7750S: + lmodel = &op_model_sh7750_ops; + break; + + /* SH-4A types */ + case CPU_SH7763: + case CPU_SH7770: + case CPU_SH7780: + case CPU_SH7781: + case CPU_SH7785: + case CPU_SH7723: + case CPU_SHX3: + lmodel = &op_model_sh4a_ops; + break; + + /* SH4AL-DSP types */ + case CPU_SH7343: + case CPU_SH7722: + case CPU_SH7366: + lmodel = &op_model_sh4a_ops; + break; + } + + if (!lmodel) + return -ENODEV; + if (!(current_cpu_data.flags & CPU_HAS_PERF_COUNTER)) + return -ENODEV; + + ret = lmodel->init(); + if (unlikely(ret != 0)) + return ret; + + model = lmodel; + + ops->setup = op_sh_setup; + ops->create_files = op_sh_create_files; + ops->start = op_sh_start; + ops->stop = op_sh_stop; + ops->cpu_type = lmodel->cpu_type; + + printk(KERN_INFO "oprofile: using %s performance monitoring.\n", + lmodel->cpu_type); + + return 0; +} + +void oprofile_arch_exit(void) +{ + if (model && model->exit) + model->exit(); +} diff --git a/arch/sh/oprofile/op_impl.h b/arch/sh/oprofile/op_impl.h new file mode 100644 index 000000000000..4d509975eba6 --- /dev/null +++ b/arch/sh/oprofile/op_impl.h @@ -0,0 +1,33 @@ +#ifndef __OP_IMPL_H +#define __OP_IMPL_H + +/* Per-counter configuration as set via oprofilefs. */ +struct op_counter_config { + unsigned long enabled; + unsigned long event; + + unsigned long long count; + + /* Dummy values for userspace tool compliance */ + unsigned long kernel; + unsigned long user; + unsigned long unit_mask; +}; + +/* Per-architecture configury and hooks. */ +struct op_sh_model { + void (*reg_setup)(struct op_counter_config *); + int (*create_files)(struct super_block *sb, struct dentry *dir); + void (*cpu_setup)(void *dummy); + int (*init)(void); + void (*exit)(void); + void (*cpu_start)(void *args); + void (*cpu_stop)(void *args); + char *cpu_type; + unsigned char num_counters; +}; + +/* arch/sh/oprofile/common.c */ +extern void sh_backtrace(struct pt_regs * const regs, unsigned int depth); + +#endif /* __OP_IMPL_H */ diff --git a/arch/sh/oprofile/op_model_null.c b/arch/sh/oprofile/op_model_null.c deleted file mode 100644 index a845b088edb4..000000000000 --- a/arch/sh/oprofile/op_model_null.c +++ /dev/null @@ -1,23 +0,0 @@ -/* - * arch/sh/oprofile/op_model_null.c - * - * Copyright (C) 2003 Paul Mundt - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - */ -#include <linux/kernel.h> -#include <linux/oprofile.h> -#include <linux/init.h> -#include <linux/errno.h> - -int __init oprofile_arch_init(struct oprofile_operations *ops) -{ - return -ENODEV; -} - -void oprofile_arch_exit(void) -{ -} - diff --git a/arch/sh/oprofile/op_model_sh7750.c b/arch/sh/oprofile/op_model_sh7750.c index 008b3b03750a..c892c7c30c2f 100644 --- a/arch/sh/oprofile/op_model_sh7750.c +++ b/arch/sh/oprofile/op_model_sh7750.c @@ -3,7 +3,7 @@ * * OProfile support for SH7750/SH7750S Performance Counters * - * Copyright (C) 2003, 2004 Paul Mundt + * Copyright (C) 2003 - 2008 Paul Mundt * * This file is subject to the terms and conditions of the GNU General Public * License. 
See the file "COPYING" in the main directory of this archive @@ -15,19 +15,16 @@ #include <linux/init.h> #include <linux/errno.h> #include <linux/interrupt.h> +#include <linux/io.h> #include <linux/fs.h> -#include <asm/uaccess.h> -#include <asm/io.h> +#include "op_impl.h" #define PM_CR_BASE 0xff000084 /* 16-bit */ #define PM_CTR_BASE 0xff100004 /* 32-bit */ -#define PMCR1 (PM_CR_BASE + 0x00) -#define PMCR2 (PM_CR_BASE + 0x04) -#define PMCTR1H (PM_CTR_BASE + 0x00) -#define PMCTR1L (PM_CTR_BASE + 0x04) -#define PMCTR2H (PM_CTR_BASE + 0x08) -#define PMCTR2L (PM_CTR_BASE + 0x0c) +#define PMCR(n) (PM_CR_BASE + ((n) * 0x04)) +#define PMCTRH(n) (PM_CTR_BASE + 0x00 + ((n) * 0x08)) +#define PMCTRL(n) (PM_CTR_BASE + 0x04 + ((n) * 0x08)) #define PMCR_PMM_MASK 0x0000003f @@ -36,25 +33,15 @@ #define PMCR_PMST 0x00004000 #define PMCR_PMEN 0x00008000 -#define PMCR_ENABLE (PMCR_PMST | PMCR_PMEN) +struct op_sh_model op_model_sh7750_ops; -/* - * SH7750/SH7750S have 2 perf counters - */ #define NR_CNTRS 2 -struct op_counter_config { - unsigned long enabled; - unsigned long event; - unsigned long count; - - /* Dummy values for userspace tool compliance */ - unsigned long kernel; - unsigned long user; - unsigned long unit_mask; -}; - -static struct op_counter_config ctr[NR_CNTRS]; +static struct sh7750_ppc_register_config { + unsigned int ctrl; + unsigned long cnt_hi; + unsigned long cnt_lo; +} regcache[NR_CNTRS]; /* * There are a number of events supported by each counter (33 in total). @@ -116,12 +103,8 @@ static int sh7750_timer_notify(struct pt_regs *regs) static u64 sh7750_read_counter(int counter) { - u32 hi, lo; - - hi = (counter == 0) ? ctrl_inl(PMCTR1H) : ctrl_inl(PMCTR2H); - lo = (counter == 0) ? ctrl_inl(PMCTR1L) : ctrl_inl(PMCTR2L); - - return (u64)((u64)(hi & 0xffff) << 32) | lo; + return (u64)((u64)(__raw_readl(PMCTRH(counter)) & 0xffff) << 32) | + __raw_readl(PMCTRL(counter)); } /* @@ -170,11 +153,7 @@ static ssize_t sh7750_write_count(struct file *file, const char __user *buf, */ WARN_ON(val != 0); - if (counter == 0) { - ctrl_outw(ctrl_inw(PMCR1) | PMCR_PMCLR, PMCR1); - } else { - ctrl_outw(ctrl_inw(PMCR2) | PMCR_PMCLR, PMCR2); - } + __raw_writew(__raw_readw(PMCR(counter)) | PMCR_PMCLR, PMCR(counter)); return count; } @@ -184,88 +163,93 @@ static const struct file_operations count_fops = { .write = sh7750_write_count, }; -static int sh7750_perf_counter_create_files(struct super_block *sb, struct dentry *root) +static int sh7750_ppc_create_files(struct super_block *sb, struct dentry *dir) { - int i; + return oprofilefs_create_file(sb, dir, "count", &count_fops); +} - for (i = 0; i < NR_CNTRS; i++) { - struct dentry *dir; - char buf[4]; +static void sh7750_ppc_reg_setup(struct op_counter_config *ctr) +{ + unsigned int counters = op_model_sh7750_ops.num_counters; + int i; - snprintf(buf, sizeof(buf), "%d", i); - dir = oprofilefs_mkdir(sb, root, buf); + for (i = 0; i < counters; i++) { + regcache[i].ctrl = 0; + regcache[i].cnt_hi = 0; + regcache[i].cnt_lo = 0; - oprofilefs_create_ulong(sb, dir, "enabled", &ctr[i].enabled); - oprofilefs_create_ulong(sb, dir, "event", &ctr[i].event); - oprofilefs_create_file(sb, dir, "count", &count_fops); + if (!ctr[i].enabled) + continue; - /* Dummy entries */ - oprofilefs_create_ulong(sb, dir, "kernel", &ctr[i].kernel); - oprofilefs_create_ulong(sb, dir, "user", &ctr[i].user); - oprofilefs_create_ulong(sb, dir, "unit_mask", &ctr[i].unit_mask); + regcache[i].ctrl |= ctr[i].event | PMCR_PMEN | PMCR_PMST; + regcache[i].cnt_hi = (unsigned long)((ctr->count >> 32) & 
0xffff); + regcache[i].cnt_lo = (unsigned long)(ctr->count & 0xffffffff); } - - return 0; } -static int sh7750_perf_counter_start(void) +static void sh7750_ppc_cpu_setup(void *args) { - u16 pmcr; - - /* Enable counter 1 */ - if (ctr[0].enabled) { - pmcr = ctrl_inw(PMCR1); - WARN_ON(pmcr & PMCR_PMEN); - - pmcr &= ~PMCR_PMM_MASK; - pmcr |= ctr[0].event; - ctrl_outw(pmcr | PMCR_ENABLE, PMCR1); - } - - /* Enable counter 2 */ - if (ctr[1].enabled) { - pmcr = ctrl_inw(PMCR2); - WARN_ON(pmcr & PMCR_PMEN); + unsigned int counters = op_model_sh7750_ops.num_counters; + int i; - pmcr &= ~PMCR_PMM_MASK; - pmcr |= ctr[1].event; - ctrl_outw(pmcr | PMCR_ENABLE, PMCR2); + for (i = 0; i < counters; i++) { + __raw_writew(0, PMCR(i)); + __raw_writel(regcache[i].cnt_hi, PMCTRH(i)); + __raw_writel(regcache[i].cnt_lo, PMCTRL(i)); } - - return register_timer_hook(sh7750_timer_notify); } -static void sh7750_perf_counter_stop(void) +static void sh7750_ppc_cpu_start(void *args) { - ctrl_outw(ctrl_inw(PMCR1) & ~PMCR_PMEN, PMCR1); - ctrl_outw(ctrl_inw(PMCR2) & ~PMCR_PMEN, PMCR2); + unsigned int counters = op_model_sh7750_ops.num_counters; + int i; - unregister_timer_hook(sh7750_timer_notify); + for (i = 0; i < counters; i++) + __raw_writew(regcache[i].ctrl, PMCR(i)); } -static struct oprofile_operations sh7750_perf_counter_ops = { - .create_files = sh7750_perf_counter_create_files, - .start = sh7750_perf_counter_start, - .stop = sh7750_perf_counter_stop, -}; - -int __init oprofile_arch_init(struct oprofile_operations *ops) +static void sh7750_ppc_cpu_stop(void *args) { - if (!(current_cpu_data.flags & CPU_HAS_PERF_COUNTER)) - return -ENODEV; + unsigned int counters = op_model_sh7750_ops.num_counters; + int i; - ops = &sh7750_perf_counter_ops; - ops->cpu_type = "sh/sh7750"; + /* Disable the counters */ + for (i = 0; i < counters; i++) + __raw_writew(__raw_readw(PMCR(i)) & ~PMCR_PMEN, PMCR(i)); +} - printk(KERN_INFO "oprofile: using SH-4 performance monitoring.\n"); +static inline void sh7750_ppc_reset(void) +{ + unsigned int counters = op_model_sh7750_ops.num_counters; + int i; /* Clear the counters */ - ctrl_outw(ctrl_inw(PMCR1) | PMCR_PMCLR, PMCR1); - ctrl_outw(ctrl_inw(PMCR2) | PMCR_PMCLR, PMCR2); + for (i = 0; i < counters; i++) + __raw_writew(__raw_readw(PMCR(i)) | PMCR_PMCLR, PMCR(i)); +} - return 0; +static int sh7750_ppc_init(void) +{ + sh7750_ppc_reset(); + + return register_timer_hook(sh7750_timer_notify); } -void oprofile_arch_exit(void) +static void sh7750_ppc_exit(void) { + unregister_timer_hook(sh7750_timer_notify); + + sh7750_ppc_reset(); } + +struct op_sh_model op_model_sh7750_ops = { + .cpu_type = "sh/sh7750", + .num_counters = NR_CNTRS, + .reg_setup = sh7750_ppc_reg_setup, + .cpu_setup = sh7750_ppc_cpu_setup, + .cpu_start = sh7750_ppc_cpu_start, + .cpu_stop = sh7750_ppc_cpu_stop, + .init = sh7750_ppc_init, + .exit = sh7750_ppc_exit, + .create_files = sh7750_ppc_create_files, +}; diff --git a/arch/sh/tools/mach-types b/arch/sh/tools/mach-types index d0c2928d1066..284b7e867496 100644 --- a/arch/sh/tools/mach-types +++ b/arch/sh/tools/mach-types @@ -8,6 +8,7 @@ SE SH_SOLUTION_ENGINE HIGHLANDER SH_HIGHLANDER RTS7751R2D SH_RTS7751R2D +RSK SH_RSK # # List of companion chips / MFDs. 
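For reference, the indexed PMCR()/PMCTRH()/PMCTRL() macros introduced in op_model_sh7750.c above expand as follows for the two SH7750 performance counters. This is an illustrative aside derived from the PM_CR_BASE/PM_CTR_BASE values in that hunk, not additional patch content.

/* Expansion of the new indexed macros (illustration only):
 *
 *   PMCR(0)   == 0xff000084 (old PMCR1)     PMCR(1)   == 0xff000088 (old PMCR2)
 *   PMCTRH(0) == 0xff100004 (old PMCTR1H)   PMCTRH(1) == 0xff10000c (old PMCTR2H)
 *   PMCTRL(0) == 0xff100008 (old PMCTR1L)   PMCTRL(1) == 0xff100010 (old PMCTR2L)
 *
 * so per-counter code such as sh7750_read_counter() and sh7750_ppc_reset()
 * can loop over the counter index instead of special-casing counters 0 and 1.
 */
#define PM_CR_BASE	0xff000084		/* 16-bit control registers */
#define PM_CTR_BASE	0xff100004		/* 32-bit counter registers */
#define PMCR(n)		(PM_CR_BASE + ((n) * 0x04))
#define PMCTRH(n)	(PM_CTR_BASE + 0x00 + ((n) * 0x08))
#define PMCTRL(n)	(PM_CTR_BASE + 0x04 + ((n) * 0x08))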
@@ -46,6 +47,7 @@ R2D_1 RTS7751R2D_1 CAYMAN SH_CAYMAN SDK7780 SH_SDK7780 MIGOR SH_MIGOR +RSK7201 SH_RSK7201 RSK7203 SH_RSK7203 AP325RXA SH_AP325RXA SH7763RDP SH_SH7763RDP diff --git a/arch/sparc/include/asm/device.h b/arch/sparc/include/asm/device.h index 19790eb99cc6..3702e087df2c 100644 --- a/arch/sparc/include/asm/device.h +++ b/arch/sparc/include/asm/device.h @@ -20,4 +20,16 @@ struct dev_archdata { int numa_node; }; +static inline void dev_archdata_set_node(struct dev_archdata *ad, + struct device_node *np) +{ + ad->prom_node = np; +} + +static inline struct device_node * +dev_archdata_get_node(const struct dev_archdata *ad) +{ + return ad->prom_node; +} + #endif /* _ASM_SPARC_DEVICE_H */ diff --git a/arch/sparc64/kernel/idprom.c b/arch/sparc64/kernel/idprom.c index 5b45a808c621..a62ff83337cd 100644 --- a/arch/sparc64/kernel/idprom.c +++ b/arch/sparc64/kernel/idprom.c @@ -42,8 +42,5 @@ void __init idprom_init(void) idprom->id_cksum, calc_idprom_cksum(idprom)); } - printk("Ethernet address: %02x:%02x:%02x:%02x:%02x:%02x\n", - idprom->id_ethaddr[0], idprom->id_ethaddr[1], - idprom->id_ethaddr[2], idprom->id_ethaddr[3], - idprom->id_ethaddr[4], idprom->id_ethaddr[5]); + printk("Ethernet address: %pM\n", idprom->id_ethaddr); } diff --git a/arch/um/drivers/daemon_kern.c b/arch/um/drivers/daemon_kern.c index d53ff52bb404..b4a1522f2157 100644 --- a/arch/um/drivers/daemon_kern.c +++ b/arch/um/drivers/daemon_kern.c @@ -22,7 +22,7 @@ static void daemon_init(struct net_device *dev, void *data) struct daemon_data *dpri; struct daemon_init *init = data; - pri = dev->priv; + pri = netdev_priv(dev); dpri = (struct daemon_data *) pri->user; dpri->sock_type = init->sock_type; dpri->ctl_sock = init->ctl_sock; diff --git a/arch/um/drivers/mcast_kern.c b/arch/um/drivers/mcast_kern.c index 8c4378a76d63..ffc6416d5ed7 100644 --- a/arch/um/drivers/mcast_kern.c +++ b/arch/um/drivers/mcast_kern.c @@ -28,7 +28,7 @@ static void mcast_init(struct net_device *dev, void *data) struct mcast_data *dpri; struct mcast_init *init = data; - pri = dev->priv; + pri = netdev_priv(dev); dpri = (struct mcast_data *) pri->user; dpri->addr = init->addr; dpri->port = init->port; diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c index 8f44ebb0dec8..e14629c87de4 100644 --- a/arch/um/drivers/mconsole_kern.c +++ b/arch/um/drivers/mconsole_kern.c @@ -161,7 +161,8 @@ void mconsole_proc(struct mc_request *req) goto out_kill; } - file = dentry_open(nd.path.dentry, nd.path.mnt, O_RDONLY); + file = dentry_open(nd.path.dentry, nd.path.mnt, O_RDONLY, + current_cred()); if (IS_ERR(file)) { mconsole_reply(req, "Failed to open file", 1, 0); goto out_kill; diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c index 5b4ca8d93682..fde510b664d3 100644 --- a/arch/um/drivers/net_kern.c +++ b/arch/um/drivers/net_kern.c @@ -76,7 +76,7 @@ out: static int uml_net_rx(struct net_device *dev) { - struct uml_net_private *lp = dev->priv; + struct uml_net_private *lp = netdev_priv(dev); int pkt_len; struct sk_buff *skb; @@ -119,7 +119,7 @@ static void uml_dev_close(struct work_struct *work) static irqreturn_t uml_net_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; - struct uml_net_private *lp = dev->priv; + struct uml_net_private *lp = netdev_priv(dev); int err; if (!netif_running(dev)) @@ -150,7 +150,7 @@ out: static int uml_net_open(struct net_device *dev) { - struct uml_net_private *lp = dev->priv; + struct uml_net_private *lp = netdev_priv(dev); int err; if (lp->fd >= 0) { @@ -195,7 +195,7 @@ 
out: static int uml_net_close(struct net_device *dev) { - struct uml_net_private *lp = dev->priv; + struct uml_net_private *lp = netdev_priv(dev); netif_stop_queue(dev); @@ -213,7 +213,7 @@ static int uml_net_close(struct net_device *dev) static int uml_net_start_xmit(struct sk_buff *skb, struct net_device *dev) { - struct uml_net_private *lp = dev->priv; + struct uml_net_private *lp = netdev_priv(dev); unsigned long flags; int len; @@ -250,7 +250,7 @@ static int uml_net_start_xmit(struct sk_buff *skb, struct net_device *dev) static struct net_device_stats *uml_net_get_stats(struct net_device *dev) { - struct uml_net_private *lp = dev->priv; + struct uml_net_private *lp = netdev_priv(dev); return &lp->stats; } @@ -267,7 +267,7 @@ static void uml_net_tx_timeout(struct net_device *dev) static int uml_net_set_mac(struct net_device *dev, void *addr) { - struct uml_net_private *lp = dev->priv; + struct uml_net_private *lp = netdev_priv(dev); struct sockaddr *hwaddr = addr; spin_lock_irq(&lp->lock); @@ -368,7 +368,7 @@ static void net_device_release(struct device *dev) { struct uml_net *device = dev->driver_data; struct net_device *netdev = device->dev; - struct uml_net_private *lp = netdev->priv; + struct uml_net_private *lp = netdev_priv(netdev); if (lp->remove != NULL) (*lp->remove)(&lp->user); @@ -418,14 +418,9 @@ static void eth_configure(int n, void *init, char *mac, setup_etheraddr(mac, device->mac, dev->name); - printk(KERN_INFO "Netdevice %d ", n); - printk("(%02x:%02x:%02x:%02x:%02x:%02x) ", - device->mac[0], device->mac[1], - device->mac[2], device->mac[3], - device->mac[4], device->mac[5]); - printk(": "); + printk(KERN_INFO "Netdevice %d (%pM) : ", n, device->mac); - lp = dev->priv; + lp = netdev_priv(dev); /* This points to the transport private data. It's still clear, but we * must memset it to 0 *now*. Let's help the drivers. 
*/ memset(lp, 0, size); @@ -735,7 +730,7 @@ static int net_remove(int n, char **error_out) return -ENODEV; dev = device->dev; - lp = dev->priv; + lp = netdev_priv(dev); if (lp->fd > 0) return -EBUSY; unregister_netdev(dev); @@ -766,7 +761,7 @@ static int uml_inetaddr_event(struct notifier_block *this, unsigned long event, if (dev->open != uml_net_open) return NOTIFY_DONE; - lp = dev->priv; + lp = netdev_priv(dev); proc = NULL; switch (event) { diff --git a/arch/um/drivers/pcap_kern.c b/arch/um/drivers/pcap_kern.c index 3a750dd39be1..2860525f8ff6 100644 --- a/arch/um/drivers/pcap_kern.c +++ b/arch/um/drivers/pcap_kern.c @@ -21,7 +21,7 @@ void pcap_init(struct net_device *dev, void *data) struct pcap_data *ppri; struct pcap_init *init = data; - pri = dev->priv; + pri = netdev_priv(dev); ppri = (struct pcap_data *) pri->user; ppri->host_if = init->host_if; ppri->promisc = init->promisc; diff --git a/arch/um/drivers/slip_kern.c b/arch/um/drivers/slip_kern.c index d19faec7046e..5ec17563142e 100644 --- a/arch/um/drivers/slip_kern.c +++ b/arch/um/drivers/slip_kern.c @@ -19,7 +19,7 @@ static void slip_init(struct net_device *dev, void *data) struct slip_data *spri; struct slip_init *init = data; - private = dev->priv; + private = netdev_priv(dev); spri = (struct slip_data *) private->user; memset(spri->name, 0, sizeof(spri->name)); diff --git a/arch/um/drivers/slirp_kern.c b/arch/um/drivers/slirp_kern.c index d987af277db9..f15a6e7654f3 100644 --- a/arch/um/drivers/slirp_kern.c +++ b/arch/um/drivers/slirp_kern.c @@ -22,7 +22,7 @@ void slirp_init(struct net_device *dev, void *data) struct slirp_init *init = data; int i; - private = dev->priv; + private = netdev_priv(dev); spri = (struct slirp_data *) private->user; spri->argw = init->argw; diff --git a/arch/um/drivers/vde_kern.c b/arch/um/drivers/vde_kern.c index add7e722defb..1b852bffdebc 100644 --- a/arch/um/drivers/vde_kern.c +++ b/arch/um/drivers/vde_kern.c @@ -19,7 +19,7 @@ static void vde_init(struct net_device *dev, void *data) struct uml_net_private *pri; struct vde_data *vpri; - pri = dev->priv; + pri = netdev_priv(dev); vpri = (struct vde_data *) pri->user; vpri->vde_switch = init->vde_switch; diff --git a/arch/um/os-Linux/drivers/ethertap_kern.c b/arch/um/os-Linux/drivers/ethertap_kern.c index 046a131f6104..7f6f9a71aae4 100644 --- a/arch/um/os-Linux/drivers/ethertap_kern.c +++ b/arch/um/os-Linux/drivers/ethertap_kern.c @@ -22,7 +22,7 @@ static void etap_init(struct net_device *dev, void *data) struct ethertap_data *epri; struct ethertap_init *init = data; - pri = dev->priv; + pri = netdev_priv(dev); epri = (struct ethertap_data *) pri->user; epri->dev_name = init->dev_name; epri->gate_addr = init->gate_addr; diff --git a/arch/um/os-Linux/drivers/tuntap_kern.c b/arch/um/os-Linux/drivers/tuntap_kern.c index 6b9e33d5de20..4048800e4696 100644 --- a/arch/um/os-Linux/drivers/tuntap_kern.c +++ b/arch/um/os-Linux/drivers/tuntap_kern.c @@ -21,7 +21,7 @@ static void tuntap_init(struct net_device *dev, void *data) struct tuntap_data *tpri; struct tuntap_init *init = data; - pri = dev->priv; + pri = netdev_priv(dev); tpri = (struct tuntap_data *) pri->user; tpri->dev_name = init->dev_name; tpri->fixed_config = (init->dev_name != NULL); diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index ac22bb7719f7..98a0ed52b5c3 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -19,6 +19,8 @@ config X86_64 config X86 def_bool y select HAVE_AOUT if X86_32 + select HAVE_READQ + select HAVE_WRITEQ select HAVE_UNSTABLE_SCHED_CLOCK select HAVE_IDE select 
HAVE_OPROFILE @@ -29,11 +31,14 @@ config X86 select HAVE_FTRACE_MCOUNT_RECORD select HAVE_DYNAMIC_FTRACE select HAVE_FUNCTION_TRACER + select HAVE_FUNCTION_GRAPH_TRACER + select HAVE_FUNCTION_TRACE_MCOUNT_TEST select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64) select HAVE_ARCH_KGDB if !X86_VOYAGER select HAVE_ARCH_TRACEHOOK select HAVE_GENERIC_DMA_COHERENT if X86_32 select HAVE_EFFICIENT_UNALIGNED_ACCESS + select USER_STACKTRACE_SUPPORT config ARCH_DEFCONFIG string @@ -87,6 +92,10 @@ config GENERIC_IOMAP config GENERIC_BUG def_bool y depends on BUG + select GENERIC_BUG_RELATIVE_POINTERS if X86_64 + +config GENERIC_BUG_RELATIVE_POINTERS + bool config GENERIC_HWEIGHT def_bool y @@ -242,21 +251,13 @@ config X86_FIND_SMP_CONFIG def_bool y depends on X86_MPPARSE || X86_VOYAGER -if ACPI config X86_MPPARSE - def_bool y - bool "Enable MPS table" + bool "Enable MPS table" if ACPI + default y depends on X86_LOCAL_APIC help For old smp systems that do not have proper acpi support. Newer systems (esp with 64bit cpus) with acpi support, MADT and DSDT will override it -endif - -if !ACPI -config X86_MPPARSE - def_bool y - depends on X86_LOCAL_APIC -endif choice prompt "Subarchitecture Type" @@ -367,10 +368,10 @@ config X86_RDC321X as R-8610-(G). If you don't have one of these chips, you should say N here. -config SCHED_NO_NO_OMIT_FRAME_POINTER +config SCHED_OMIT_FRAME_POINTER def_bool y prompt "Single-depth WCHAN output" - depends on X86_32 + depends on X86 help Calculate simpler /proc/<PID>/wchan values. If this option is disabled then wchan values will recurse back to the @@ -465,10 +466,6 @@ config X86_CYCLONE_TIMER def_bool y depends on X86_GENERICARCH -config ES7000_CLUSTERED_APIC - def_bool y - depends on SMP && X86_ES7000 && MPENTIUMIII - source "arch/x86/Kconfig.cpu" config HPET_TIMER @@ -569,7 +566,7 @@ config AMD_IOMMU # need this always selected by IOMMU for the VIA workaround config SWIOTLB - bool + def_bool y if X86_64 help Support for software bounce buffers used on x86-64 systems which don't have a hardware IOMMU (e.g. the current generation @@ -660,6 +657,30 @@ config X86_VISWS_APIC def_bool y depends on X86_32 && X86_VISWS +config X86_REROUTE_FOR_BROKEN_BOOT_IRQS + bool "Reroute for broken boot IRQs" + default n + depends on X86_IO_APIC + help + This option enables a workaround that fixes a source of + spurious interrupts. This is recommended when threaded + interrupt handling is used on systems where the generation of + superfluous "boot interrupts" cannot be disabled. + + Some chipsets generate a legacy INTx "boot IRQ" when the IRQ + entry in the chipset's IO-APIC is masked (as, e.g. the RT + kernel does during interrupt handling). On chipsets where this + boot IRQ generation cannot be disabled, this workaround keeps + the original IRQ line masked so that only the equivalent "boot + IRQ" is delivered to the CPUs. The workaround also tells the + kernel to set up the IRQ handler on the boot IRQ line. In this + way only one interrupt is delivered to the kernel. Otherwise + the spurious second interrupt may cause the kernel to bring + down (vital) interrupt lines. + + Only affects "broken" chipsets. Interrupt sharing may be + increased on these systems. 
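As a point of reference for the UML network driver hunks above: they apply two mechanical conversions throughout, dev->priv becoming netdev_priv(dev) and open-coded "%02x:%02x:..." MAC printing becoming the "%pM" printk format extension. A minimal sketch of the resulting pattern follows; the demo_priv structure and demo_open() function are invented for illustration and are not part of the patch.

#include <linux/netdevice.h>
#include <linux/kernel.h>

struct demo_priv {			/* hypothetical driver-private state */
	int fd;
};

static int demo_open(struct net_device *dev)
{
	/* netdev_priv() returns the private area allocated together with
	 * struct net_device, replacing the old dev->priv pointer. */
	struct demo_priv *lp = netdev_priv(dev);

	/* %pM prints a 6-byte MAC address as xx:xx:xx:xx:xx:xx */
	printk(KERN_INFO "%s: opened (%pM)\n", dev->name, dev->dev_addr);

	lp->fd = -1;
	return 0;
}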
+ config X86_MCE bool "Machine Check Exception" depends on !X86_VOYAGER @@ -956,24 +977,37 @@ config X86_PAE config ARCH_PHYS_ADDR_T_64BIT def_bool X86_64 || X86_PAE +config DIRECT_GBPAGES + bool "Enable 1GB pages for kernel pagetables" if EMBEDDED + default y + depends on X86_64 + help + Allow the kernel linear mapping to use 1GB pages on CPUs that + support it. This can improve the kernel's performance a tiny bit by + reducing TLB pressure. If in doubt, say "Y". + # Common NUMA Features config NUMA - bool "Numa Memory Allocation and Scheduler Support (EXPERIMENTAL)" + bool "Numa Memory Allocation and Scheduler Support" depends on SMP depends on X86_64 || (X86_32 && HIGHMEM64G && (X86_NUMAQ || X86_BIGSMP || X86_SUMMIT && ACPI) && EXPERIMENTAL) default n if X86_PC default y if (X86_NUMAQ || X86_SUMMIT || X86_BIGSMP) help Enable NUMA (Non Uniform Memory Access) support. + The kernel will try to allocate memory used by a CPU on the local memory controller of the CPU and add some more NUMA awareness to the kernel. - For 32-bit this is currently highly experimental and should be only - used for kernel development. It might also cause boot failures. - For 64-bit this is recommended on all multiprocessor Opteron systems. - If the system is EM64T, you should say N unless your system is - EM64T NUMA. + For 64-bit this is recommended if the system is Intel Core i7 + (or later), AMD Opteron, or EM64T NUMA. + + For 32-bit this is only needed on (rare) 32-bit-only platforms + that support NUMA topologies, such as NUMAQ / Summit, or if you + boot a 32-bit kernel on a 64-bit NUMA platform. + + Otherwise, you should say N. comment "NUMA (Summit) requires SMP, 64GB highmem support, ACPI" depends on X86_32 && X86_SUMMIT && (!HIGHMEM64G || !ACPI) @@ -1493,6 +1527,10 @@ config ARCH_ENABLE_MEMORY_HOTPLUG def_bool y depends on X86_64 || (X86_32 && HIGHMEM) +config ARCH_ENABLE_MEMORY_HOTREMOVE + def_bool y + depends on MEMORY_HOTPLUG + config HAVE_ARCH_EARLY_PFN_TO_NID def_bool X86_64 depends on NUMA @@ -1632,13 +1670,6 @@ config APM_ALLOW_INTS many of the newer IBM Thinkpads. If you experience hangs when you suspend, try setting this to Y. Otherwise, say N. -config APM_REAL_MODE_POWER_OFF - bool "Use real mode APM BIOS call to power off" - help - Use real mode APM BIOS calls to switch off the computer. This is - a work-around for a number of buggy BIOSes. Switch this option on if - your computer crashes instead of powering off properly. - endif # APM source "arch/x86/kernel/cpu/cpufreq/Kconfig" diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu index 8e99073b9e0f..85a78575956c 100644 --- a/arch/x86/Kconfig.cpu +++ b/arch/x86/Kconfig.cpu @@ -515,12 +515,12 @@ config CPU_SUP_UMC_32 config X86_DS def_bool X86_PTRACE_BTS depends on X86_DEBUGCTLMSR + select HAVE_HW_BRANCH_TRACER config X86_PTRACE_BTS bool "Branch Trace Store" default y depends on X86_DEBUGCTLMSR - depends on BROKEN help This adds a ptrace interface to the hardware's branch trace store. diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug index 2a3dfbd5e677..10d6cc3fd052 100644 --- a/arch/x86/Kconfig.debug +++ b/arch/x86/Kconfig.debug @@ -114,18 +114,6 @@ config DEBUG_RODATA data. This is recommended so that we can catch kernel bugs sooner. If in doubt, say "Y". -config DIRECT_GBPAGES - bool "Enable gbpages-mapped kernel pagetables" - depends on DEBUG_KERNEL && EXPERIMENTAL && X86_64 - help - Enable gigabyte pages support (if the CPU supports it). This can - improve the kernel's performance a tiny bit by reducing TLB - pressure. 
- - This is experimental code. - - If in doubt, say "N". - config DEBUG_RODATA_TEST bool "Testcase for the DEBUG_RODATA feature" depends on DEBUG_RODATA @@ -186,14 +174,10 @@ config IOMMU_LEAK Add a simple leak tracer to the IOMMU code. This is useful when you are debugging a buggy device driver that leaks IOMMU mappings. -config MMIOTRACE_HOOKS - bool - config MMIOTRACE bool "Memory mapped IO tracing" depends on DEBUG_KERNEL && PCI select TRACING - select MMIOTRACE_HOOKS help Mmiotrace traces Memory Mapped I/O access and is meant for debugging and reverse engineering. It is called from the ioremap @@ -307,10 +291,10 @@ config OPTIMIZE_INLINING developers have marked 'inline'. Doing so takes away freedom from gcc to do what it thinks is best, which is desirable for the gcc 3.x series of compilers. The gcc 4.x series have a rewritten inlining algorithm and - disabling this option will generate a smaller kernel there. Hopefully - this algorithm is so good that allowing gcc4 to make the decision can - become the default in the future, until then this option is there to - test gcc for this. + enabling this option will generate a smaller kernel there. Hopefully + this algorithm is so good that allowing gcc 4.x and above to make the + decision will become the default in the future. Until then this option + is there to test gcc for this. If unsure, say N. diff --git a/arch/x86/boot/video-vga.c b/arch/x86/boot/video-vga.c index b939cb476dec..5d4742ed4aa2 100644 --- a/arch/x86/boot/video-vga.c +++ b/arch/x86/boot/video-vga.c @@ -34,7 +34,7 @@ static struct mode_info cga_modes[] = { { VIDEO_80x25, 80, 25, 0 }, }; -__videocard video_vga; +static __videocard video_vga; /* Set basic 80x25 mode */ static u8 vga_set_basic_mode(void) @@ -259,7 +259,7 @@ static int vga_probe(void) return mode_count[adapter]; } -__videocard video_vga = { +static __videocard video_vga = { .card_name = "VGA", .probe = vga_probe, .set_mode = vga_set_mode, diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c index 83598b23093a..3bef2c1febe9 100644 --- a/arch/x86/boot/video.c +++ b/arch/x86/boot/video.c @@ -226,7 +226,7 @@ static unsigned int mode_menu(void) #ifdef CONFIG_VIDEO_RETAIN /* Save screen content to the heap */ -struct saved_screen { +static struct saved_screen { int x, y; int curx, cury; u16 *data; diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig index 13b8c86ae985..b30a08ed8eb4 100644 --- a/arch/x86/configs/i386_defconfig +++ b/arch/x86/configs/i386_defconfig @@ -77,7 +77,7 @@ CONFIG_AUDIT=y CONFIG_AUDITSYSCALL=y CONFIG_AUDIT_TREE=y # CONFIG_IKCONFIG is not set -CONFIG_LOG_BUF_SHIFT=17 +CONFIG_LOG_BUF_SHIFT=18 CONFIG_CGROUPS=y # CONFIG_CGROUP_DEBUG is not set CONFIG_CGROUP_NS=y @@ -298,7 +298,7 @@ CONFIG_KEXEC=y CONFIG_CRASH_DUMP=y # CONFIG_KEXEC_JUMP is not set CONFIG_PHYSICAL_START=0x1000000 -CONFIG_RELOCATABLE=y +# CONFIG_RELOCATABLE is not set CONFIG_PHYSICAL_ALIGN=0x200000 CONFIG_HOTPLUG_CPU=y # CONFIG_COMPAT_VDSO is not set diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig index f0a03d7a7d63..0e7dbc0a3e46 100644 --- a/arch/x86/configs/x86_64_defconfig +++ b/arch/x86/configs/x86_64_defconfig @@ -77,7 +77,7 @@ CONFIG_AUDIT=y CONFIG_AUDITSYSCALL=y CONFIG_AUDIT_TREE=y # CONFIG_IKCONFIG is not set -CONFIG_LOG_BUF_SHIFT=17 +CONFIG_LOG_BUF_SHIFT=18 CONFIG_CGROUPS=y # CONFIG_CGROUP_DEBUG is not set CONFIG_CGROUP_NS=y @@ -298,7 +298,7 @@ CONFIG_SCHED_HRTICK=y CONFIG_KEXEC=y CONFIG_CRASH_DUMP=y CONFIG_PHYSICAL_START=0x1000000 -CONFIG_RELOCATABLE=y +# 
CONFIG_RELOCATABLE is not set CONFIG_PHYSICAL_ALIGN=0x200000 CONFIG_HOTPLUG_CPU=y # CONFIG_COMPAT_VDSO is not set diff --git a/arch/x86/crypto/crc32c-intel.c b/arch/x86/crypto/crc32c-intel.c index 070afc5b6c94..b9d00261703c 100644 --- a/arch/x86/crypto/crc32c-intel.c +++ b/arch/x86/crypto/crc32c-intel.c @@ -6,13 +6,22 @@ * Intel(R) 64 and IA-32 Architectures Software Developer's Manual * Volume 2A: Instruction Set Reference, A-M * - * Copyright (c) 2008 Austin Zhang <austin_zhang@linux.intel.com> - * Copyright (c) 2008 Kent Liu <kent.liu@intel.com> + * Copyright (C) 2008 Intel Corporation + * Authors: Austin Zhang <austin_zhang@linux.intel.com> + * Kent Liu <kent.liu@intel.com> * * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * */ #include <linux/init.h> @@ -75,99 +84,92 @@ static u32 __pure crc32c_intel_le_hw(u32 crc, unsigned char const *p, size_t len * If your algorithm starts with ~0, then XOR with ~0 before you set * the seed. */ -static int crc32c_intel_setkey(struct crypto_ahash *hash, const u8 *key, +static int crc32c_intel_setkey(struct crypto_shash *hash, const u8 *key, unsigned int keylen) { - u32 *mctx = crypto_ahash_ctx(hash); + u32 *mctx = crypto_shash_ctx(hash); if (keylen != sizeof(u32)) { - crypto_ahash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN); + crypto_shash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } *mctx = le32_to_cpup((__le32 *)key); return 0; } -static int crc32c_intel_init(struct ahash_request *req) +static int crc32c_intel_init(struct shash_desc *desc) { - u32 *mctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); - u32 *crcp = ahash_request_ctx(req); + u32 *mctx = crypto_shash_ctx(desc->tfm); + u32 *crcp = shash_desc_ctx(desc); *crcp = *mctx; return 0; } -static int crc32c_intel_update(struct ahash_request *req) +static int crc32c_intel_update(struct shash_desc *desc, const u8 *data, + unsigned int len) { - struct crypto_hash_walk walk; - u32 *crcp = ahash_request_ctx(req); - u32 crc = *crcp; - int nbytes; - - for (nbytes = crypto_hash_walk_first(req, &walk); nbytes; - nbytes = crypto_hash_walk_done(&walk, 0)) - crc = crc32c_intel_le_hw(crc, walk.data, nbytes); + u32 *crcp = shash_desc_ctx(desc); - *crcp = crc; + *crcp = crc32c_intel_le_hw(*crcp, data, len); return 0; } -static int crc32c_intel_final(struct ahash_request *req) +static int __crc32c_intel_finup(u32 *crcp, const u8 *data, unsigned int len, + u8 *out) { - u32 *crcp = ahash_request_ctx(req); - - *(__le32 *)req->result = ~cpu_to_le32p(crcp); + *(__le32 *)out = ~cpu_to_le32(crc32c_intel_le_hw(*crcp, data, len)); return 0; } -static int crc32c_intel_digest(struct ahash_request *req) +static int crc32c_intel_finup(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) { - struct 
crypto_hash_walk walk; - u32 *mctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); - u32 crc = *mctx; - int nbytes; + return __crc32c_intel_finup(shash_desc_ctx(desc), data, len, out); +} - for (nbytes = crypto_hash_walk_first(req, &walk); nbytes; - nbytes = crypto_hash_walk_done(&walk, 0)) - crc = crc32c_intel_le_hw(crc, walk.data, nbytes); +static int crc32c_intel_final(struct shash_desc *desc, u8 *out) +{ + u32 *crcp = shash_desc_ctx(desc); - *(__le32 *)req->result = ~cpu_to_le32(crc); + *(__le32 *)out = ~cpu_to_le32p(crcp); return 0; } +static int crc32c_intel_digest(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) +{ + return __crc32c_intel_finup(crypto_shash_ctx(desc->tfm), data, len, + out); +} + static int crc32c_intel_cra_init(struct crypto_tfm *tfm) { u32 *key = crypto_tfm_ctx(tfm); *key = ~0; - tfm->crt_ahash.reqsize = sizeof(u32); - return 0; } -static struct crypto_alg alg = { - .cra_name = "crc32c", - .cra_driver_name = "crc32c-intel", - .cra_priority = 200, - .cra_flags = CRYPTO_ALG_TYPE_AHASH, - .cra_blocksize = CHKSUM_BLOCK_SIZE, - .cra_alignmask = 3, - .cra_ctxsize = sizeof(u32), - .cra_module = THIS_MODULE, - .cra_list = LIST_HEAD_INIT(alg.cra_list), - .cra_init = crc32c_intel_cra_init, - .cra_type = &crypto_ahash_type, - .cra_u = { - .ahash = { - .digestsize = CHKSUM_DIGEST_SIZE, - .setkey = crc32c_intel_setkey, - .init = crc32c_intel_init, - .update = crc32c_intel_update, - .final = crc32c_intel_final, - .digest = crc32c_intel_digest, - } +static struct shash_alg alg = { + .setkey = crc32c_intel_setkey, + .init = crc32c_intel_init, + .update = crc32c_intel_update, + .final = crc32c_intel_final, + .finup = crc32c_intel_finup, + .digest = crc32c_intel_digest, + .descsize = sizeof(u32), + .digestsize = CHKSUM_DIGEST_SIZE, + .base = { + .cra_name = "crc32c", + .cra_driver_name = "crc32c-intel", + .cra_priority = 200, + .cra_blocksize = CHKSUM_BLOCK_SIZE, + .cra_ctxsize = sizeof(u32), + .cra_module = THIS_MODULE, + .cra_init = crc32c_intel_cra_init, } }; @@ -175,14 +177,14 @@ static struct crypto_alg alg = { static int __init crc32c_intel_mod_init(void) { if (cpu_has_xmm4_2) - return crypto_register_alg(&alg); + return crypto_register_shash(&alg); else return -ENODEV; } static void __exit crc32c_intel_mod_fini(void) { - crypto_unregister_alg(&alg); + crypto_unregister_shash(&alg); } module_init(crc32c_intel_mod_init); @@ -194,4 +196,3 @@ MODULE_LICENSE("GPL"); MODULE_ALIAS("crc32c"); MODULE_ALIAS("crc32c-intel"); - diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c index 127ec3f07214..2a4d073d2cf1 100644 --- a/arch/x86/ia32/ia32_aout.c +++ b/arch/x86/ia32/ia32_aout.c @@ -327,7 +327,7 @@ static int load_aout_binary(struct linux_binprm *bprm, struct pt_regs *regs) current->mm->cached_hole_size = 0; current->mm->mmap = NULL; - compute_creds(bprm); + install_exec_creds(bprm); current->flags &= ~PF_FORKNOEXEC; if (N_MAGIC(ex) == OMAGIC) { diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c index 4bc02b23674b..b195f85526e3 100644 --- a/arch/x86/ia32/ia32_signal.c +++ b/arch/x86/ia32/ia32_signal.c @@ -32,6 +32,8 @@ #include <asm/proto.h> #include <asm/vdso.h> +#include <asm/sigframe.h> + #define DEBUG_SIG 0 #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) @@ -41,7 +43,6 @@ X86_EFLAGS_ZF | X86_EFLAGS_AF | X86_EFLAGS_PF | \ X86_EFLAGS_CF) -asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset); void signal_fault(struct pt_regs *regs, void __user *frame, char *where); int 
copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from) @@ -173,47 +174,28 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr, /* * Do a signal return; undo the signal stack. */ +#define COPY(x) { \ + err |= __get_user(regs->x, &sc->x); \ +} -struct sigframe -{ - u32 pretcode; - int sig; - struct sigcontext_ia32 sc; - struct _fpstate_ia32 fpstate_unused; /* look at kernel/sigframe.h */ - unsigned int extramask[_COMPAT_NSIG_WORDS-1]; - char retcode[8]; - /* fp state follows here */ -}; - -struct rt_sigframe -{ - u32 pretcode; - int sig; - u32 pinfo; - u32 puc; - compat_siginfo_t info; - struct ucontext_ia32 uc; - char retcode[8]; - /* fp state follows here */ -}; - -#define COPY(x) { \ - unsigned int reg; \ - err |= __get_user(reg, &sc->x); \ - regs->x = reg; \ +#define COPY_SEG_CPL3(seg) { \ + unsigned short tmp; \ + err |= __get_user(tmp, &sc->seg); \ + regs->seg = tmp | 3; \ } -#define RELOAD_SEG(seg,mask) \ - { unsigned int cur; \ - unsigned short pre; \ - err |= __get_user(pre, &sc->seg); \ - savesegment(seg, cur); \ - pre |= mask; \ - if (pre != cur) loadsegment(seg, pre); } +#define RELOAD_SEG(seg) { \ + unsigned int cur, pre; \ + err |= __get_user(pre, &sc->seg); \ + savesegment(seg, cur); \ + pre |= 3; \ + if (pre != cur) \ + loadsegment(seg, pre); \ +} static int ia32_restore_sigcontext(struct pt_regs *regs, struct sigcontext_ia32 __user *sc, - unsigned int *peax) + unsigned int *pax) { unsigned int tmpflags, gs, oldgs, err = 0; void __user *buf; @@ -240,18 +222,16 @@ static int ia32_restore_sigcontext(struct pt_regs *regs, if (gs != oldgs) load_gs_index(gs); - RELOAD_SEG(fs, 3); - RELOAD_SEG(ds, 3); - RELOAD_SEG(es, 3); + RELOAD_SEG(fs); + RELOAD_SEG(ds); + RELOAD_SEG(es); COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx); COPY(dx); COPY(cx); COPY(ip); /* Don't touch extended registers */ - err |= __get_user(regs->cs, &sc->cs); - regs->cs |= 3; - err |= __get_user(regs->ss, &sc->ss); - regs->ss |= 3; + COPY_SEG_CPL3(cs); + COPY_SEG_CPL3(ss); err |= __get_user(tmpflags, &sc->flags); regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS); @@ -262,15 +242,13 @@ static int ia32_restore_sigcontext(struct pt_regs *regs, buf = compat_ptr(tmp); err |= restore_i387_xstate_ia32(buf); - err |= __get_user(tmp, &sc->ax); - *peax = tmp; - + err |= __get_user(*pax, &sc->ax); return err; } asmlinkage long sys32_sigreturn(struct pt_regs *regs) { - struct sigframe __user *frame = (struct sigframe __user *)(regs->sp-8); + struct sigframe_ia32 __user *frame = (struct sigframe_ia32 __user *)(regs->sp-8); sigset_t set; unsigned int ax; @@ -300,12 +278,12 @@ badframe: asmlinkage long sys32_rt_sigreturn(struct pt_regs *regs) { - struct rt_sigframe __user *frame; + struct rt_sigframe_ia32 __user *frame; sigset_t set; unsigned int ax; struct pt_regs tregs; - frame = (struct rt_sigframe __user *)(regs->sp - 4); + frame = (struct rt_sigframe_ia32 __user *)(regs->sp - 4); if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) goto badframe; @@ -359,20 +337,15 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc, err |= __put_user(regs->dx, &sc->dx); err |= __put_user(regs->cx, &sc->cx); err |= __put_user(regs->ax, &sc->ax); - err |= __put_user(regs->cs, &sc->cs); - err |= __put_user(regs->ss, &sc->ss); err |= __put_user(current->thread.trap_no, &sc->trapno); err |= __put_user(current->thread.error_code, &sc->err); err |= __put_user(regs->ip, &sc->ip); + err |= __put_user(regs->cs, (unsigned int __user *)&sc->cs); err |= __put_user(regs->flags, 
&sc->flags); err |= __put_user(regs->sp, &sc->sp_at_signal); + err |= __put_user(regs->ss, (unsigned int __user *)&sc->ss); - tmp = save_i387_xstate_ia32(fpstate); - if (tmp < 0) - err = -EFAULT; - else - err |= __put_user(ptr_to_compat(tmp ? fpstate : NULL), - &sc->fpstate); + err |= __put_user(ptr_to_compat(fpstate), &sc->fpstate); /* non-iBCS2 extensions.. */ err |= __put_user(mask, &sc->oldmask); @@ -400,7 +373,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, } /* This is the legacy signal stack switching. */ - else if ((regs->ss & 0xffff) != __USER_DS && + else if ((regs->ss & 0xffff) != __USER32_DS && !(ka->sa.sa_flags & SA_RESTORER) && ka->sa.sa_restorer) sp = (unsigned long) ka->sa.sa_restorer; @@ -408,6 +381,8 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, if (used_math()) { sp = sp - sig_xstate_ia32_size; *fpstate = (struct _fpstate_ia32 *) sp; + if (save_i387_xstate_ia32(*fpstate) < 0) + return (void __user *) -1L; } sp -= frame_size; @@ -420,7 +395,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, int ia32_setup_frame(int sig, struct k_sigaction *ka, compat_sigset_t *set, struct pt_regs *regs) { - struct sigframe __user *frame; + struct sigframe_ia32 __user *frame; void __user *restorer; int err = 0; void __user *fpstate = NULL; @@ -430,12 +405,10 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka, u16 poplmovl; u32 val; u16 int80; - u16 pad; } __attribute__((packed)) code = { 0xb858, /* popl %eax ; movl $...,%eax */ __NR_ia32_sigreturn, 0x80cd, /* int $0x80 */ - 0, }; frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate); @@ -471,7 +444,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka, * These are actually not used anymore, but left because some * gdb versions depend on them as a marker. */ - err |= __copy_to_user(frame->retcode, &code, 8); + err |= __put_user(*((u64 *)&code), (u64 *)frame->retcode); if (err) return -EFAULT; @@ -501,7 +474,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka, int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, compat_sigset_t *set, struct pt_regs *regs) { - struct rt_sigframe __user *frame; + struct rt_sigframe_ia32 __user *frame; void __user *restorer; int err = 0; void __user *fpstate = NULL; @@ -511,8 +484,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, u8 movl; u32 val; u16 int80; - u16 pad; - u8 pad2; + u8 pad; } __attribute__((packed)) code = { 0xb8, __NR_ia32_rt_sigreturn, @@ -559,7 +531,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, * Not actually used anymore, but left because some gdb * versions need it. 
*/ - err |= __copy_to_user(frame->retcode, &code, 8); + err |= __put_user(*((u64 *)&code), (u64 *)frame->retcode); if (err) return -EFAULT; @@ -572,11 +544,6 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, regs->dx = (unsigned long) &frame->info; regs->cx = (unsigned long) &frame->uc; - /* Make -mregparm=3 work */ - regs->ax = sig; - regs->dx = (unsigned long) &frame->info; - regs->cx = (unsigned long) &frame->uc; - loadsegment(ds, __USER32_DS); loadsegment(es, __USER32_DS); diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index 3b1510b4fc57..25caa0738af5 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h @@ -193,6 +193,7 @@ extern u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask); static inline void lapic_shutdown(void) { } #define local_apic_timer_c2_ok 1 static inline void init_apic_mappings(void) { } +static inline void disable_local_APIC(void) { } #endif /* !CONFIG_X86_LOCAL_APIC */ diff --git a/arch/x86/include/asm/bigsmp/apic.h b/arch/x86/include/asm/bigsmp/apic.h index 1d9543b9d358..ce547f24a1cd 100644 --- a/arch/x86/include/asm/bigsmp/apic.h +++ b/arch/x86/include/asm/bigsmp/apic.h @@ -24,8 +24,6 @@ static inline cpumask_t target_cpus(void) #define INT_DELIVERY_MODE (dest_Fixed) #define INT_DEST_MODE (0) /* phys delivery to target proc */ #define NO_BALANCE_IRQ (0) -#define WAKE_SECONDARY_VIA_INIT - static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid) { diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h index 360010322711..9fa9dcdf344b 100644 --- a/arch/x86/include/asm/bitops.h +++ b/arch/x86/include/asm/bitops.h @@ -168,7 +168,15 @@ static inline void __change_bit(int nr, volatile unsigned long *addr) */ static inline void change_bit(int nr, volatile unsigned long *addr) { - asm volatile(LOCK_PREFIX "btc %1,%0" : ADDR : "Ir" (nr)); + if (IS_IMMEDIATE(nr)) { + asm volatile(LOCK_PREFIX "xorb %1,%0" + : CONST_MASK_ADDR(nr, addr) + : "iq" ((u8)CONST_MASK(nr))); + } else { + asm volatile(LOCK_PREFIX "btc %1,%0" + : BITOP_ADDR(addr) + : "Ir" (nr)); + } } /** diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h index 3def2065fcea..d9cf1cd156d2 100644 --- a/arch/x86/include/asm/bug.h +++ b/arch/x86/include/asm/bug.h @@ -9,7 +9,7 @@ #ifdef CONFIG_X86_32 # define __BUG_C0 "2:\t.long 1b, %c0\n" #else -# define __BUG_C0 "2:\t.quad 1b, %c0\n" +# define __BUG_C0 "2:\t.long 1b - 2b, %c0 - 2b\n" #endif #define BUG() \ diff --git a/arch/x86/include/asm/byteorder.h b/arch/x86/include/asm/byteorder.h index e02ae2d89acf..f110ad417df3 100644 --- a/arch/x86/include/asm/byteorder.h +++ b/arch/x86/include/asm/byteorder.h @@ -4,26 +4,33 @@ #include <asm/types.h> #include <linux/compiler.h> -#ifdef __GNUC__ +#define __LITTLE_ENDIAN -#ifdef __i386__ - -static inline __attribute_const__ __u32 ___arch__swab32(__u32 x) +static inline __attribute_const__ __u32 __arch_swab32(__u32 val) { -#ifdef CONFIG_X86_BSWAP - asm("bswap %0" : "=r" (x) : "0" (x)); -#else +#ifdef __i386__ +# ifdef CONFIG_X86_BSWAP + asm("bswap %0" : "=r" (val) : "0" (val)); +# else asm("xchgb %b0,%h0\n\t" /* swap lower bytes */ "rorl $16,%0\n\t" /* swap words */ "xchgb %b0,%h0" /* swap higher bytes */ - : "=q" (x) - : "0" (x)); + : "=q" (val) + : "0" (val)); +# endif + +#else /* __i386__ */ + asm("bswapl %0" + : "=r" (val) + : "0" (val)); #endif - return x; + return val; } +#define __arch_swab32 __arch_swab32 -static inline __attribute_const__ __u64 ___arch__swab64(__u64 val) +static inline 
__attribute_const__ __u64 __arch_swab64(__u64 val) { +#ifdef __i386__ union { struct { __u32 a; @@ -32,50 +39,27 @@ static inline __attribute_const__ __u64 ___arch__swab64(__u64 val) __u64 u; } v; v.u = val; -#ifdef CONFIG_X86_BSWAP +# ifdef CONFIG_X86_BSWAP asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1" : "=r" (v.s.a), "=r" (v.s.b) : "0" (v.s.a), "1" (v.s.b)); -#else - v.s.a = ___arch__swab32(v.s.a); - v.s.b = ___arch__swab32(v.s.b); +# else + v.s.a = __arch_swab32(v.s.a); + v.s.b = __arch_swab32(v.s.b); asm("xchgl %0,%1" : "=r" (v.s.a), "=r" (v.s.b) : "0" (v.s.a), "1" (v.s.b)); -#endif +# endif return v.u; -} - #else /* __i386__ */ - -static inline __attribute_const__ __u64 ___arch__swab64(__u64 x) -{ asm("bswapq %0" - : "=r" (x) - : "0" (x)); - return x; -} - -static inline __attribute_const__ __u32 ___arch__swab32(__u32 x) -{ - asm("bswapl %0" - : "=r" (x) - : "0" (x)); - return x; -} - + : "=r" (val) + : "0" (val)); + return val; #endif +} +#define __arch_swab64 __arch_swab64 -/* Do not define swab16. Gcc is smart enough to recognize "C" version and - convert it into rotation or exhange. */ - -#define __arch__swab64(x) ___arch__swab64(x) -#define __arch__swab32(x) ___arch__swab32(x) - -#define __BYTEORDER_HAS_U64__ - -#endif /* __GNUC__ */ - -#include <linux/byteorder/little_endian.h> +#include <linux/byteorder.h> #endif /* _ASM_X86_BYTEORDER_H */ diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index cfdf8c2c5c31..ea408dcba513 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -80,7 +80,6 @@ #define X86_FEATURE_UP (3*32+ 9) /* smp kernel running on up */ #define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* "" FXSAVE leaks FOP/FIP/FOP */ #define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */ -#define X86_FEATURE_NOPL (3*32+20) /* The NOPL (0F 1F) instructions */ #define X86_FEATURE_PEBS (3*32+12) /* Precise-Event Based Sampling */ #define X86_FEATURE_BTS (3*32+13) /* Branch Trace Store */ #define X86_FEATURE_SYSCALL32 (3*32+14) /* "" syscall in ia32 userspace */ @@ -92,6 +91,8 @@ #define X86_FEATURE_NOPL (3*32+20) /* The NOPL (0F 1F) instructions */ #define X86_FEATURE_AMDC1E (3*32+21) /* AMD C1E detected */ #define X86_FEATURE_XTOPOLOGY (3*32+22) /* cpu topology enum extensions */ +#define X86_FEATURE_TSC_RELIABLE (3*32+23) /* TSC is known to be reliable */ +#define X86_FEATURE_NONSTOP_TSC (3*32+24) /* TSC does not stop in C states */ /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ #define X86_FEATURE_XMM3 (4*32+ 0) /* "pni" SSE-3 */ @@ -117,6 +118,7 @@ #define X86_FEATURE_XSAVE (4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */ #define X86_FEATURE_OSXSAVE (4*32+27) /* "" XSAVE enabled in the OS */ #define X86_FEATURE_AVX (4*32+28) /* Advanced Vector Extensions */ +#define X86_FEATURE_HYPERVISOR (4*32+31) /* Running on a hypervisor */ /* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */ #define X86_FEATURE_XSTORE (5*32+ 2) /* "rng" RNG present (xstore) */ @@ -237,6 +239,7 @@ extern const char * const x86_power_flags[32]; #define cpu_has_xmm4_2 boot_cpu_has(X86_FEATURE_XMM4_2) #define cpu_has_x2apic boot_cpu_has(X86_FEATURE_X2APIC) #define cpu_has_xsave boot_cpu_has(X86_FEATURE_XSAVE) +#define cpu_has_hypervisor boot_cpu_has(X86_FEATURE_HYPERVISOR) #if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64) # define cpu_has_invlpg 1 diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h index 3b43a65894c4..4035357f5b9d 100644 --- 
a/arch/x86/include/asm/dma-mapping.h +++ b/arch/x86/include/asm/dma-mapping.h @@ -71,12 +71,10 @@ static inline struct dma_mapping_ops *get_dma_ops(struct device *dev) /* Make sure we keep the same behaviour */ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { -#ifdef CONFIG_X86_64 struct dma_mapping_ops *ops = get_dma_ops(dev); if (ops->mapping_error) return ops->mapping_error(dev, dma_addr); -#endif return (dma_addr == bad_dma_address); } diff --git a/arch/x86/include/asm/ds.h b/arch/x86/include/asm/ds.h index a95008457ea4..a8f672ba100c 100644 --- a/arch/x86/include/asm/ds.h +++ b/arch/x86/include/asm/ds.h @@ -6,14 +6,13 @@ * precise-event based sampling (PEBS). * * It manages: - * - per-thread and per-cpu allocation of BTS and PEBS - * - buffer memory allocation (optional) - * - buffer overflow handling + * - DS and BTS hardware configuration + * - buffer overflow handling (to be done) * - buffer access * - * It assumes: - * - get_task_struct on all parameter tasks - * - current is allowed to trace parameter tasks + * It does not do: + * - security checking (is the caller allowed to trace the task) + * - buffer allocation (memory accounting) * * * Copyright (C) 2007-2008 Intel Corporation. @@ -26,11 +25,51 @@ #include <linux/types.h> #include <linux/init.h> +#include <linux/err.h> #ifdef CONFIG_X86_DS struct task_struct; +struct ds_context; +struct ds_tracer; +struct bts_tracer; +struct pebs_tracer; + +typedef void (*bts_ovfl_callback_t)(struct bts_tracer *); +typedef void (*pebs_ovfl_callback_t)(struct pebs_tracer *); + + +/* + * A list of features plus corresponding macros to talk about them in + * the ds_request function's flags parameter. + * + * We use the enum to index an array of corresponding control bits; + * we use the macro to index a flags bit-vector. + */ +enum ds_feature { + dsf_bts = 0, + dsf_bts_kernel, +#define BTS_KERNEL (1 << dsf_bts_kernel) + /* trace kernel-mode branches */ + + dsf_bts_user, +#define BTS_USER (1 << dsf_bts_user) + /* trace user-mode branches */ + + dsf_bts_overflow, + dsf_bts_max, + dsf_pebs = dsf_bts_max, + + dsf_pebs_max, + dsf_ctl_max = dsf_pebs_max, + dsf_bts_timestamps = dsf_ctl_max, +#define BTS_TIMESTAMPS (1 << dsf_bts_timestamps) + /* add timestamps into BTS trace */ + +#define BTS_USER_FLAGS (BTS_KERNEL | BTS_USER | BTS_TIMESTAMPS) +}; + /* * Request BTS or PEBS @@ -38,163 +77,169 @@ struct task_struct; * Due to alignement constraints, the actual buffer may be slightly * smaller than the requested or provided buffer. * - * Returns 0 on success; -Eerrno otherwise + * Returns a pointer to a tracer structure on success, or + * ERR_PTR(errcode) on failure. + * + * The interrupt threshold is independent from the overflow callback + * to allow users to use their own overflow interrupt handling mechanism. * * task: the task to request recording for; * NULL for per-cpu recording on the current cpu * base: the base pointer for the (non-pageable) buffer; - * NULL if buffer allocation requested - * size: the size of the requested or provided buffer + * size: the size of the provided buffer in bytes * ovfl: pointer to a function to be called on buffer overflow; * NULL if cyclic buffer requested + * th: the interrupt threshold in records from the end of the buffer; + * -1 if no interrupt threshold is requested. 
+ * flags: a bit-mask of the above flags */ -typedef void (*ds_ovfl_callback_t)(struct task_struct *); -extern int ds_request_bts(struct task_struct *task, void *base, size_t size, - ds_ovfl_callback_t ovfl); -extern int ds_request_pebs(struct task_struct *task, void *base, size_t size, - ds_ovfl_callback_t ovfl); +extern struct bts_tracer *ds_request_bts(struct task_struct *task, + void *base, size_t size, + bts_ovfl_callback_t ovfl, + size_t th, unsigned int flags); +extern struct pebs_tracer *ds_request_pebs(struct task_struct *task, + void *base, size_t size, + pebs_ovfl_callback_t ovfl, + size_t th, unsigned int flags); /* * Release BTS or PEBS resources + * Suspend and resume BTS or PEBS tracing * - * Frees buffers allocated on ds_request. - * - * Returns 0 on success; -Eerrno otherwise - * - * task: the task to release resources for; - * NULL to release resources for the current cpu + * tracer: the tracer handle returned from ds_request_~() */ -extern int ds_release_bts(struct task_struct *task); -extern int ds_release_pebs(struct task_struct *task); +extern void ds_release_bts(struct bts_tracer *tracer); +extern void ds_suspend_bts(struct bts_tracer *tracer); +extern void ds_resume_bts(struct bts_tracer *tracer); +extern void ds_release_pebs(struct pebs_tracer *tracer); +extern void ds_suspend_pebs(struct pebs_tracer *tracer); +extern void ds_resume_pebs(struct pebs_tracer *tracer); -/* - * Return the (array) index of the write pointer. - * (assuming an array of BTS/PEBS records) - * - * Returns -Eerrno on error - * - * task: the task to access; - * NULL to access the current cpu - * pos (out): if not NULL, will hold the result - */ -extern int ds_get_bts_index(struct task_struct *task, size_t *pos); -extern int ds_get_pebs_index(struct task_struct *task, size_t *pos); /* - * Return the (array) index one record beyond the end of the array. - * (assuming an array of BTS/PEBS records) + * The raw DS buffer state as it is used for BTS and PEBS recording. * - * Returns -Eerrno on error - * - * task: the task to access; - * NULL to access the current cpu - * pos (out): if not NULL, will hold the result + * This is the low-level, arch-dependent interface for working + * directly on the raw trace data. */ -extern int ds_get_bts_end(struct task_struct *task, size_t *pos); -extern int ds_get_pebs_end(struct task_struct *task, size_t *pos); +struct ds_trace { + /* the number of bts/pebs records */ + size_t n; + /* the size of a bts/pebs record in bytes */ + size_t size; + /* pointers into the raw buffer: + - to the first entry */ + void *begin; + /* - one beyond the last entry */ + void *end; + /* - one beyond the newest entry */ + void *top; + /* - the interrupt threshold */ + void *ith; + /* flags given on ds_request() */ + unsigned int flags; +}; /* - * Provide a pointer to the BTS/PEBS record at parameter index. - * (assuming an array of BTS/PEBS records) - * - * The pointer points directly into the buffer. The user is - * responsible for copying the record. - * - * Returns the size of a single record on success; -Eerrno on error - * - * task: the task to access; - * NULL to access the current cpu - * index: the index of the requested record - * record (out): pointer to the requested record + * An arch-independent view on branch trace data. 
*/ -extern int ds_access_bts(struct task_struct *task, - size_t index, const void **record); -extern int ds_access_pebs(struct task_struct *task, - size_t index, const void **record); +enum bts_qualifier { + bts_invalid, +#define BTS_INVALID bts_invalid + + bts_branch, +#define BTS_BRANCH bts_branch + + bts_task_arrives, +#define BTS_TASK_ARRIVES bts_task_arrives + + bts_task_departs, +#define BTS_TASK_DEPARTS bts_task_departs + + bts_qual_bit_size = 4, + bts_qual_max = (1 << bts_qual_bit_size), +}; + +struct bts_struct { + __u64 qualifier; + union { + /* BTS_BRANCH */ + struct { + __u64 from; + __u64 to; + } lbr; + /* BTS_TASK_ARRIVES or BTS_TASK_DEPARTS */ + struct { + __u64 jiffies; + pid_t pid; + } timestamp; + } variant; +}; -/* - * Write one or more BTS/PEBS records at the write pointer index and - * advance the write pointer. - * - * If size is not a multiple of the record size, trailing bytes are - * zeroed out. - * - * May result in one or more overflow notifications. - * - * If called during overflow handling, that is, with index >= - * interrupt threshold, the write will wrap around. - * - * An overflow notification is given if and when the interrupt - * threshold is reached during or after the write. - * - * Returns the number of bytes written or -Eerrno. - * - * task: the task to access; - * NULL to access the current cpu - * buffer: the buffer to write - * size: the size of the buffer - */ -extern int ds_write_bts(struct task_struct *task, - const void *buffer, size_t size); -extern int ds_write_pebs(struct task_struct *task, - const void *buffer, size_t size); /* - * Same as ds_write_bts/pebs, but omit ownership checks. + * The BTS state. * - * This is needed to have some other task than the owner of the - * BTS/PEBS buffer or the parameter task itself write into the - * respective buffer. + * This gives access to the raw DS state and adds functions to provide + * an arch-independent view of the BTS data. */ -extern int ds_unchecked_write_bts(struct task_struct *task, - const void *buffer, size_t size); -extern int ds_unchecked_write_pebs(struct task_struct *task, - const void *buffer, size_t size); +struct bts_trace { + struct ds_trace ds; + + int (*read)(struct bts_tracer *tracer, const void *at, + struct bts_struct *out); + int (*write)(struct bts_tracer *tracer, const struct bts_struct *in); +}; + /* - * Reset the write pointer of the BTS/PEBS buffer. + * The PEBS state. * - * Returns 0 on success; -Eerrno on error - * - * task: the task to access; - * NULL to access the current cpu + * This gives access to the raw DS state and the PEBS-specific counter + * reset value. */ -extern int ds_reset_bts(struct task_struct *task); -extern int ds_reset_pebs(struct task_struct *task); +struct pebs_trace { + struct ds_trace ds; + + /* the PEBS reset value */ + unsigned long long reset_value; +}; + /* - * Clear the BTS/PEBS buffer and reset the write pointer. - * The entire buffer will be zeroed out. + * Read the BTS or PEBS trace. * - * Returns 0 on success; -Eerrno on error + * Returns a view on the trace collected for the parameter tracer. + * + * The view remains valid as long as the traced task is not running or + * the tracer is suspended. + * Writes into the trace buffer are not reflected. 
* - * task: the task to access; - * NULL to access the current cpu + * tracer: the tracer handle returned from ds_request_~() */ -extern int ds_clear_bts(struct task_struct *task); -extern int ds_clear_pebs(struct task_struct *task); +extern const struct bts_trace *ds_read_bts(struct bts_tracer *tracer); +extern const struct pebs_trace *ds_read_pebs(struct pebs_tracer *tracer); + /* - * Provide the PEBS counter reset value. + * Reset the write pointer of the BTS/PEBS buffer. * * Returns 0 on success; -Eerrno on error * - * task: the task to access; - * NULL to access the current cpu - * value (out): the counter reset value + * tracer: the tracer handle returned from ds_request_~() */ -extern int ds_get_pebs_reset(struct task_struct *task, u64 *value); +extern int ds_reset_bts(struct bts_tracer *tracer); +extern int ds_reset_pebs(struct pebs_tracer *tracer); /* * Set the PEBS counter reset value. * * Returns 0 on success; -Eerrno on error * - * task: the task to access; - * NULL to access the current cpu + * tracer: the tracer handle returned from ds_request_pebs() * value: the new counter reset value */ -extern int ds_set_pebs_reset(struct task_struct *task, u64 value); +extern int ds_set_pebs_reset(struct pebs_tracer *tracer, u64 value); /* * Initialization @@ -202,39 +247,26 @@ extern int ds_set_pebs_reset(struct task_struct *task, u64 value); struct cpuinfo_x86; extern void __cpuinit ds_init_intel(struct cpuinfo_x86 *); - - /* - * The DS context - part of struct thread_struct. + * Context switch work */ -struct ds_context { - /* pointer to the DS configuration; goes into MSR_IA32_DS_AREA */ - unsigned char *ds; - /* the owner of the BTS and PEBS configuration, respectively */ - struct task_struct *owner[2]; - /* buffer overflow notification function for BTS and PEBS */ - ds_ovfl_callback_t callback[2]; - /* the original buffer address */ - void *buffer[2]; - /* the number of allocated pages for on-request allocated buffers */ - unsigned int pages[2]; - /* use count */ - unsigned long count; - /* a pointer to the context location inside the thread_struct - * or the per_cpu context array */ - struct ds_context **this; - /* a pointer to the task owning this context, or NULL, if the - * context is owned by a cpu */ - struct task_struct *task; -}; +extern void ds_switch_to(struct task_struct *prev, struct task_struct *next); -/* called by exit_thread() to free leftover contexts */ -extern void ds_free(struct ds_context *context); +/* + * Task clone/init and cleanup work + */ +extern void ds_copy_thread(struct task_struct *tsk, struct task_struct *father); +extern void ds_exit_thread(struct task_struct *tsk); #else /* CONFIG_X86_DS */ struct cpuinfo_x86; static inline void __cpuinit ds_init_intel(struct cpuinfo_x86 *ignored) {} +static inline void ds_switch_to(struct task_struct *prev, + struct task_struct *next) {} +static inline void ds_copy_thread(struct task_struct *tsk, + struct task_struct *father) {} +static inline void ds_exit_thread(struct task_struct *tsk) {} #endif /* CONFIG_X86_DS */ #endif /* _ASM_X86_DS_H */ diff --git a/arch/x86/include/asm/dwarf2.h b/arch/x86/include/asm/dwarf2.h index 804b6e6be929..3afc5e87cfdd 100644 --- a/arch/x86/include/asm/dwarf2.h +++ b/arch/x86/include/asm/dwarf2.h @@ -6,56 +6,91 @@ #endif /* - Macros for dwarf2 CFI unwind table entries. - See "as.info" for details on these pseudo ops. Unfortunately - they are only supported in very new binutils, so define them - away for older version. + * Macros for dwarf2 CFI unwind table entries. 
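
The reworked <asm/ds.h> interface above replaces the old index-based accessors with opaque tracer handles and a read callback. Below is a minimal sketch of how an in-kernel client might drive the new BTS side of it; the caller, the buffer size and the simple begin-to-top walk (which ignores wrap-around of the cyclic buffer) are illustrative assumptions, not part of this patch.

#include <linux/err.h>
#include <linux/kernel.h>
#include <asm/ds.h>

#define MY_BTS_BUFFER_SIZE	(4 * 1024)		/* arbitrary */
static char my_bts_buffer[MY_BTS_BUFFER_SIZE];

static void my_dump_branches(struct task_struct *task)
{
	struct bts_tracer *tracer;
	const struct bts_trace *trace;
	const void *at;

	/* trace user and kernel branches; no overflow callback, no threshold */
	tracer = ds_request_bts(task, my_bts_buffer, sizeof(my_bts_buffer),
				NULL, (size_t)-1, BTS_USER_FLAGS);
	if (IS_ERR(tracer))
		return;

	/* ... let the task run for a while ... */

	ds_suspend_bts(tracer);
	trace = ds_read_bts(tracer);

	/* walk the raw records between begin and top (wrap-around ignored) */
	for (at = trace->ds.begin; at < trace->ds.top; at += trace->ds.size) {
		struct bts_struct bts;

		if (trace->read(tracer, at, &bts) < 0)
			break;
		if (bts.qualifier == BTS_BRANCH)
			printk(KERN_DEBUG "branch %llx -> %llx\n",
			       (unsigned long long)bts.variant.lbr.from,
			       (unsigned long long)bts.variant.lbr.to);
	}

	ds_release_bts(tracer);
}
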
+ * See "as.info" for details on these pseudo ops. Unfortunately + * they are only supported in very new binutils, so define them + * away for older version. */ #ifdef CONFIG_AS_CFI -#define CFI_STARTPROC .cfi_startproc -#define CFI_ENDPROC .cfi_endproc -#define CFI_DEF_CFA .cfi_def_cfa -#define CFI_DEF_CFA_REGISTER .cfi_def_cfa_register -#define CFI_DEF_CFA_OFFSET .cfi_def_cfa_offset -#define CFI_ADJUST_CFA_OFFSET .cfi_adjust_cfa_offset -#define CFI_OFFSET .cfi_offset -#define CFI_REL_OFFSET .cfi_rel_offset -#define CFI_REGISTER .cfi_register -#define CFI_RESTORE .cfi_restore -#define CFI_REMEMBER_STATE .cfi_remember_state -#define CFI_RESTORE_STATE .cfi_restore_state -#define CFI_UNDEFINED .cfi_undefined +#define CFI_STARTPROC .cfi_startproc +#define CFI_ENDPROC .cfi_endproc +#define CFI_DEF_CFA .cfi_def_cfa +#define CFI_DEF_CFA_REGISTER .cfi_def_cfa_register +#define CFI_DEF_CFA_OFFSET .cfi_def_cfa_offset +#define CFI_ADJUST_CFA_OFFSET .cfi_adjust_cfa_offset +#define CFI_OFFSET .cfi_offset +#define CFI_REL_OFFSET .cfi_rel_offset +#define CFI_REGISTER .cfi_register +#define CFI_RESTORE .cfi_restore +#define CFI_REMEMBER_STATE .cfi_remember_state +#define CFI_RESTORE_STATE .cfi_restore_state +#define CFI_UNDEFINED .cfi_undefined #ifdef CONFIG_AS_CFI_SIGNAL_FRAME -#define CFI_SIGNAL_FRAME .cfi_signal_frame +#define CFI_SIGNAL_FRAME .cfi_signal_frame #else #define CFI_SIGNAL_FRAME #endif #else -/* Due to the structure of pre-exisiting code, don't use assembler line - comment character # to ignore the arguments. Instead, use a dummy macro. */ +/* + * Due to the structure of pre-exisiting code, don't use assembler line + * comment character # to ignore the arguments. Instead, use a dummy macro. + */ .macro cfi_ignore a=0, b=0, c=0, d=0 .endm -#define CFI_STARTPROC cfi_ignore -#define CFI_ENDPROC cfi_ignore -#define CFI_DEF_CFA cfi_ignore +#define CFI_STARTPROC cfi_ignore +#define CFI_ENDPROC cfi_ignore +#define CFI_DEF_CFA cfi_ignore #define CFI_DEF_CFA_REGISTER cfi_ignore #define CFI_DEF_CFA_OFFSET cfi_ignore #define CFI_ADJUST_CFA_OFFSET cfi_ignore -#define CFI_OFFSET cfi_ignore -#define CFI_REL_OFFSET cfi_ignore -#define CFI_REGISTER cfi_ignore -#define CFI_RESTORE cfi_ignore -#define CFI_REMEMBER_STATE cfi_ignore -#define CFI_RESTORE_STATE cfi_ignore -#define CFI_UNDEFINED cfi_ignore -#define CFI_SIGNAL_FRAME cfi_ignore +#define CFI_OFFSET cfi_ignore +#define CFI_REL_OFFSET cfi_ignore +#define CFI_REGISTER cfi_ignore +#define CFI_RESTORE cfi_ignore +#define CFI_REMEMBER_STATE cfi_ignore +#define CFI_RESTORE_STATE cfi_ignore +#define CFI_UNDEFINED cfi_ignore +#define CFI_SIGNAL_FRAME cfi_ignore #endif +/* + * An attempt to make CFI annotations more or less + * correct and shorter. It is implied that you know + * what you're doing if you use them. 
+ */ +#ifdef __ASSEMBLY__ +#ifdef CONFIG_X86_64 + .macro pushq_cfi reg + pushq \reg + CFI_ADJUST_CFA_OFFSET 8 + .endm + + .macro popq_cfi reg + popq \reg + CFI_ADJUST_CFA_OFFSET -8 + .endm + + .macro movq_cfi reg offset=0 + movq %\reg, \offset(%rsp) + CFI_REL_OFFSET \reg, \offset + .endm + + .macro movq_cfi_restore offset reg + movq \offset(%rsp), %\reg + CFI_RESTORE \reg + .endm +#else /*!CONFIG_X86_64*/ + + /* 32bit defenitions are missed yet */ + +#endif /*!CONFIG_X86_64*/ +#endif /*__ASSEMBLY__*/ + #endif /* _ASM_X86_DWARF2_H */ diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h index 40ca1bea7916..f51a3ddde01a 100644 --- a/arch/x86/include/asm/elf.h +++ b/arch/x86/include/asm/elf.h @@ -325,7 +325,7 @@ struct linux_binprm; #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 extern int arch_setup_additional_pages(struct linux_binprm *bprm, - int executable_stack); + int uses_interp); extern int syscall32_setup_pages(struct linux_binprm *, int exstack); #define compat_arch_setup_additional_pages syscall32_setup_pages diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h index 94826cf87455..cc70c1c78ca4 100644 --- a/arch/x86/include/asm/emergency-restart.h +++ b/arch/x86/include/asm/emergency-restart.h @@ -8,7 +8,9 @@ enum reboot_type { BOOT_BIOS = 'b', #endif BOOT_ACPI = 'a', - BOOT_EFI = 'e' + BOOT_EFI = 'e', + BOOT_CF9 = 'p', + BOOT_CF9_COND = 'q', }; extern enum reboot_type reboot_type; diff --git a/arch/x86/include/asm/es7000/apic.h b/arch/x86/include/asm/es7000/apic.h index 380f0b4f17ed..e24ef876915f 100644 --- a/arch/x86/include/asm/es7000/apic.h +++ b/arch/x86/include/asm/es7000/apic.h @@ -9,31 +9,27 @@ static inline int apic_id_registered(void) return (1); } -static inline cpumask_t target_cpus(void) +static inline cpumask_t target_cpus_cluster(void) { -#if defined CONFIG_ES7000_CLUSTERED_APIC return CPU_MASK_ALL; -#else +} + +static inline cpumask_t target_cpus(void) +{ return cpumask_of_cpu(smp_processor_id()); -#endif } -#if defined CONFIG_ES7000_CLUSTERED_APIC -#define APIC_DFR_VALUE (APIC_DFR_CLUSTER) -#define INT_DELIVERY_MODE (dest_LowestPrio) -#define INT_DEST_MODE (1) /* logical delivery broadcast to all procs */ -#define NO_BALANCE_IRQ (1) -#undef WAKE_SECONDARY_VIA_INIT -#define WAKE_SECONDARY_VIA_MIP -#else +#define APIC_DFR_VALUE_CLUSTER (APIC_DFR_CLUSTER) +#define INT_DELIVERY_MODE_CLUSTER (dest_LowestPrio) +#define INT_DEST_MODE_CLUSTER (1) /* logical delivery broadcast to all procs */ +#define NO_BALANCE_IRQ_CLUSTER (1) + #define APIC_DFR_VALUE (APIC_DFR_FLAT) #define INT_DELIVERY_MODE (dest_Fixed) #define INT_DEST_MODE (0) /* phys delivery to target procs */ #define NO_BALANCE_IRQ (0) #undef APIC_DEST_LOGICAL #define APIC_DEST_LOGICAL 0x0 -#define WAKE_SECONDARY_VIA_INIT -#endif static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid) { @@ -60,6 +56,16 @@ static inline unsigned long calculate_ldr(int cpu) * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel * document number 292116). So here it goes... 
*/ +static inline void init_apic_ldr_cluster(void) +{ + unsigned long val; + int cpu = smp_processor_id(); + + apic_write(APIC_DFR, APIC_DFR_VALUE_CLUSTER); + val = calculate_ldr(cpu); + apic_write(APIC_LDR, val); +} + static inline void init_apic_ldr(void) { unsigned long val; @@ -70,10 +76,6 @@ static inline void init_apic_ldr(void) apic_write(APIC_LDR, val); } -#ifndef CONFIG_X86_GENERICARCH -extern void enable_apic_mode(void); -#endif - extern int apic_version [MAX_APICS]; static inline void setup_apic_routing(void) { @@ -144,7 +146,7 @@ static inline int check_phys_apicid_present(int cpu_physical_apicid) return (1); } -static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) +static inline unsigned int cpu_mask_to_apicid_cluster(cpumask_t cpumask) { int num_bits_set; int cpus_found = 0; @@ -154,11 +156,7 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) num_bits_set = cpus_weight(cpumask); /* Return id to all */ if (num_bits_set == NR_CPUS) -#if defined CONFIG_ES7000_CLUSTERED_APIC return 0xFF; -#else - return cpu_to_logical_apicid(0); -#endif /* * The cpus in the mask must all be on the apic cluster. If are not * on the same apicid cluster return default value of TARGET_CPUS. @@ -171,11 +169,40 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) if (apicid_cluster(apicid) != apicid_cluster(new_apicid)){ printk ("%s: Not a valid mask!\n", __func__); -#if defined CONFIG_ES7000_CLUSTERED_APIC return 0xFF; -#else + } + apicid = new_apicid; + cpus_found++; + } + cpu++; + } + return apicid; +} + +static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) +{ + int num_bits_set; + int cpus_found = 0; + int cpu; + int apicid; + + num_bits_set = cpus_weight(cpumask); + /* Return id to all */ + if (num_bits_set == NR_CPUS) + return cpu_to_logical_apicid(0); + /* + * The cpus in the mask must all be on the apic cluster. If are not + * on the same apicid cluster return default value of TARGET_CPUS. + */ + cpu = first_cpu(cpumask); + apicid = cpu_to_logical_apicid(cpu); + while (cpus_found < num_bits_set) { + if (cpu_isset(cpu, cpumask)) { + int new_apicid = cpu_to_logical_apicid(cpu); + if (apicid_cluster(apicid) != + apicid_cluster(new_apicid)){ + printk ("%s: Not a valid mask!\n", __func__); return cpu_to_logical_apicid(0); -#endif } apicid = new_apicid; cpus_found++; diff --git a/arch/x86/include/asm/es7000/wakecpu.h b/arch/x86/include/asm/es7000/wakecpu.h index 398493461913..78f0daaee436 100644 --- a/arch/x86/include/asm/es7000/wakecpu.h +++ b/arch/x86/include/asm/es7000/wakecpu.h @@ -1,36 +1,12 @@ #ifndef __ASM_ES7000_WAKECPU_H #define __ASM_ES7000_WAKECPU_H -/* - * This file copes with machines that wakeup secondary CPUs by the - * INIT, INIT, STARTUP sequence. 
- */ - -#ifdef CONFIG_ES7000_CLUSTERED_APIC -#define WAKE_SECONDARY_VIA_MIP -#else -#define WAKE_SECONDARY_VIA_INIT -#endif - -#ifdef WAKE_SECONDARY_VIA_MIP -extern int es7000_start_cpu(int cpu, unsigned long eip); -static inline int -wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip) -{ - int boot_error = 0; - boot_error = es7000_start_cpu(phys_apicid, start_eip); - return boot_error; -} -#endif - -#define TRAMPOLINE_LOW phys_to_virt(0x467) -#define TRAMPOLINE_HIGH phys_to_virt(0x469) - -#define boot_cpu_apicid boot_cpu_physical_apicid +#define TRAMPOLINE_PHYS_LOW 0x467 +#define TRAMPOLINE_PHYS_HIGH 0x469 static inline void wait_for_init_deassert(atomic_t *deassert) { -#ifdef WAKE_SECONDARY_VIA_INIT +#ifndef CONFIG_ES7000_CLUSTERED_APIC while (!atomic_read(deassert)) cpu_relax(); #endif @@ -50,9 +26,12 @@ static inline void restore_NMI_vector(unsigned short *high, unsigned short *low) { } -#define inquire_remote_apic(apicid) do { \ - if (apic_verbosity >= APIC_DEBUG) \ - __inquire_remote_apic(apicid); \ - } while (0) +extern void __inquire_remote_apic(int apicid); + +static inline void inquire_remote_apic(int apicid) +{ + if (apic_verbosity >= APIC_DEBUG) + __inquire_remote_apic(apicid); +} #endif /* __ASM_MACH_WAKECPU_H */ diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h index 9e8bc29b8b17..b55b4a7fbefd 100644 --- a/arch/x86/include/asm/ftrace.h +++ b/arch/x86/include/asm/ftrace.h @@ -1,6 +1,33 @@ #ifndef _ASM_X86_FTRACE_H #define _ASM_X86_FTRACE_H +#ifdef __ASSEMBLY__ + + .macro MCOUNT_SAVE_FRAME + /* taken from glibc */ + subq $0x38, %rsp + movq %rax, (%rsp) + movq %rcx, 8(%rsp) + movq %rdx, 16(%rsp) + movq %rsi, 24(%rsp) + movq %rdi, 32(%rsp) + movq %r8, 40(%rsp) + movq %r9, 48(%rsp) + .endm + + .macro MCOUNT_RESTORE_FRAME + movq 48(%rsp), %r9 + movq 40(%rsp), %r8 + movq 32(%rsp), %rdi + movq 24(%rsp), %rsi + movq 16(%rsp), %rdx + movq 8(%rsp), %rcx + movq (%rsp), %rax + addq $0x38, %rsp + .endm + +#endif + #ifdef CONFIG_FUNCTION_TRACER #define MCOUNT_ADDR ((long)(mcount)) #define MCOUNT_INSN_SIZE 5 /* sizeof mcount call */ @@ -17,8 +44,40 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr) */ return addr - 1; } -#endif +#ifdef CONFIG_DYNAMIC_FTRACE + +struct dyn_arch_ftrace { + /* No extra data needed for x86 */ +}; + +#endif /* CONFIG_DYNAMIC_FTRACE */ +#endif /* __ASSEMBLY__ */ #endif /* CONFIG_FUNCTION_TRACER */ +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + +#ifndef __ASSEMBLY__ + +/* + * Stack of return addresses for functions + * of a thread. + * Used in struct thread_info + */ +struct ftrace_ret_stack { + unsigned long ret; + unsigned long func; + unsigned long long calltime; +}; + +/* + * Primary handler of a function return. + * It relays on ftrace_return_to_handler. 
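
To make the role of ftrace_ret_stack concrete: on entry to a traced function the function-graph tracer records the real return address, the traced function and a timestamp, then redirects the return through the return_to_handler() stub (declared just below), which later hands control to ftrace_return_to_handler to pop the entry again. The following is a hedged sketch of that entry-side bookkeeping only; the helper name, depth limit and clock source are illustrative and not taken from this patch.

#include <linux/errno.h>
#include <linux/sched.h>		/* sched_clock() */
#include <asm/ftrace.h>

#define MY_RET_STACK_DEPTH	64	/* hypothetical per-thread limit */

/* hypothetical helper, called with the address of the saved return address */
static int my_push_return_trace(unsigned long *parent_ret, unsigned long func,
				struct ftrace_ret_stack *stack, int *index)
{
	if (*index >= MY_RET_STACK_DEPTH)
		return -EBUSY;

	/* remember where the traced function would really have returned to */
	stack[*index].ret = *parent_ret;
	stack[*index].func = func;
	stack[*index].calltime = sched_clock();
	(*index)++;

	/* divert the return through the assembly stub */
	*parent_ret = (unsigned long)return_to_handler;
	return 0;
}
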
+ * Defined in entry_32/64.S + */ +extern void return_to_handler(void); + +#endif /* __ASSEMBLY__ */ +#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ + #endif /* _ASM_X86_FTRACE_H */ diff --git a/arch/x86/include/asm/gart.h b/arch/x86/include/asm/gart.h index 74252264433d..6cfdafa409d8 100644 --- a/arch/x86/include/asm/gart.h +++ b/arch/x86/include/asm/gart.h @@ -29,6 +29,39 @@ extern int fix_aperture; #define AMD64_GARTCACHECTL 0x9c #define AMD64_GARTEN (1<<0) +#ifdef CONFIG_GART_IOMMU +extern int gart_iommu_aperture; +extern int gart_iommu_aperture_allowed; +extern int gart_iommu_aperture_disabled; + +extern void early_gart_iommu_check(void); +extern void gart_iommu_init(void); +extern void gart_iommu_shutdown(void); +extern void __init gart_parse_options(char *); +extern void gart_iommu_hole_init(void); + +#else +#define gart_iommu_aperture 0 +#define gart_iommu_aperture_allowed 0 +#define gart_iommu_aperture_disabled 1 + +static inline void early_gart_iommu_check(void) +{ +} +static inline void gart_iommu_init(void) +{ +} +static inline void gart_iommu_shutdown(void) +{ +} +static inline void gart_parse_options(char *options) +{ +} +static inline void gart_iommu_hole_init(void) +{ +} +#endif + extern int agp_amd64_init(void); static inline void enable_gart_translation(struct pci_dev *dev, u64 addr) diff --git a/arch/x86/include/asm/genapic_32.h b/arch/x86/include/asm/genapic_32.h index 5cbd4fcc06fd..0ac17d33a8c7 100644 --- a/arch/x86/include/asm/genapic_32.h +++ b/arch/x86/include/asm/genapic_32.h @@ -2,6 +2,7 @@ #define _ASM_X86_GENAPIC_32_H #include <asm/mpspec.h> +#include <asm/atomic.h> /* * Generic APIC driver interface. @@ -65,6 +66,14 @@ struct genapic { void (*send_IPI_allbutself)(int vector); void (*send_IPI_all)(int vector); #endif + int (*wakeup_cpu)(int apicid, unsigned long start_eip); + int trampoline_phys_low; + int trampoline_phys_high; + void (*wait_for_init_deassert)(atomic_t *deassert); + void (*smp_callin_clear_local_apic)(void); + void (*store_NMI_vector)(unsigned short *high, unsigned short *low); + void (*restore_NMI_vector)(unsigned short *high, unsigned short *low); + void (*inquire_remote_apic)(int apicid); }; #define APICFUNC(x) .x = x, @@ -105,16 +114,24 @@ struct genapic { APICFUNC(get_apic_id) \ .apic_id_mask = APIC_ID_MASK, \ APICFUNC(cpu_mask_to_apicid) \ - APICFUNC(vector_allocation_domain) \ + APICFUNC(vector_allocation_domain) \ APICFUNC(acpi_madt_oem_check) \ IPIFUNC(send_IPI_mask) \ IPIFUNC(send_IPI_allbutself) \ IPIFUNC(send_IPI_all) \ APICFUNC(enable_apic_mode) \ APICFUNC(phys_pkg_id) \ + .trampoline_phys_low = TRAMPOLINE_PHYS_LOW, \ + .trampoline_phys_high = TRAMPOLINE_PHYS_HIGH, \ + APICFUNC(wait_for_init_deassert) \ + APICFUNC(smp_callin_clear_local_apic) \ + APICFUNC(store_NMI_vector) \ + APICFUNC(restore_NMI_vector) \ + APICFUNC(inquire_remote_apic) \ } extern struct genapic *genapic; +extern void es7000_update_genapic_to_cluster(void); enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC}; #define get_uv_system_type() UV_NONE diff --git a/arch/x86/include/asm/genapic_64.h b/arch/x86/include/asm/genapic_64.h index 13c4e96199ea..2cae011668b7 100644 --- a/arch/x86/include/asm/genapic_64.h +++ b/arch/x86/include/asm/genapic_64.h @@ -32,6 +32,8 @@ struct genapic { unsigned int (*get_apic_id)(unsigned long x); unsigned long (*set_apic_id)(unsigned int id); unsigned long apic_id_mask; + /* wakeup_secondary_cpu */ + int (*wakeup_cpu)(int apicid, unsigned long start_eip); }; extern struct genapic *genapic; diff --git 
a/arch/x86/include/asm/hardirq_32.h b/arch/x86/include/asm/hardirq_32.h index 5ca135e72f2b..cf7954d1405f 100644 --- a/arch/x86/include/asm/hardirq_32.h +++ b/arch/x86/include/asm/hardirq_32.h @@ -22,6 +22,8 @@ DECLARE_PER_CPU(irq_cpustat_t, irq_stat); #define __ARCH_IRQ_STAT #define __IRQ_STAT(cpu, member) (per_cpu(irq_stat, cpu).member) +#define inc_irq_stat(member) (__get_cpu_var(irq_stat).member++) + void ack_bad_irq(unsigned int irq); #include <linux/irq_cpustat.h> diff --git a/arch/x86/include/asm/hardirq_64.h b/arch/x86/include/asm/hardirq_64.h index 1ba381fc51d3..b5a6b5d56704 100644 --- a/arch/x86/include/asm/hardirq_64.h +++ b/arch/x86/include/asm/hardirq_64.h @@ -11,6 +11,8 @@ #define __ARCH_IRQ_STAT 1 +#define inc_irq_stat(member) add_pda(member, 1) + #define local_softirq_pending() read_pda(__softirq_pending) #define __ARCH_SET_SOFTIRQ_PENDING 1 diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h index b97aecb0b61d..8de644b6b959 100644 --- a/arch/x86/include/asm/hw_irq.h +++ b/arch/x86/include/asm/hw_irq.h @@ -109,9 +109,7 @@ extern asmlinkage void smp_invalidate_interrupt(struct pt_regs *); #endif #endif -#ifdef CONFIG_X86_32 -extern void (*const interrupt[NR_VECTORS])(void); -#endif +extern void (*__initconst interrupt[NR_VECTORS-FIRST_EXTERNAL_VECTOR])(void); typedef int vector_irq_t[NR_VECTORS]; DECLARE_PER_CPU(vector_irq_t, vector_irq); diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h new file mode 100644 index 000000000000..369f5c5d09a1 --- /dev/null +++ b/arch/x86/include/asm/hypervisor.h @@ -0,0 +1,26 @@ +/* + * Copyright (C) 2008, VMware, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
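
The inc_irq_stat() helper added to hardirq_32.h and hardirq_64.h above gives interrupt handlers a single spelling for bumping their per-CPU statistics, instead of the previous __get_cpu_var(irq_stat).member++ (32-bit) versus add_pda(member, 1) (64-bit) pair. A small sketch of a call site follows; the handler and the apic_timer_irqs field are used for illustration and are not part of this hunk.

#include <linux/hardirq.h>	/* pulls in the asm/hardirq_{32,64}.h above */

static void my_timer_interrupt_body(void)	/* hypothetical handler body */
{
	/* was: add_pda(apic_timer_irqs, 1) on 64-bit, or
	 *      __get_cpu_var(irq_stat).apic_timer_irqs++ on 32-bit */
	inc_irq_stat(apic_timer_irqs);
}
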
+ * + */ +#ifndef ASM_X86__HYPERVISOR_H +#define ASM_X86__HYPERVISOR_H + +extern unsigned long get_hypervisor_tsc_freq(void); +extern void init_hypervisor(struct cpuinfo_x86 *c); + +#endif diff --git a/arch/x86/include/asm/ia32.h b/arch/x86/include/asm/ia32.h index 97989c0e534c..50ca486fd88c 100644 --- a/arch/x86/include/asm/ia32.h +++ b/arch/x86/include/asm/ia32.h @@ -129,24 +129,6 @@ typedef struct compat_siginfo { } _sifields; } compat_siginfo_t; -struct sigframe32 { - u32 pretcode; - int sig; - struct sigcontext_ia32 sc; - struct _fpstate_ia32 fpstate; - unsigned int extramask[_COMPAT_NSIG_WORDS-1]; -}; - -struct rt_sigframe32 { - u32 pretcode; - int sig; - u32 pinfo; - u32 puc; - compat_siginfo_t info; - struct ucontext_ia32 uc; - struct _fpstate_ia32 fpstate; -}; - struct ustat32 { __u32 f_tfree; compat_ino_t f_tinode; diff --git a/arch/x86/include/asm/idle.h b/arch/x86/include/asm/idle.h index 44c89c3a23e9..38d87379e270 100644 --- a/arch/x86/include/asm/idle.h +++ b/arch/x86/include/asm/idle.h @@ -8,8 +8,13 @@ struct notifier_block; void idle_notifier_register(struct notifier_block *n); void idle_notifier_unregister(struct notifier_block *n); +#ifdef CONFIG_X86_64 void enter_idle(void); void exit_idle(void); +#else /* !CONFIG_X86_64 */ +static inline void enter_idle(void) { } +static inline void exit_idle(void) { } +#endif /* CONFIG_X86_64 */ void c1e_remove_cpu(int cpu); diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h index ac2abc88cd95..05cfed4485fa 100644 --- a/arch/x86/include/asm/io.h +++ b/arch/x86/include/asm/io.h @@ -4,6 +4,7 @@ #define ARCH_HAS_IOREMAP_WC #include <linux/compiler.h> +#include <asm-generic/int-ll64.h> #define build_mmio_read(name, size, type, reg, barrier) \ static inline type name(const volatile void __iomem *addr) \ @@ -45,21 +46,39 @@ build_mmio_write(__writel, "l", unsigned int, "r", ) #define mmiowb() barrier() #ifdef CONFIG_X86_64 + build_mmio_read(readq, "q", unsigned long, "=r", :"memory") -build_mmio_read(__readq, "q", unsigned long, "=r", ) build_mmio_write(writeq, "q", unsigned long, "r", :"memory") -build_mmio_write(__writeq, "q", unsigned long, "r", ) -#define readq_relaxed(a) __readq(a) -#define __raw_readq __readq -#define __raw_writeq writeq +#else + +static inline __u64 readq(const volatile void __iomem *addr) +{ + const volatile u32 __iomem *p = addr; + u32 low, high; + + low = readl(p); + high = readl(p + 1); + + return low + ((u64)high << 32); +} + +static inline void writeq(__u64 val, volatile void __iomem *addr) +{ + writel(val, addr); + writel(val >> 32, addr+4); +} -/* Let people know we have them */ -#define readq readq -#define writeq writeq #endif -extern int iommu_bio_merge; +#define readq_relaxed(a) readq(a) + +#define __raw_readq(a) readq(a) +#define __raw_writeq(val, addr) writeq(val, addr) + +/* Let people know that we have them */ +#define readq readq +#define writeq writeq #ifdef CONFIG_X86_32 # include "io_32.h" diff --git a/arch/x86/include/asm/io_64.h b/arch/x86/include/asm/io_64.h index fea325a1122f..563c16270ba6 100644 --- a/arch/x86/include/asm/io_64.h +++ b/arch/x86/include/asm/io_64.h @@ -232,8 +232,6 @@ void memset_io(volatile void __iomem *a, int b, size_t c); #define flush_write_buffers() -#define BIO_VMERGE_BOUNDARY iommu_bio_merge - /* * Convert a virtual cached pointer to an uncached pointer */ diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h index 6afd9933a7dd..e475e009ae5d 100644 --- a/arch/x86/include/asm/io_apic.h +++ b/arch/x86/include/asm/io_apic.h @@ -156,11 
+156,21 @@ extern int sis_apic_bug; /* 1 if "noapic" boot option passed */ extern int skip_ioapic_setup; +/* 1 if "noapic" boot option passed */ +extern int noioapicquirk; + +/* -1 if "noapic" boot option passed */ +extern int noioapicreroute; + /* 1 if the timer IRQ uses the '8259A Virtual Wire' mode */ extern int timer_through_8259; static inline void disable_ioapic_setup(void) { +#ifdef CONFIG_PCI + noioapicquirk = 1; + noioapicreroute = -1; +#endif skip_ioapic_setup = 1; } diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h index 35276ec5925b..a6ee9e6f530f 100644 --- a/arch/x86/include/asm/iommu.h +++ b/arch/x86/include/asm/iommu.h @@ -10,37 +10,4 @@ extern int iommu_detected; /* 10 seconds */ #define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000) -#ifdef CONFIG_GART_IOMMU -extern int gart_iommu_aperture; -extern int gart_iommu_aperture_allowed; -extern int gart_iommu_aperture_disabled; - -extern void early_gart_iommu_check(void); -extern void gart_iommu_init(void); -extern void gart_iommu_shutdown(void); -extern void __init gart_parse_options(char *); -extern void gart_iommu_hole_init(void); - -#else -#define gart_iommu_aperture 0 -#define gart_iommu_aperture_allowed 0 -#define gart_iommu_aperture_disabled 1 - -static inline void early_gart_iommu_check(void) -{ -} -static inline void gart_iommu_init(void) -{ -} -static inline void gart_iommu_shutdown(void) -{ -} -static inline void gart_parse_options(char *options) -{ -} -static inline void gart_iommu_hole_init(void) -{ -} -#endif - #endif /* _ASM_X86_IOMMU_H */ diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h index bae0eda95486..28e409fc73f3 100644 --- a/arch/x86/include/asm/irq.h +++ b/arch/x86/include/asm/irq.h @@ -31,10 +31,6 @@ static inline int irq_canonicalize(int irq) # endif #endif -#ifdef CONFIG_IRQBALANCE -extern int irqbalance_disable(char *str); -#endif - #ifdef CONFIG_HOTPLUG_CPU #include <linux/cpumask.h> extern void fixup_irqs(cpumask_t map); diff --git a/arch/x86/include/asm/irq_regs_32.h b/arch/x86/include/asm/irq_regs_32.h index af2f02d27fc7..86afd7473457 100644 --- a/arch/x86/include/asm/irq_regs_32.h +++ b/arch/x86/include/asm/irq_regs_32.h @@ -9,6 +9,8 @@ #include <asm/percpu.h> +#define ARCH_HAS_OWN_IRQ_REGS + DECLARE_PER_CPU(struct pt_regs *, irq_regs); static inline struct pt_regs *get_irq_regs(void) diff --git a/arch/x86/include/asm/kexec.h b/arch/x86/include/asm/kexec.h index a1f22771a15a..c61d8b2ab8b9 100644 --- a/arch/x86/include/asm/kexec.h +++ b/arch/x86/include/asm/kexec.h @@ -5,21 +5,8 @@ # define PA_CONTROL_PAGE 0 # define VA_CONTROL_PAGE 1 # define PA_PGD 2 -# define VA_PGD 3 -# define PA_PTE_0 4 -# define VA_PTE_0 5 -# define PA_PTE_1 6 -# define VA_PTE_1 7 -# define PA_SWAP_PAGE 8 -# ifdef CONFIG_X86_PAE -# define PA_PMD_0 9 -# define VA_PMD_0 10 -# define PA_PMD_1 11 -# define VA_PMD_1 12 -# define PAGES_NR 13 -# else -# define PAGES_NR 9 -# endif +# define PA_SWAP_PAGE 3 +# define PAGES_NR 4 #else # define PA_CONTROL_PAGE 0 # define VA_CONTROL_PAGE 1 @@ -170,6 +157,20 @@ relocate_kernel(unsigned long indirection_page, unsigned long start_address) ATTRIB_NORET; #endif +#ifdef CONFIG_X86_32 +#define ARCH_HAS_KIMAGE_ARCH + +struct kimage_arch { + pgd_t *pgd; +#ifdef CONFIG_X86_PAE + pmd_t *pmd0; + pmd_t *pmd1; +#endif + pte_t *pte0; + pte_t *pte1; +}; +#endif + #endif /* __ASSEMBLY__ */ #endif /* _ASM_X86_KEXEC_H */ diff --git a/arch/x86/include/asm/linkage.h b/arch/x86/include/asm/linkage.h index f61ee8f937e4..5d98d0b68ffc 100644 --- 
a/arch/x86/include/asm/linkage.h +++ b/arch/x86/include/asm/linkage.h @@ -57,5 +57,65 @@ #define __ALIGN_STR ".align 16,0x90" #endif +/* + * to check ENTRY_X86/END_X86 and + * KPROBE_ENTRY_X86/KPROBE_END_X86 + * unbalanced-missed-mixed appearance + */ +#define __set_entry_x86 .set ENTRY_X86_IN, 0 +#define __unset_entry_x86 .set ENTRY_X86_IN, 1 +#define __set_kprobe_x86 .set KPROBE_X86_IN, 0 +#define __unset_kprobe_x86 .set KPROBE_X86_IN, 1 + +#define __macro_err_x86 .error "ENTRY_X86/KPROBE_X86 unbalanced,missed,mixed" + +#define __check_entry_x86 \ + .ifdef ENTRY_X86_IN; \ + .ifeq ENTRY_X86_IN; \ + __macro_err_x86; \ + .abort; \ + .endif; \ + .endif + +#define __check_kprobe_x86 \ + .ifdef KPROBE_X86_IN; \ + .ifeq KPROBE_X86_IN; \ + __macro_err_x86; \ + .abort; \ + .endif; \ + .endif + +#define __check_entry_kprobe_x86 \ + __check_entry_x86; \ + __check_kprobe_x86 + +#define ENTRY_KPROBE_FINAL_X86 __check_entry_kprobe_x86 + +#define ENTRY_X86(name) \ + __check_entry_kprobe_x86; \ + __set_entry_x86; \ + .globl name; \ + __ALIGN; \ + name: + +#define END_X86(name) \ + __unset_entry_x86; \ + __check_entry_kprobe_x86; \ + .size name, .-name + +#define KPROBE_ENTRY_X86(name) \ + __check_entry_kprobe_x86; \ + __set_kprobe_x86; \ + .pushsection .kprobes.text, "ax"; \ + .globl name; \ + __ALIGN; \ + name: + +#define KPROBE_END_X86(name) \ + __unset_kprobe_x86; \ + __check_entry_kprobe_x86; \ + .size name, .-name; \ + .popsection + #endif /* _ASM_X86_LINKAGE_H */ diff --git a/arch/x86/include/asm/mach-default/mach_apic.h b/arch/x86/include/asm/mach-default/mach_apic.h index ff3a6c236c00..6cb3a467e067 100644 --- a/arch/x86/include/asm/mach-default/mach_apic.h +++ b/arch/x86/include/asm/mach-default/mach_apic.h @@ -32,11 +32,13 @@ static inline cpumask_t target_cpus(void) #define vector_allocation_domain (genapic->vector_allocation_domain) #define read_apic_id() (GET_APIC_ID(apic_read(APIC_ID))) #define send_IPI_self (genapic->send_IPI_self) +#define wakeup_secondary_cpu (genapic->wakeup_cpu) extern void setup_apic_routing(void); #else #define INT_DELIVERY_MODE dest_LowestPrio #define INT_DEST_MODE 1 /* logical delivery broadcast to all procs */ #define TARGET_CPUS (target_cpus()) +#define wakeup_secondary_cpu wakeup_secondary_cpu_via_init /* * Set up the logical destination ID. * diff --git a/arch/x86/include/asm/mach-default/mach_wakecpu.h b/arch/x86/include/asm/mach-default/mach_wakecpu.h index 9d80db91e992..ceb013660146 100644 --- a/arch/x86/include/asm/mach-default/mach_wakecpu.h +++ b/arch/x86/include/asm/mach-default/mach_wakecpu.h @@ -1,17 +1,8 @@ #ifndef _ASM_X86_MACH_DEFAULT_MACH_WAKECPU_H #define _ASM_X86_MACH_DEFAULT_MACH_WAKECPU_H -/* - * This file copes with machines that wakeup secondary CPUs by the - * INIT, INIT, STARTUP sequence. 
- */ - -#define WAKE_SECONDARY_VIA_INIT - -#define TRAMPOLINE_LOW phys_to_virt(0x467) -#define TRAMPOLINE_HIGH phys_to_virt(0x469) - -#define boot_cpu_apicid boot_cpu_physical_apicid +#define TRAMPOLINE_PHYS_LOW (0x467) +#define TRAMPOLINE_PHYS_HIGH (0x469) static inline void wait_for_init_deassert(atomic_t *deassert) { @@ -33,9 +24,12 @@ static inline void restore_NMI_vector(unsigned short *high, unsigned short *low) { } -#define inquire_remote_apic(apicid) do { \ - if (apic_verbosity >= APIC_DEBUG) \ - __inquire_remote_apic(apicid); \ - } while (0) +extern void __inquire_remote_apic(int apicid); + +static inline void inquire_remote_apic(int apicid) +{ + if (apic_verbosity >= APIC_DEBUG) + __inquire_remote_apic(apicid); +} #endif /* _ASM_X86_MACH_DEFAULT_MACH_WAKECPU_H */ diff --git a/arch/x86/include/asm/mach-default/smpboot_hooks.h b/arch/x86/include/asm/mach-default/smpboot_hooks.h index dbab36d64d48..23bf52103b89 100644 --- a/arch/x86/include/asm/mach-default/smpboot_hooks.h +++ b/arch/x86/include/asm/mach-default/smpboot_hooks.h @@ -13,9 +13,11 @@ static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip) CMOS_WRITE(0xa, 0xf); local_flush_tlb(); pr_debug("1.\n"); - *((volatile unsigned short *) TRAMPOLINE_HIGH) = start_eip >> 4; + *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) = + start_eip >> 4; pr_debug("2.\n"); - *((volatile unsigned short *) TRAMPOLINE_LOW) = start_eip & 0xf; + *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) = + start_eip & 0xf; pr_debug("3.\n"); } @@ -32,7 +34,7 @@ static inline void smpboot_restore_warm_reset_vector(void) */ CMOS_WRITE(0, 0xf); - *((volatile long *) phys_to_virt(0x467)) = 0; + *((volatile long *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) = 0; } static inline void __init smpboot_setup_io_apic(void) diff --git a/arch/x86/include/asm/mach-generic/mach_apic.h b/arch/x86/include/asm/mach-generic/mach_apic.h index 5180bd7478fb..e430f47df667 100644 --- a/arch/x86/include/asm/mach-generic/mach_apic.h +++ b/arch/x86/include/asm/mach-generic/mach_apic.h @@ -27,6 +27,7 @@ #define vector_allocation_domain (genapic->vector_allocation_domain) #define enable_apic_mode (genapic->enable_apic_mode) #define phys_pkg_id (genapic->phys_pkg_id) +#define wakeup_secondary_cpu (genapic->wakeup_cpu) extern void generic_bigsmp_probe(void); diff --git a/arch/x86/include/asm/mach-generic/mach_wakecpu.h b/arch/x86/include/asm/mach-generic/mach_wakecpu.h new file mode 100644 index 000000000000..1ab16b168c8a --- /dev/null +++ b/arch/x86/include/asm/mach-generic/mach_wakecpu.h @@ -0,0 +1,12 @@ +#ifndef _ASM_X86_MACH_GENERIC_MACH_WAKECPU_H +#define _ASM_X86_MACH_GENERIC_MACH_WAKECPU_H + +#define TRAMPOLINE_PHYS_LOW (genapic->trampoline_phys_low) +#define TRAMPOLINE_PHYS_HIGH (genapic->trampoline_phys_high) +#define wait_for_init_deassert (genapic->wait_for_init_deassert) +#define smp_callin_clear_local_apic (genapic->smp_callin_clear_local_apic) +#define store_NMI_vector (genapic->store_NMI_vector) +#define restore_NMI_vector (genapic->restore_NMI_vector) +#define inquire_remote_apic (genapic->inquire_remote_apic) + +#endif /* _ASM_X86_MACH_GENERIC_MACH_APIC_H */ diff --git a/arch/x86/include/asm/mmu_context_32.h b/arch/x86/include/asm/mmu_context_32.h index 8e10015781fb..7e98ce1d2c0e 100644 --- a/arch/x86/include/asm/mmu_context_32.h +++ b/arch/x86/include/asm/mmu_context_32.h @@ -4,9 +4,8 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) { #ifdef CONFIG_SMP - unsigned cpu = smp_processor_id(); - if 
(per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) - per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_LAZY; + if (x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_OK) + x86_write_percpu(cpu_tlbstate.state, TLBSTATE_LAZY); #endif } @@ -20,8 +19,8 @@ static inline void switch_mm(struct mm_struct *prev, /* stop flush ipis for the previous mm */ cpu_clear(cpu, prev->cpu_vm_mask); #ifdef CONFIG_SMP - per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK; - per_cpu(cpu_tlbstate, cpu).active_mm = next; + x86_write_percpu(cpu_tlbstate.state, TLBSTATE_OK); + x86_write_percpu(cpu_tlbstate.active_mm, next); #endif cpu_set(cpu, next->cpu_vm_mask); @@ -36,8 +35,8 @@ static inline void switch_mm(struct mm_struct *prev, } #ifdef CONFIG_SMP else { - per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK; - BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next); + x86_write_percpu(cpu_tlbstate.state, TLBSTATE_OK); + BUG_ON(x86_read_percpu(cpu_tlbstate.active_mm) != next); if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) { /* We were in lazy tlb mode and leave_mm disabled diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index e38859d577a1..cb58643947b9 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -85,7 +85,9 @@ /* AMD64 MSRs. Not complete. See the architecture manual for a more complete list. */ +#define MSR_AMD64_PATCH_LEVEL 0x0000008b #define MSR_AMD64_NB_CFG 0xc001001f +#define MSR_AMD64_PATCH_LOADER 0xc0010020 #define MSR_AMD64_IBSFETCHCTL 0xc0011030 #define MSR_AMD64_IBSFETCHLINAD 0xc0011031 #define MSR_AMD64_IBSFETCHPHYSAD 0xc0011032 diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h index c2a812ebde89..638bf6241807 100644 --- a/arch/x86/include/asm/msr.h +++ b/arch/x86/include/asm/msr.h @@ -22,10 +22,10 @@ static inline unsigned long long native_read_tscp(unsigned int *aux) } /* - * i386 calling convention returns 64-bit value in edx:eax, while - * x86_64 returns at rax. Also, the "A" constraint does not really - * mean rdx:rax in x86_64, so we need specialized behaviour for each - * architecture + * both i386 and x86_64 returns 64-bit value in edx:eax, but gcc's "A" + * constraint has different meanings. For i386, "A" means exactly + * edx:eax, while for x86_64 it doesn't mean rdx:rax or edx:eax. Instead, + * it means rax *or* rdx. 
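
The rewritten comment above is easier to follow with a concrete wrapper: on i386 the "A" constraint really does name the edx:eax pair, while on x86-64 the two halves must be read into separate 32-bit variables and recombined, which is what the DECLARE_ARGS/EAX_EDX_VAL machinery that follows does. The snippet below is a standalone illustration, not the kernel's actual implementation.

static inline unsigned long long my_rdmsr(unsigned int msr)
{
#ifdef __x86_64__
	unsigned int lo, hi;		/* rdmsr fills eax and edx separately */

	asm volatile("rdmsr" : "=a" (lo), "=d" (hi) : "c" (msr));
	return lo | ((unsigned long long)hi << 32);
#else
	unsigned long long val;		/* "A" means exactly edx:eax here */

	asm volatile("rdmsr" : "=A" (val) : "c" (msr));
	return val;
#endif
}
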
*/ #ifdef CONFIG_X86_64 #define DECLARE_ARGS(val, low, high) unsigned low, high @@ -85,7 +85,8 @@ static inline void native_write_msr(unsigned int msr, asm volatile("wrmsr" : : "c" (msr), "a"(low), "d" (high) : "memory"); } -static inline int native_write_msr_safe(unsigned int msr, +/* Can be uninlined because referenced by paravirt */ +notrace static inline int native_write_msr_safe(unsigned int msr, unsigned low, unsigned high) { int err; @@ -181,10 +182,10 @@ static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p) } #define rdtscl(low) \ - ((low) = (u32)native_read_tsc()) + ((low) = (u32)__native_read_tsc()) #define rdtscll(val) \ - ((val) = native_read_tsc()) + ((val) = __native_read_tsc()) #define rdpmc(counter, low, high) \ do { \ diff --git a/arch/x86/include/asm/numaq/wakecpu.h b/arch/x86/include/asm/numaq/wakecpu.h index c577bda5b1c5..6f499df8eddb 100644 --- a/arch/x86/include/asm/numaq/wakecpu.h +++ b/arch/x86/include/asm/numaq/wakecpu.h @@ -3,12 +3,8 @@ /* This file copes with machines that wakeup secondary CPUs by NMIs */ -#define WAKE_SECONDARY_VIA_NMI - -#define TRAMPOLINE_LOW phys_to_virt(0x8) -#define TRAMPOLINE_HIGH phys_to_virt(0xa) - -#define boot_cpu_apicid boot_cpu_logical_apicid +#define TRAMPOLINE_PHYS_LOW (0x8) +#define TRAMPOLINE_PHYS_HIGH (0xa) /* We don't do anything here because we use NMI's to boot instead */ static inline void wait_for_init_deassert(atomic_t *deassert) @@ -27,17 +23,23 @@ static inline void smp_callin_clear_local_apic(void) static inline void store_NMI_vector(unsigned short *high, unsigned short *low) { printk("Storing NMI vector\n"); - *high = *((volatile unsigned short *) TRAMPOLINE_HIGH); - *low = *((volatile unsigned short *) TRAMPOLINE_LOW); + *high = + *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)); + *low = + *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)); } static inline void restore_NMI_vector(unsigned short *high, unsigned short *low) { printk("Restoring NMI vector\n"); - *((volatile unsigned short *) TRAMPOLINE_HIGH) = *high; - *((volatile unsigned short *) TRAMPOLINE_LOW) = *low; + *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) = + *high; + *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) = + *low; } -#define inquire_remote_apic(apicid) {} +static inline void inquire_remote_apic(int apicid) +{ +} #endif /* __ASM_NUMAQ_WAKECPU_H */ diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h index 50ac542c9382..66834c41c049 100644 --- a/arch/x86/include/asm/pci.h +++ b/arch/x86/include/asm/pci.h @@ -19,6 +19,8 @@ struct pci_sysdata { }; extern int pci_routeirq; +extern int noioapicquirk; +extern int noioapicreroute; /* scan a bus after allocating a pci_sysdata for it */ extern struct pci_bus *pci_scan_bus_on_node(int busno, struct pci_ops *ops, diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h index b17edfd23628..e0d199fe1d83 100644 --- a/arch/x86/include/asm/pgtable-2level.h +++ b/arch/x86/include/asm/pgtable-2level.h @@ -56,23 +56,55 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp) #define pte_none(x) (!(x).pte_low) /* - * Bits 0, 6 and 7 are taken, split up the 29 bits of offset - * into this range: + * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE and _PAGE_BIT_PROTNONE are taken, + * split up the 29 bits of offset into this range: */ #define PTE_FILE_MAX_BITS 29 +#define PTE_FILE_SHIFT1 (_PAGE_BIT_PRESENT + 1) +#if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE +#define PTE_FILE_SHIFT2 (_PAGE_BIT_FILE + 
1) +#define PTE_FILE_SHIFT3 (_PAGE_BIT_PROTNONE + 1) +#else +#define PTE_FILE_SHIFT2 (_PAGE_BIT_PROTNONE + 1) +#define PTE_FILE_SHIFT3 (_PAGE_BIT_FILE + 1) +#endif +#define PTE_FILE_BITS1 (PTE_FILE_SHIFT2 - PTE_FILE_SHIFT1 - 1) +#define PTE_FILE_BITS2 (PTE_FILE_SHIFT3 - PTE_FILE_SHIFT2 - 1) #define pte_to_pgoff(pte) \ - ((((pte).pte_low >> 1) & 0x1f) + (((pte).pte_low >> 8) << 5)) + ((((pte).pte_low >> PTE_FILE_SHIFT1) \ + & ((1U << PTE_FILE_BITS1) - 1)) \ + + ((((pte).pte_low >> PTE_FILE_SHIFT2) \ + & ((1U << PTE_FILE_BITS2) - 1)) << PTE_FILE_BITS1) \ + + (((pte).pte_low >> PTE_FILE_SHIFT3) \ + << (PTE_FILE_BITS1 + PTE_FILE_BITS2))) #define pgoff_to_pte(off) \ - ((pte_t) { .pte_low = (((off) & 0x1f) << 1) + \ - (((off) >> 5) << 8) + _PAGE_FILE }) + ((pte_t) { .pte_low = \ + (((off) & ((1U << PTE_FILE_BITS1) - 1)) << PTE_FILE_SHIFT1) \ + + ((((off) >> PTE_FILE_BITS1) & ((1U << PTE_FILE_BITS2) - 1)) \ + << PTE_FILE_SHIFT2) \ + + (((off) >> (PTE_FILE_BITS1 + PTE_FILE_BITS2)) \ + << PTE_FILE_SHIFT3) \ + + _PAGE_FILE }) /* Encode and de-code a swap entry */ -#define __swp_type(x) (((x).val >> 1) & 0x1f) -#define __swp_offset(x) ((x).val >> 8) -#define __swp_entry(type, offset) \ - ((swp_entry_t) { ((type) << 1) | ((offset) << 8) }) +#if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE +#define SWP_TYPE_BITS (_PAGE_BIT_FILE - _PAGE_BIT_PRESENT - 1) +#define SWP_OFFSET_SHIFT (_PAGE_BIT_PROTNONE + 1) +#else +#define SWP_TYPE_BITS (_PAGE_BIT_PROTNONE - _PAGE_BIT_PRESENT - 1) +#define SWP_OFFSET_SHIFT (_PAGE_BIT_FILE + 1) +#endif + +#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS) + +#define __swp_type(x) (((x).val >> (_PAGE_BIT_PRESENT + 1)) \ + & ((1U << SWP_TYPE_BITS) - 1)) +#define __swp_offset(x) ((x).val >> SWP_OFFSET_SHIFT) +#define __swp_entry(type, offset) ((swp_entry_t) { \ + ((type) << (_PAGE_BIT_PRESENT + 1)) \ + | ((offset) << SWP_OFFSET_SHIFT) }) #define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low }) #define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val }) diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h index 52597aeadfff..447da43cddb3 100644 --- a/arch/x86/include/asm/pgtable-3level.h +++ b/arch/x86/include/asm/pgtable-3level.h @@ -166,6 +166,7 @@ static inline int pte_none(pte_t pte) #define PTE_FILE_MAX_BITS 32 /* Encode and de-code a swap entry */ +#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > 5) #define __swp_type(x) (((x).val) & 0x1f) #define __swp_offset(x) ((x).val >> 5) #define __swp_entry(type, offset) ((swp_entry_t){(type) | (offset) << 5}) diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index c012f3b11671..83e69f4a37f0 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -10,7 +10,6 @@ #define _PAGE_BIT_PCD 4 /* page cache disabled */ #define _PAGE_BIT_ACCESSED 5 /* was accessed (raised by CPU) */ #define _PAGE_BIT_DIRTY 6 /* was written to (raised by CPU) */ -#define _PAGE_BIT_FILE 6 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */ #define _PAGE_BIT_PAT 7 /* on 4KB pages */ #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */ @@ -22,6 +21,12 @@ #define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */ +/* If _PAGE_BIT_PRESENT is clear, we use these: */ +/* - if the user mapped it with PROT_NONE; pte_present gives true */ +#define _PAGE_BIT_PROTNONE _PAGE_BIT_GLOBAL +/* - set: nonlinear file mapping, saved PTE; unset:swap */ +#define _PAGE_BIT_FILE 
_PAGE_BIT_DIRTY + #define _PAGE_PRESENT (_AT(pteval_t, 1) << _PAGE_BIT_PRESENT) #define _PAGE_RW (_AT(pteval_t, 1) << _PAGE_BIT_RW) #define _PAGE_USER (_AT(pteval_t, 1) << _PAGE_BIT_USER) @@ -46,11 +51,8 @@ #define _PAGE_NX (_AT(pteval_t, 0)) #endif -/* If _PAGE_PRESENT is clear, we use these: */ -#define _PAGE_FILE _PAGE_DIRTY /* nonlinear file mapping, - * saved PTE; unset:swap */ -#define _PAGE_PROTNONE _PAGE_PSE /* if the user mapped it with PROT_NONE; - pte_present gives true */ +#define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE) +#define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE) #define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \ _PAGE_ACCESSED | _PAGE_DIRTY) @@ -158,8 +160,19 @@ #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */ #endif +/* + * Macro to mark a page protection value as UC- + */ +#define pgprot_noncached(prot) \ + ((boot_cpu_data.x86 > 3) \ + ? (__pgprot(pgprot_val(prot) | _PAGE_CACHE_UC_MINUS)) \ + : (prot)) + #ifndef __ASSEMBLY__ +#define pgprot_writecombine pgprot_writecombine +extern pgprot_t pgprot_writecombine(pgprot_t prot); + /* * ZERO_PAGE is a global shared page that is always zero: used * for zero-mapped memory areas etc.. @@ -329,6 +342,9 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot) #define canon_pgprot(p) __pgprot(pgprot_val(p) & __supported_pte_mask) #ifndef __ASSEMBLY__ +/* Indicate that x86 has its own track and untrack pfn vma functions */ +#define __HAVE_PFNMAP_TRACKING + #define __HAVE_PHYS_MEM_ACCESS_PROT struct file; pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h index f9d5889b336b..72b020deb46b 100644 --- a/arch/x86/include/asm/pgtable_32.h +++ b/arch/x86/include/asm/pgtable_32.h @@ -101,15 +101,6 @@ extern unsigned long pg0[]; #endif /* - * Macro to mark a page protection value as "uncacheable". - * On processors which do not support it, this is a no-op. - */ -#define pgprot_noncached(prot) \ - ((boot_cpu_data.x86 > 3) \ - ? (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) \ - : (prot)) - -/* * Conversion functions: convert a page and protection to a page entry, * and a page entry and page directory to the page they refer to. */ diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index 545a0e042bb2..ba09289accaa 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h @@ -146,7 +146,7 @@ static inline void native_pgd_clear(pgd_t *pgd) #define PGDIR_MASK (~(PGDIR_SIZE - 1)) -#define MAXMEM _AC(0x00003fffffffffff, UL) +#define MAXMEM _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL) #define VMALLOC_START _AC(0xffffc20000000000, UL) #define VMALLOC_END _AC(0xffffe1ffffffffff, UL) #define VMEMMAP_START _AC(0xffffe20000000000, UL) @@ -177,12 +177,6 @@ static inline int pmd_bad(pmd_t pmd) #define pages_to_mb(x) ((x) >> (20 - PAGE_SHIFT)) /* FIXME: is this right? */ /* - * Macro to mark a page protection value as "uncacheable". - */ -#define pgprot_noncached(prot) \ - (__pgprot(pgprot_val((prot)) | _PAGE_PCD | _PAGE_PWT)) - -/* * Conversion functions: convert a page and protection to a page entry, * and a page entry and page directory to the page they refer to. 
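
With pgprot_noncached() consolidated into <asm/pgtable.h> above (now producing UC-) and pgprot_writecombine() exported alongside it, a driver marks an mmap()ed region uncached in the usual way. A hedged sketch; the driver, the file operation and my_pfn are hypothetical.

#include <linux/mm.h>
#include <linux/fs.h>

static unsigned long my_pfn;		/* hypothetical MMIO page frame */

static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	/* or: pgprot_writecombine(vma->vm_page_prot) for WC mappings */
	return remap_pfn_range(vma, vma->vm_start, my_pfn,
			       vma->vm_end - vma->vm_start, vma->vm_page_prot);
}
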
*/ @@ -250,10 +244,22 @@ static inline int pud_large(pud_t pte) extern int direct_gbpages; /* Encode and de-code a swap entry */ -#define __swp_type(x) (((x).val >> 1) & 0x3f) -#define __swp_offset(x) ((x).val >> 8) -#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | \ - ((offset) << 8) }) +#if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE +#define SWP_TYPE_BITS (_PAGE_BIT_FILE - _PAGE_BIT_PRESENT - 1) +#define SWP_OFFSET_SHIFT (_PAGE_BIT_PROTNONE + 1) +#else +#define SWP_TYPE_BITS (_PAGE_BIT_PROTNONE - _PAGE_BIT_PRESENT - 1) +#define SWP_OFFSET_SHIFT (_PAGE_BIT_FILE + 1) +#endif + +#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS) + +#define __swp_type(x) (((x).val >> (_PAGE_BIT_PRESENT + 1)) \ + & ((1U << SWP_TYPE_BITS) - 1)) +#define __swp_offset(x) ((x).val >> SWP_OFFSET_SHIFT) +#define __swp_entry(type, offset) ((swp_entry_t) { \ + ((type) << (_PAGE_BIT_PRESENT + 1)) \ + | ((offset) << SWP_OFFSET_SHIFT) }) #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val((pte)) }) #define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val }) diff --git a/arch/x86/include/asm/prctl.h b/arch/x86/include/asm/prctl.h index fe681147a4f7..a8894647dd9a 100644 --- a/arch/x86/include/asm/prctl.h +++ b/arch/x86/include/asm/prctl.h @@ -6,5 +6,8 @@ #define ARCH_GET_FS 0x1003 #define ARCH_GET_GS 0x1004 +#ifdef CONFIG_X86_64 +extern long sys_arch_prctl(int, unsigned long); +#endif /* CONFIG_X86_64 */ #endif /* _ASM_X86_PRCTL_H */ diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 5ca01e383269..091cd8855f2e 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -110,6 +110,7 @@ struct cpuinfo_x86 { /* Index into per_cpu list: */ u16 cpu_index; #endif + unsigned int x86_hyper_vendor; } __attribute__((__aligned__(SMP_CACHE_BYTES))); #define X86_VENDOR_INTEL 0 @@ -123,6 +124,9 @@ struct cpuinfo_x86 { #define X86_VENDOR_UNKNOWN 0xff +#define X86_HYPER_VENDOR_NONE 0 +#define X86_HYPER_VENDOR_VMWARE 1 + /* * capabilities of CPUs */ @@ -752,6 +756,19 @@ extern void switch_to_new_gdt(void); extern void cpu_init(void); extern void init_gdt(int cpu); +static inline unsigned long get_debugctlmsr(void) +{ + unsigned long debugctlmsr = 0; + +#ifndef CONFIG_X86_DEBUGCTLMSR + if (boot_cpu_data.x86 < 6) + return 0; +#endif + rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr); + + return debugctlmsr; +} + static inline void update_debugctlmsr(unsigned long debugctlmsr) { #ifndef CONFIG_X86_DEBUGCTLMSR diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h index eefb0594b058..6d34d954c228 100644 --- a/arch/x86/include/asm/ptrace.h +++ b/arch/x86/include/asm/ptrace.h @@ -6,7 +6,6 @@ #include <asm/processor-flags.h> #ifdef __KERNEL__ -#include <asm/ds.h> /* the DS BTS struct is used for ptrace too */ #include <asm/segment.h> #endif @@ -128,34 +127,6 @@ struct pt_regs { #endif /* !__i386__ */ -#ifdef CONFIG_X86_PTRACE_BTS -/* a branch trace record entry - * - * In order to unify the interface between various processor versions, - * we use the below data structure for all processors. 
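
As a worked example of the generic swap-entry encoding introduced above for both the 2-level and 64-bit page tables: with the bit assignments from <asm/pgtable.h> in this patch (_PAGE_BIT_PRESENT = 0, _PAGE_BIT_FILE = _PAGE_BIT_DIRTY = 6, _PAGE_BIT_PROTNONE = _PAGE_BIT_GLOBAL = 8), SWP_TYPE_BITS = 6 - 0 - 1 = 5, so the swap type occupies bits 1..5, and SWP_OFFSET_SHIFT = 8 + 1 = 9, so the offset starts at bit 9. The present, file and protnone bits are therefore never set in a swap PTE, and MAX_SWAPFILES_CHECK() guards the 5-bit type field at build time. A small userspace check of that arithmetic (a sketch, not kernel code):

#include <assert.h>

#define _PAGE_BIT_PRESENT	0
#define _PAGE_BIT_FILE		6	/* == _PAGE_BIT_DIRTY in this patch */
#define _PAGE_BIT_PROTNONE	8	/* == _PAGE_BIT_GLOBAL in this patch */

#define SWP_TYPE_BITS		(_PAGE_BIT_FILE - _PAGE_BIT_PRESENT - 1)
#define SWP_OFFSET_SHIFT	(_PAGE_BIT_PROTNONE + 1)

int main(void)
{
	unsigned long type = 3, offset = 0x1234;
	unsigned long val = (type << (_PAGE_BIT_PRESENT + 1)) |
			    (offset << SWP_OFFSET_SHIFT);

	assert(SWP_TYPE_BITS == 5 && SWP_OFFSET_SHIFT == 9);
	/* present, file and protnone bits stay clear in a swap entry */
	assert(!(val & ((1UL << _PAGE_BIT_PRESENT) |
			(1UL << _PAGE_BIT_FILE) |
			(1UL << _PAGE_BIT_PROTNONE))));
	return 0;
}
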
- */ -enum bts_qualifier { - BTS_INVALID = 0, - BTS_BRANCH, - BTS_TASK_ARRIVES, - BTS_TASK_DEPARTS -}; - -struct bts_struct { - __u64 qualifier; - union { - /* BTS_BRANCH */ - struct { - __u64 from_ip; - __u64 to_ip; - } lbr; - /* BTS_TASK_ARRIVES or - BTS_TASK_DEPARTS */ - __u64 jiffies; - } variant; -}; -#endif /* CONFIG_X86_PTRACE_BTS */ - #ifdef __KERNEL__ #include <linux/init.h> @@ -163,13 +134,6 @@ struct bts_struct { struct cpuinfo_x86; struct task_struct; -#ifdef CONFIG_X86_PTRACE_BTS -extern void __cpuinit ptrace_bts_init_intel(struct cpuinfo_x86 *); -extern void ptrace_bts_take_timestamp(struct task_struct *, enum bts_qualifier); -#else -#define ptrace_bts_init_intel(config) do {} while (0) -#endif /* CONFIG_X86_PTRACE_BTS */ - extern unsigned long profile_pc(struct pt_regs *regs); extern unsigned long @@ -271,6 +235,13 @@ extern int do_get_thread_area(struct task_struct *p, int idx, extern int do_set_thread_area(struct task_struct *p, int idx, struct user_desc __user *info, int can_allocate); +extern void x86_ptrace_untrace(struct task_struct *); +extern void x86_ptrace_fork(struct task_struct *child, + unsigned long clone_flags); + +#define arch_ptrace_untrace(tsk) x86_ptrace_untrace(tsk) +#define arch_ptrace_fork(child, flags) x86_ptrace_fork(child, flags) + #endif /* __KERNEL__ */ #endif /* !__ASSEMBLY__ */ diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h index df7710354f85..562d4fd31ba8 100644 --- a/arch/x86/include/asm/reboot.h +++ b/arch/x86/include/asm/reboot.h @@ -1,6 +1,8 @@ #ifndef _ASM_X86_REBOOT_H #define _ASM_X86_REBOOT_H +#include <linux/kdebug.h> + struct pt_regs; struct machine_ops { @@ -18,4 +20,7 @@ void native_machine_crash_shutdown(struct pt_regs *regs); void native_machine_shutdown(void); void machine_real_restart(const unsigned char *code, int length); +typedef void (*nmi_shootdown_cb)(int, struct die_args*); +void nmi_shootdown_cpus(nmi_shootdown_cb callback); + #endif /* _ASM_X86_REBOOT_H */ diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h index f12d37237465..4fcd53fd5f43 100644 --- a/arch/x86/include/asm/setup.h +++ b/arch/x86/include/asm/setup.h @@ -8,6 +8,10 @@ /* Interrupt control for vSMPowered x86_64 systems */ void vsmp_init(void); + +void setup_bios_corruption_check(void); + + #ifdef CONFIG_X86_VISWS extern void visws_early_detect(void); extern int is_visws_box(void); @@ -16,6 +20,8 @@ static inline void visws_early_detect(void) { } static inline int is_visws_box(void) { return 0; } #endif +extern int wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip); +extern int wakeup_secondary_cpu_via_init(int apicid, unsigned long start_eip); /* * Any setup quirks to be performed? 
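
The new nmi_shootdown_cpus() interface in <asm/reboot.h> above takes a callback of type nmi_shootdown_cb; the typedef suggests the callback is handed the CPU number and the NMI die_args on each CPU the NMI reaches. A hedged sketch of a caller, using the stop_this_cpu() helper also declared in this patch; the callback semantics and all names here are assumptions, not spelled out in this hunk.

#include <linux/kdebug.h>
#include <asm/reboot.h>
#include <asm/system.h>			/* stop_this_cpu() */

/* hypothetical: park every CPU that receives the shootdown NMI */
static void my_shootdown_callback(int cpu, struct die_args *args)
{
	/* a real user would likely save per-cpu crash state here first */
	stop_this_cpu(NULL);
}

static void my_emergency_stop(void)	/* hypothetical caller */
{
	nmi_shootdown_cpus(my_shootdown_callback);
}
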
*/ @@ -39,6 +45,7 @@ struct x86_quirks { void (*smp_read_mpc_oem)(struct mp_config_oemtable *oemtable, unsigned short oemsize); int (*setup_ioapic_ids)(void); + int (*update_genapic)(void); }; extern struct x86_quirks *x86_quirks; diff --git a/arch/x86/include/asm/sigframe.h b/arch/x86/include/asm/sigframe.h new file mode 100644 index 000000000000..4e0fe26d27d3 --- /dev/null +++ b/arch/x86/include/asm/sigframe.h @@ -0,0 +1,70 @@ +#ifndef _ASM_X86_SIGFRAME_H +#define _ASM_X86_SIGFRAME_H + +#include <asm/sigcontext.h> +#include <asm/siginfo.h> +#include <asm/ucontext.h> + +#ifdef CONFIG_X86_32 +#define sigframe_ia32 sigframe +#define rt_sigframe_ia32 rt_sigframe +#define sigcontext_ia32 sigcontext +#define _fpstate_ia32 _fpstate +#define ucontext_ia32 ucontext +#else /* !CONFIG_X86_32 */ + +#ifdef CONFIG_IA32_EMULATION +#include <asm/ia32.h> +#endif /* CONFIG_IA32_EMULATION */ + +#endif /* CONFIG_X86_32 */ + +#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION) +struct sigframe_ia32 { + u32 pretcode; + int sig; + struct sigcontext_ia32 sc; + /* + * fpstate is unused. fpstate is moved/allocated after + * retcode[] below. This movement allows to have the FP state and the + * future state extensions (xsave) stay together. + * And at the same time retaining the unused fpstate, prevents changing + * the offset of extramask[] in the sigframe and thus prevent any + * legacy application accessing/modifying it. + */ + struct _fpstate_ia32 fpstate_unused; +#ifdef CONFIG_IA32_EMULATION + unsigned int extramask[_COMPAT_NSIG_WORDS-1]; +#else /* !CONFIG_IA32_EMULATION */ + unsigned long extramask[_NSIG_WORDS-1]; +#endif /* CONFIG_IA32_EMULATION */ + char retcode[8]; + /* fp state follows here */ +}; + +struct rt_sigframe_ia32 { + u32 pretcode; + int sig; + u32 pinfo; + u32 puc; +#ifdef CONFIG_IA32_EMULATION + compat_siginfo_t info; +#else /* !CONFIG_IA32_EMULATION */ + struct siginfo info; +#endif /* CONFIG_IA32_EMULATION */ + struct ucontext_ia32 uc; + char retcode[8]; + /* fp state follows here */ +}; +#endif /* defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION) */ + +#ifdef CONFIG_X86_64 +struct rt_sigframe { + char __user *pretcode; + struct ucontext uc; + struct siginfo info; + /* fp state follows here */ +}; +#endif /* CONFIG_X86_64 */ + +#endif /* _ASM_X86_SIGFRAME_H */ diff --git a/arch/x86/include/asm/signal.h b/arch/x86/include/asm/signal.h index 96ac44f275da..7761a5d554bb 100644 --- a/arch/x86/include/asm/signal.h +++ b/arch/x86/include/asm/signal.h @@ -121,6 +121,10 @@ typedef unsigned long sigset_t; #ifndef __ASSEMBLY__ +# ifdef __KERNEL__ +extern void do_notify_resume(struct pt_regs *, void *, __u32); +# endif /* __KERNEL__ */ + #ifdef __i386__ # ifdef __KERNEL__ struct old_sigaction { @@ -141,8 +145,6 @@ struct k_sigaction { struct sigaction sa; }; -extern void do_notify_resume(struct pt_regs *, void *, __u32); - # else /* __KERNEL__ */ /* Here we must cater to libcs that poke about in kernel headers. 
*/ diff --git a/arch/x86/include/asm/sparsemem.h b/arch/x86/include/asm/sparsemem.h index be44f7dab395..e3cc3c063ec5 100644 --- a/arch/x86/include/asm/sparsemem.h +++ b/arch/x86/include/asm/sparsemem.h @@ -27,7 +27,7 @@ #else /* CONFIG_X86_32 */ # define SECTION_SIZE_BITS 27 /* matt - 128 is convenient right now */ # define MAX_PHYSADDR_BITS 44 -# define MAX_PHYSMEM_BITS 44 +# define MAX_PHYSMEM_BITS 44 /* Can be max 45 bits */ #endif #endif /* CONFIG_SPARSEMEM */ diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h index 87803da44010..9c6797c3e56c 100644 --- a/arch/x86/include/asm/syscalls.h +++ b/arch/x86/include/asm/syscalls.h @@ -19,6 +19,13 @@ /* kernel/ioport.c */ asmlinkage long sys_ioperm(unsigned long, unsigned long, int); +/* kernel/ldt.c */ +asmlinkage int sys_modify_ldt(int, void __user *, unsigned long); + +/* kernel/tls.c */ +asmlinkage int sys_set_thread_area(struct user_desc __user *); +asmlinkage int sys_get_thread_area(struct user_desc __user *); + /* X86_32 only */ #ifdef CONFIG_X86_32 /* kernel/process_32.c */ @@ -33,14 +40,11 @@ asmlinkage int sys_sigaction(int, const struct old_sigaction __user *, struct old_sigaction __user *); asmlinkage int sys_sigaltstack(unsigned long); asmlinkage unsigned long sys_sigreturn(unsigned long); -asmlinkage int sys_rt_sigreturn(unsigned long); +asmlinkage int sys_rt_sigreturn(struct pt_regs); /* kernel/ioport.c */ asmlinkage long sys_iopl(unsigned long); -/* kernel/ldt.c */ -asmlinkage int sys_modify_ldt(int, void __user *, unsigned long); - /* kernel/sys_i386_32.c */ asmlinkage long sys_mmap2(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long); @@ -54,10 +58,6 @@ asmlinkage int sys_uname(struct old_utsname __user *); struct oldold_utsname; asmlinkage int sys_olduname(struct oldold_utsname __user *); -/* kernel/tls.c */ -asmlinkage int sys_set_thread_area(struct user_desc __user *); -asmlinkage int sys_get_thread_area(struct user_desc __user *); - /* kernel/vm86_32.c */ asmlinkage int sys_vm86old(struct pt_regs); asmlinkage int sys_vm86(struct pt_regs); diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h index 2ed3f0f44ff7..8e626ea33a1a 100644 --- a/arch/x86/include/asm/system.h +++ b/arch/x86/include/asm/system.h @@ -17,12 +17,12 @@ # define AT_VECTOR_SIZE_ARCH 1 #endif -#ifdef CONFIG_X86_32 - struct task_struct; /* one of the stranger aspects of C forward declarations */ struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *next); +#ifdef CONFIG_X86_32 + /* * Saving eflags is important. It switches not only IOPL between tasks, * it also protects other tasks from NT leaking through sysenter etc. @@ -314,6 +314,8 @@ extern void free_init_pages(char *what, unsigned long begin, unsigned long end); void default_idle(void); +void stop_this_cpu(void *dummy); + /* * Force strict CPU ordering. 
* And yes, this is required on UP too when we're talking diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index e44d379faad2..98789647baa9 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h @@ -20,11 +20,13 @@ struct task_struct; struct exec_domain; #include <asm/processor.h> +#include <asm/ftrace.h> +#include <asm/atomic.h> struct thread_info { struct task_struct *task; /* main task structure */ struct exec_domain *exec_domain; /* execution domain */ - unsigned long flags; /* low level flags */ + __u32 flags; /* low level flags */ __u32 status; /* thread synchronous flags */ __u32 cpu; /* current CPU */ int preempt_count; /* 0 => preemptable, @@ -91,7 +93,6 @@ struct thread_info { #define TIF_FORCED_TF 24 /* true if TF in eflags artificially */ #define TIF_DEBUGCTLMSR 25 /* uses thread_struct.debugctlmsr */ #define TIF_DS_AREA_MSR 26 /* uses thread_struct.ds_area_msr */ -#define TIF_BTS_TRACE_TS 27 /* record scheduling event timestamps */ #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) @@ -113,7 +114,6 @@ struct thread_info { #define _TIF_FORCED_TF (1 << TIF_FORCED_TF) #define _TIF_DEBUGCTLMSR (1 << TIF_DEBUGCTLMSR) #define _TIF_DS_AREA_MSR (1 << TIF_DS_AREA_MSR) -#define _TIF_BTS_TRACE_TS (1 << TIF_BTS_TRACE_TS) /* work to do in syscall_trace_enter() */ #define _TIF_WORK_SYSCALL_ENTRY \ @@ -139,8 +139,7 @@ struct thread_info { /* flags to check in __switch_to() */ #define _TIF_WORK_CTXSW \ - (_TIF_IO_BITMAP|_TIF_DEBUGCTLMSR|_TIF_DS_AREA_MSR|_TIF_BTS_TRACE_TS| \ - _TIF_NOTSC) + (_TIF_IO_BITMAP|_TIF_DEBUGCTLMSR|_TIF_DS_AREA_MSR|_TIF_NOTSC) #define _TIF_WORK_CTXSW_PREV _TIF_WORK_CTXSW #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW|_TIF_DEBUG) diff --git a/arch/x86/include/asm/trampoline.h b/arch/x86/include/asm/trampoline.h index fa0d79facdbc..780ba0ab94f9 100644 --- a/arch/x86/include/asm/trampoline.h +++ b/arch/x86/include/asm/trampoline.h @@ -3,6 +3,7 @@ #ifndef __ASSEMBLY__ +#ifdef CONFIG_X86_TRAMPOLINE /* * Trampoline 80x86 program as an array. 
*/ @@ -13,8 +14,14 @@ extern unsigned char *trampoline_base; extern unsigned long init_rsp; extern unsigned long initial_code; +#define TRAMPOLINE_SIZE roundup(trampoline_end - trampoline_data, PAGE_SIZE) #define TRAMPOLINE_BASE 0x6000 + extern unsigned long setup_trampoline(void); +extern void __init reserve_trampoline_memory(void); +#else +static inline void reserve_trampoline_memory(void) {}; +#endif /* CONFIG_X86_TRAMPOLINE */ #endif /* __ASSEMBLY__ */ diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h index 45dee286e45c..2ee0a3bceedf 100644 --- a/arch/x86/include/asm/traps.h +++ b/arch/x86/include/asm/traps.h @@ -46,6 +46,10 @@ dotraplinkage void do_coprocessor_segment_overrun(struct pt_regs *, long); dotraplinkage void do_invalid_TSS(struct pt_regs *, long); dotraplinkage void do_segment_not_present(struct pt_regs *, long); dotraplinkage void do_stack_segment(struct pt_regs *, long); +#ifdef CONFIG_X86_64 +dotraplinkage void do_double_fault(struct pt_regs *, long); +asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *); +#endif dotraplinkage void do_general_protection(struct pt_regs *, long); dotraplinkage void do_page_fault(struct pt_regs *, unsigned long); dotraplinkage void do_spurious_interrupt_bug(struct pt_regs *, long); @@ -72,10 +76,13 @@ static inline int get_si_code(unsigned long condition) extern int panic_on_unrecovered_nmi; extern int kstack_depth_to_print; -#ifdef CONFIG_X86_32 void math_error(void __user *); -unsigned long patch_espfix_desc(unsigned long, unsigned long); asmlinkage void math_emulate(long); +#ifdef CONFIG_X86_32 +unsigned long patch_espfix_desc(unsigned long, unsigned long); +#else +asmlinkage void smp_thermal_interrupt(void); +asmlinkage void mce_threshold_interrupt(void); #endif #endif /* _ASM_X86_TRAPS_H */ diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h index 9cd83a8e40d5..38ae163cc91b 100644 --- a/arch/x86/include/asm/tsc.h +++ b/arch/x86/include/asm/tsc.h @@ -34,8 +34,6 @@ static inline cycles_t get_cycles(void) static __always_inline cycles_t vget_cycles(void) { - cycles_t cycles; - /* * We only do VDSOs on TSC capable CPUs, so this shouldnt * access boot_cpu_data (which is not VDSO-safe): @@ -44,11 +42,7 @@ static __always_inline cycles_t vget_cycles(void) if (!cpu_has_tsc) return 0; #endif - rdtsc_barrier(); - cycles = (cycles_t)__native_read_tsc(); - rdtsc_barrier(); - - return cycles; + return (cycles_t)__native_read_tsc(); } extern void tsc_init(void); diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index 99192bb55a53..4340055b7559 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -352,14 +352,14 @@ do { \ #define __put_user_nocheck(x, ptr, size) \ ({ \ - long __pu_err; \ + int __pu_err; \ __put_user_size((x), (ptr), (size), __pu_err, -EFAULT); \ __pu_err; \ }) #define __get_user_nocheck(x, ptr, size) \ ({ \ - long __gu_err; \ + int __gu_err; \ unsigned long __gu_val; \ __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \ (x) = (__force __typeof__(*(ptr)))__gu_val; \ diff --git a/arch/x86/include/asm/uv/bios.h b/arch/x86/include/asm/uv/bios.h index d931d3b7e6f7..7ed17ff502b9 100644 --- a/arch/x86/include/asm/uv/bios.h +++ b/arch/x86/include/asm/uv/bios.h @@ -32,13 +32,18 @@ enum uv_bios_cmd { UV_BIOS_COMMON, UV_BIOS_GET_SN_INFO, - UV_BIOS_FREQ_BASE + UV_BIOS_FREQ_BASE, + UV_BIOS_WATCHLIST_ALLOC, + UV_BIOS_WATCHLIST_FREE, + UV_BIOS_MEMPROTECT, + UV_BIOS_GET_PARTITION_ADDR }; /* * Status values returned from a BIOS 
call. */ enum { + BIOS_STATUS_MORE_PASSES = 1, BIOS_STATUS_SUCCESS = 0, BIOS_STATUS_UNIMPLEMENTED = -ENOSYS, BIOS_STATUS_EINVAL = -EINVAL, @@ -71,6 +76,21 @@ union partition_info_u { }; }; +union uv_watchlist_u { + u64 val; + struct { + u64 blade : 16, + size : 32, + filler : 16; + }; +}; + +enum uv_memprotect { + UV_MEMPROT_RESTRICT_ACCESS, + UV_MEMPROT_ALLOW_AMO, + UV_MEMPROT_ALLOW_RW +}; + /* * bios calls have 6 parameters */ @@ -80,14 +100,20 @@ extern s64 uv_bios_call_reentrant(enum uv_bios_cmd, u64, u64, u64, u64, u64); extern s64 uv_bios_get_sn_info(int, int *, long *, long *, long *); extern s64 uv_bios_freq_base(u64, u64 *); +extern int uv_bios_mq_watchlist_alloc(int, unsigned long, unsigned int, + unsigned long *); +extern int uv_bios_mq_watchlist_free(int, int); +extern s64 uv_bios_change_memprotect(u64, u64, enum uv_memprotect); +extern s64 uv_bios_reserved_page_pa(u64, u64 *, u64 *, u64 *); extern void uv_bios_init(void); +extern unsigned long sn_rtc_cycles_per_second; extern int uv_type; extern long sn_partition_id; -extern long uv_coherency_id; -extern long uv_region_size; -#define partition_coherence_id() (uv_coherency_id) +extern long sn_coherency_id; +extern long sn_region_size; +#define partition_coherence_id() (sn_coherency_id) extern struct kobject *sgi_uv_kobj; /* /sys/firmware/sgi_uv */ diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h index 7a5782610b2b..777327ef05c1 100644 --- a/arch/x86/include/asm/uv/uv_hub.h +++ b/arch/x86/include/asm/uv/uv_hub.h @@ -113,25 +113,37 @@ */ #define UV_MAX_NASID_VALUE (UV_MAX_NUMALINK_NODES * 2) +struct uv_scir_s { + struct timer_list timer; + unsigned long offset; + unsigned long last; + unsigned long idle_on; + unsigned long idle_off; + unsigned char state; + unsigned char enabled; +}; + /* * The following defines attributes of the HUB chip. These attributes are * frequently referenced and are kept in the per-cpu data areas of each cpu. * They are kept together in a struct to minimize cache misses. */ struct uv_hub_info_s { - unsigned long global_mmr_base; - unsigned long gpa_mask; - unsigned long gnode_upper; - unsigned long lowmem_remap_top; - unsigned long lowmem_remap_base; - unsigned short pnode; - unsigned short pnode_mask; - unsigned short coherency_domain_number; - unsigned short numa_blade_id; - unsigned char blade_processor_id; - unsigned char m_val; - unsigned char n_val; + unsigned long global_mmr_base; + unsigned long gpa_mask; + unsigned long gnode_upper; + unsigned long lowmem_remap_top; + unsigned long lowmem_remap_base; + unsigned short pnode; + unsigned short pnode_mask; + unsigned short coherency_domain_number; + unsigned short numa_blade_id; + unsigned char blade_processor_id; + unsigned char m_val; + unsigned char n_val; + struct uv_scir_s scir; }; + DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info); #define uv_hub_info (&__get_cpu_var(__uv_hub_info)) #define uv_cpu_hub_info(cpu) (&per_cpu(__uv_hub_info, cpu)) @@ -163,6 +175,30 @@ DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info); #define UV_APIC_PNODE_SHIFT 6 +/* Local Bus from cpu's perspective */ +#define LOCAL_BUS_BASE 0x1c00000 +#define LOCAL_BUS_SIZE (4 * 1024 * 1024) + +/* + * System Controller Interface Reg + * + * Note there are NO leds on a UV system. This register is only + * used by the system controller to monitor system-wide operation. + * There are 64 regs per node. With Nahelem cpus (2 cores per node, + * 8 cpus per core, 2 threads per cpu) there are 32 cpu threads on + * a node. 
+ * + * The window is located at top of ACPI MMR space + */ +#define SCIR_WINDOW_COUNT 64 +#define SCIR_LOCAL_MMR_BASE (LOCAL_BUS_BASE + \ + LOCAL_BUS_SIZE - \ + SCIR_WINDOW_COUNT) + +#define SCIR_CPU_HEARTBEAT 0x01 /* timer interrupt */ +#define SCIR_CPU_ACTIVITY 0x02 /* not idle */ +#define SCIR_CPU_HB_INTERVAL (HZ) /* once per second */ + /* * Macros for converting between kernel virtual addresses, socket local physical * addresses, and UV global physical addresses. @@ -174,7 +210,7 @@ DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info); static inline unsigned long uv_soc_phys_ram_to_gpa(unsigned long paddr) { if (paddr < uv_hub_info->lowmem_remap_top) - paddr += uv_hub_info->lowmem_remap_base; + paddr |= uv_hub_info->lowmem_remap_base; return paddr | uv_hub_info->gnode_upper; } @@ -182,19 +218,7 @@ static inline unsigned long uv_soc_phys_ram_to_gpa(unsigned long paddr) /* socket virtual --> UV global physical address */ static inline unsigned long uv_gpa(void *v) { - return __pa(v) | uv_hub_info->gnode_upper; -} - -/* socket virtual --> UV global physical address */ -static inline void *uv_vgpa(void *v) -{ - return (void *)uv_gpa(v); -} - -/* UV global physical address --> socket virtual */ -static inline void *uv_va(unsigned long gpa) -{ - return __va(gpa & uv_hub_info->gpa_mask); + return uv_soc_phys_ram_to_gpa(__pa(v)); } /* pnode, offset --> socket virtual */ @@ -277,6 +301,16 @@ static inline void uv_write_local_mmr(unsigned long offset, unsigned long val) *uv_local_mmr_address(offset) = val; } +static inline unsigned char uv_read_local_mmr8(unsigned long offset) +{ + return *((unsigned char *)uv_local_mmr_address(offset)); +} + +static inline void uv_write_local_mmr8(unsigned long offset, unsigned char val) +{ + *((unsigned char *)uv_local_mmr_address(offset)) = val; +} + /* * Structures and definitions for converting between cpu, node, pnode, and blade * numbers. @@ -351,5 +385,20 @@ static inline int uv_num_possible_blades(void) return uv_possible_blades; } -#endif /* _ASM_X86_UV_UV_HUB_H */ +/* Update SCIR state */ +static inline void uv_set_scir_bits(unsigned char value) +{ + if (uv_hub_info->scir.state != value) { + uv_hub_info->scir.state = value; + uv_write_local_mmr8(uv_hub_info->scir.offset, value); + } +} +static inline void uv_set_cpu_scir_bits(int cpu, unsigned char value) +{ + if (uv_cpu_hub_info(cpu)->scir.state != value) { + uv_cpu_hub_info(cpu)->scir.state = value; + uv_write_local_mmr8(uv_cpu_hub_info(cpu)->scir.offset, value); + } +} +#endif /* _ASM_X86_UV_UV_HUB_H */ diff --git a/arch/x86/include/asm/vmware.h b/arch/x86/include/asm/vmware.h new file mode 100644 index 000000000000..c11b7e100d83 --- /dev/null +++ b/arch/x86/include/asm/vmware.h @@ -0,0 +1,27 @@ +/* + * Copyright (C) 2008, VMware, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + */ +#ifndef ASM_X86__VMWARE_H +#define ASM_X86__VMWARE_H + +extern unsigned long vmware_get_tsc_khz(void); +extern int vmware_platform(void); +extern void vmware_set_feature_bits(struct cpuinfo_x86 *c); + +#endif diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h index 3f6000d95fe2..5e79ca694326 100644 --- a/arch/x86/include/asm/xen/hypercall.h +++ b/arch/x86/include/asm/xen/hypercall.h @@ -33,8 +33,14 @@ #ifndef _ASM_X86_XEN_HYPERCALL_H #define _ASM_X86_XEN_HYPERCALL_H +#include <linux/kernel.h> +#include <linux/spinlock.h> #include <linux/errno.h> #include <linux/string.h> +#include <linux/types.h> + +#include <asm/page.h> +#include <asm/pgtable.h> #include <xen/interface/xen.h> #include <xen/interface/sched.h> diff --git a/arch/x86/include/asm/xen/hypervisor.h b/arch/x86/include/asm/xen/hypervisor.h index a38d25ac87d2..81fbd735aec4 100644 --- a/arch/x86/include/asm/xen/hypervisor.h +++ b/arch/x86/include/asm/xen/hypervisor.h @@ -33,39 +33,10 @@ #ifndef _ASM_X86_XEN_HYPERVISOR_H #define _ASM_X86_XEN_HYPERVISOR_H -#include <linux/types.h> -#include <linux/kernel.h> - -#include <xen/interface/xen.h> -#include <xen/interface/version.h> - -#include <asm/ptrace.h> -#include <asm/page.h> -#include <asm/desc.h> -#if defined(__i386__) -# ifdef CONFIG_X86_PAE -# include <asm-generic/pgtable-nopud.h> -# else -# include <asm-generic/pgtable-nopmd.h> -# endif -#endif -#include <asm/xen/hypercall.h> - /* arch/i386/kernel/setup.c */ extern struct shared_info *HYPERVISOR_shared_info; extern struct start_info *xen_start_info; -/* arch/i386/mach-xen/evtchn.c */ -/* Force a proper event-channel callback from Xen. */ -extern void force_evtchn_callback(void); - -/* Turn jiffies into Xen system time. */ -u64 jiffies_to_st(unsigned long jiffies); - - -#define MULTI_UVMFLAGS_INDEX 3 -#define MULTI_UVMDOMID_INDEX 4 - enum xen_domain_type { XEN_NATIVE, XEN_PV_DOMAIN, @@ -74,9 +45,15 @@ enum xen_domain_type { extern enum xen_domain_type xen_domain_type; +#ifdef CONFIG_XEN #define xen_domain() (xen_domain_type != XEN_NATIVE) -#define xen_pv_domain() (xen_domain_type == XEN_PV_DOMAIN) +#else +#define xen_domain() (0) +#endif + +#define xen_pv_domain() (xen_domain() && xen_domain_type == XEN_PV_DOMAIN) +#define xen_hvm_domain() (xen_domain() && xen_domain_type == XEN_HVM_DOMAIN) + #define xen_initial_domain() (xen_pv_domain() && xen_start_info->flags & SIF_INITDOMAIN) -#define xen_hvm_domain() (xen_domain_type == XEN_HVM_DOMAIN) #endif /* _ASM_X86_XEN_HYPERVISOR_H */ diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h index bc628998a1b9..7ef617ef1df3 100644 --- a/arch/x86/include/asm/xen/page.h +++ b/arch/x86/include/asm/xen/page.h @@ -1,11 +1,16 @@ #ifndef _ASM_X86_XEN_PAGE_H #define _ASM_X86_XEN_PAGE_H +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/spinlock.h> #include <linux/pfn.h> #include <asm/uaccess.h> +#include <asm/page.h> #include <asm/pgtable.h> +#include <xen/interface/xen.h> #include <xen/features.h> /* Xen machine address */ diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index a9c656f2d661..d364df03c1d6 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -12,6 +12,7 @@ CFLAGS_REMOVE_tsc.o = -pg CFLAGS_REMOVE_rtc.o = -pg CFLAGS_REMOVE_paravirt-spinlocks.o = -pg CFLAGS_REMOVE_ftrace.o = -pg +CFLAGS_REMOVE_early_printk.o = -pg endif # @@ -23,9 +24,9 @@ CFLAGS_vsyscall_64.o := $(PROFILING) -g0 $(nostackp) CFLAGS_hpet.o := $(nostackp) CFLAGS_tsc.o := $(nostackp) -obj-y := 
process_$(BITS).o signal_$(BITS).o entry_$(BITS).o +obj-y := process_$(BITS).o signal.o entry_$(BITS).o obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o -obj-y += time_$(BITS).o ioport.o ldt.o +obj-y += time_$(BITS).o ioport.o ldt.o dumpstack.o obj-y += setup.o i8259.o irqinit_$(BITS).o setup_percpu.o obj-$(CONFIG_X86_VISWS) += visws_quirks.o obj-$(CONFIG_X86_32) += probe_roms_32.o @@ -65,6 +66,7 @@ obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o obj-$(CONFIG_X86_IO_APIC) += io_apic.o obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o +obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o @@ -105,6 +107,8 @@ microcode-$(CONFIG_MICROCODE_INTEL) += microcode_intel.o microcode-$(CONFIG_MICROCODE_AMD) += microcode_amd.o obj-$(CONFIG_MICROCODE) += microcode.o +obj-$(CONFIG_X86_CHECK_BIOS_CORRUPTION) += check.o + obj-$(CONFIG_SWIOTLB) += pci-swiotlb_64.o # NB rename without _64 ### diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 4c51a2f8fd31..65d0b72777ea 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -1360,6 +1360,17 @@ static void __init acpi_process_madt(void) disable_acpi(); } } + + /* + * ACPI supports both logical (e.g. Hyper-Threading) and physical + * processors, where MPS only supports physical. + */ + if (acpi_lapic && acpi_ioapic) + printk(KERN_INFO "Using ACPI (MADT) for SMP configuration " + "information\n"); + else if (acpi_lapic) + printk(KERN_INFO "Using ACPI for processor (LAPIC) " + "configuration information\n"); #endif return; } diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index 0a60d60ed036..2e2da717b350 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c @@ -24,6 +24,7 @@ #include <linux/iommu-helper.h> #include <asm/proto.h> #include <asm/iommu.h> +#include <asm/gart.h> #include <asm/amd_iommu_types.h> #include <asm/amd_iommu.h> diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c index c6cc22815d35..c625800c55ca 100644 --- a/arch/x86/kernel/amd_iommu_init.c +++ b/arch/x86/kernel/amd_iommu_init.c @@ -28,6 +28,7 @@ #include <asm/amd_iommu_types.h> #include <asm/amd_iommu.h> #include <asm/iommu.h> +#include <asm/gart.h> /* * definitions for the ACPI scanning code diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c index 9a32b37ee2ee..676debfc1702 100644 --- a/arch/x86/kernel/aperture_64.c +++ b/arch/x86/kernel/aperture_64.c @@ -1,8 +1,9 @@ /* * Firmware replacement code. * - * Work around broken BIOSes that don't set an aperture or only set the - * aperture in the AGP bridge. + * Work around broken BIOSes that don't set an aperture, only set the + * aperture in the AGP bridge, or set too small aperture. + * * If all fails map the aperture over some low memory. This is cheaper than * doing bounce buffering. The memory is lost. This is done at early boot * because only the bootmem allocator can allocate 32+MB. 
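The aperture_64.c comment above describes working around BIOSes that leave the GART aperture unset, too small, or configured only in the AGP bridge, with a last-resort fallback of mapping the aperture over low memory (cheaper than bounce buffering, at the cost of losing that memory). The short C sketch below is an editorial illustration of one plausible reading of that fallback order; it is not taken from the commit, and bios_aperture_valid(), read_agp_bridge_aperture() and allocate_low_memory_aperture() are hypothetical stand-ins for the real early-boot code.

/*
 * Illustrative sketch only -- not part of the commit above and not the
 * kernel's implementation.  It restates, as a runnable user-space
 * program, the fallback order sketched in the aperture_64.c comment:
 * trust the BIOS aperture if usable, otherwise look at the AGP bridge,
 * and as a last resort carve the aperture out of low memory.
 * All helper names are hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct aperture {
        uint64_t base;
        uint64_t size;
};

static bool bios_aperture_valid(struct aperture *ap)
{
        (void)ap;
        return false;           /* stand-in: pretend the BIOS set nothing usable */
}

static bool read_agp_bridge_aperture(struct aperture *ap)
{
        (void)ap;
        return false;           /* stand-in: no usable setting in the AGP bridge */
}

static bool allocate_low_memory_aperture(struct aperture *ap)
{
        ap->base = 64ULL << 20; /* e.g. 64 MB taken from early boot memory */
        ap->size = 64ULL << 20; /* the "32+MB" the comment refers to */
        return true;
}

static bool setup_aperture(struct aperture *ap)
{
        if (bios_aperture_valid(ap))
                return true;                     /* BIOS did its job */
        if (read_agp_bridge_aperture(ap))
                return true;                     /* aperture only set in the bridge */
        return allocate_low_memory_aperture(ap); /* low memory is lost here */
}

int main(void)
{
        struct aperture ap = { 0, 0 };

        if (setup_aperture(&ap))
                printf("aperture at %#llx, size %llu MB\n",
                       (unsigned long long)ap.base,
                       (unsigned long long)ap.size >> 20);
        return 0;
}

The ordering matters for the reason the comment gives: the low-memory fallback always succeeds but permanently gives up that RAM, so it is tried only after the BIOS and AGP-bridge settings have been ruled out.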
diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c index 16f94879b525..b5229affb953 100644 --- a/arch/x86/kernel/apic.c +++ b/arch/x86/kernel/apic.c @@ -30,6 +30,7 @@ #include <linux/module.h> #include <linux/dmi.h> #include <linux/dmar.h> +#include <linux/ftrace.h> #include <asm/atomic.h> #include <asm/smp.h> @@ -441,6 +442,7 @@ static void lapic_timer_setup(enum clock_event_mode mode, v = apic_read(APIC_LVTT); v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR); apic_write(APIC_LVTT, v); + apic_write(APIC_TMICT, 0xffffffff); break; case CLOCK_EVT_MODE_RESUME: /* Nothing to do here */ @@ -559,13 +561,13 @@ static int __init calibrate_by_pmtimer(long deltapm, long *delta) } else { res = (((u64)deltapm) * mult) >> 22; do_div(res, 1000000); - printk(KERN_WARNING "APIC calibration not consistent " + pr_warning("APIC calibration not consistent " "with PM Timer: %ldms instead of 100ms\n", (long)res); /* Correct the lapic counter value */ res = (((u64)(*delta)) * pm_100ms); do_div(res, deltapm); - printk(KERN_INFO "APIC delta adjusted to PM-Timer: " + pr_info("APIC delta adjusted to PM-Timer: " "%lu (%ld)\n", (unsigned long)res, *delta); *delta = (long)res; } @@ -645,8 +647,7 @@ static int __init calibrate_APIC_clock(void) */ if (calibration_result < (1000000 / HZ)) { local_irq_enable(); - printk(KERN_WARNING - "APIC frequency too slow, disabling apic timer\n"); + pr_warning("APIC frequency too slow, disabling apic timer\n"); return -1; } @@ -672,13 +673,9 @@ static int __init calibrate_APIC_clock(void) while (lapic_cal_loops <= LAPIC_CAL_LOOPS) cpu_relax(); - local_irq_disable(); - /* Stop the lapic timer */ lapic_timer_setup(CLOCK_EVT_MODE_SHUTDOWN, levt); - local_irq_enable(); - /* Jiffies delta */ deltaj = lapic_cal_j2 - lapic_cal_j1; apic_printk(APIC_VERBOSE, "... jiffies delta = %lu\n", deltaj); @@ -692,8 +689,7 @@ static int __init calibrate_APIC_clock(void) local_irq_enable(); if (levt->features & CLOCK_EVT_FEAT_DUMMY) { - printk(KERN_WARNING - "APIC timer disabled due to verification failure.\n"); + pr_warning("APIC timer disabled due to verification failure.\n"); return -1; } @@ -714,7 +710,7 @@ void __init setup_boot_APIC_clock(void) * broadcast mechanism is used. On UP systems simply ignore it. */ if (disable_apic_timer) { - printk(KERN_INFO "Disabling APIC timer\n"); + pr_info("Disabling APIC timer\n"); /* No broadcast on UP ! */ if (num_possible_cpus() > 1) { lapic_clockevent.mult = 1; @@ -741,7 +737,7 @@ void __init setup_boot_APIC_clock(void) if (nmi_watchdog != NMI_IO_APIC) lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY; else - printk(KERN_WARNING "APIC timer registered as dummy," + pr_warning("APIC timer registered as dummy," " due to nmi_watchdog=%d!\n", nmi_watchdog); /* Setup the lapic or request the broadcast */ @@ -773,8 +769,7 @@ static void local_apic_timer_interrupt(void) * spurious. */ if (!evt->event_handler) { - printk(KERN_WARNING - "Spurious LAPIC timer interrupt on cpu %d\n", cpu); + pr_warning("Spurious LAPIC timer interrupt on cpu %d\n", cpu); /* Switch it off */ lapic_timer_setup(CLOCK_EVT_MODE_SHUTDOWN, evt); return; @@ -783,11 +778,7 @@ static void local_apic_timer_interrupt(void) /* * the NMI deadlock-detector uses this. */ -#ifdef CONFIG_X86_64 - add_pda(apic_timer_irqs, 1); -#else - per_cpu(irq_stat, cpu).apic_timer_irqs++; -#endif + inc_irq_stat(apic_timer_irqs); evt->event_handler(evt); } @@ -800,7 +791,7 @@ static void local_apic_timer_interrupt(void) * [ if a single-CPU system runs an SMP kernel then we call the local * interrupt as well. 
Thus we cannot inline the local irq ... ] */ -void smp_apic_timer_interrupt(struct pt_regs *regs) +void __irq_entry smp_apic_timer_interrupt(struct pt_regs *regs) { struct pt_regs *old_regs = set_irq_regs(regs); @@ -814,9 +805,7 @@ void smp_apic_timer_interrupt(struct pt_regs *regs) * Besides, if we don't timer interrupts ignore the global * interrupt lock, which is the WrongThing (tm) to do. */ -#ifdef CONFIG_X86_64 exit_idle(); -#endif irq_enter(); local_apic_timer_interrupt(); irq_exit(); @@ -1093,7 +1082,7 @@ static void __cpuinit lapic_setup_esr(void) unsigned int oldvalue, value, maxlvt; if (!lapic_is_integrated()) { - printk(KERN_INFO "No ESR for 82489DX.\n"); + pr_info("No ESR for 82489DX.\n"); return; } @@ -1104,7 +1093,7 @@ static void __cpuinit lapic_setup_esr(void) * ESR disabled - we can't do anything useful with the * errors anyway - mbligh */ - printk(KERN_INFO "Leaving ESR disabled.\n"); + pr_info("Leaving ESR disabled.\n"); return; } @@ -1298,7 +1287,7 @@ void check_x2apic(void) rdmsr(MSR_IA32_APICBASE, msr, msr2); if (msr & X2APIC_ENABLE) { - printk("x2apic enabled by BIOS, switching to x2apic ops\n"); + pr_info("x2apic enabled by BIOS, switching to x2apic ops\n"); x2apic_preenabled = x2apic = 1; apic_ops = &x2apic_ops; } @@ -1310,7 +1299,7 @@ void enable_x2apic(void) rdmsr(MSR_IA32_APICBASE, msr, msr2); if (!(msr & X2APIC_ENABLE)) { - printk("Enabling x2apic\n"); + pr_info("Enabling x2apic\n"); wrmsr(MSR_IA32_APICBASE, msr | X2APIC_ENABLE, 0); } } @@ -1325,9 +1314,8 @@ void __init enable_IR_x2apic(void) return; if (!x2apic_preenabled && disable_x2apic) { - printk(KERN_INFO - "Skipped enabling x2apic and Interrupt-remapping " - "because of nox2apic\n"); + pr_info("Skipped enabling x2apic and Interrupt-remapping " + "because of nox2apic\n"); return; } @@ -1335,22 +1323,19 @@ void __init enable_IR_x2apic(void) panic("Bios already enabled x2apic, can't enforce nox2apic"); if (!x2apic_preenabled && skip_ioapic_setup) { - printk(KERN_INFO - "Skipped enabling x2apic and Interrupt-remapping " - "because of skipping io-apic setup\n"); + pr_info("Skipped enabling x2apic and Interrupt-remapping " + "because of skipping io-apic setup\n"); return; } ret = dmar_table_init(); if (ret) { - printk(KERN_INFO - "dmar_table_init() failed with %d:\n", ret); + pr_info("dmar_table_init() failed with %d:\n", ret); if (x2apic_preenabled) panic("x2apic enabled by bios. 
But IR enabling failed"); else - printk(KERN_INFO - "Not enabling x2apic,Intr-remapping\n"); + pr_info("Not enabling x2apic,Intr-remapping\n"); return; } @@ -1359,7 +1344,7 @@ void __init enable_IR_x2apic(void) ret = save_mask_IO_APIC_setup(); if (ret) { - printk(KERN_INFO "Saving IO-APIC state failed: %d\n", ret); + pr_info("Saving IO-APIC state failed: %d\n", ret); goto end; } @@ -1394,14 +1379,11 @@ end: if (!ret) { if (!x2apic_preenabled) - printk(KERN_INFO - "Enabled x2apic and interrupt-remapping\n"); + pr_info("Enabled x2apic and interrupt-remapping\n"); else - printk(KERN_INFO - "Enabled Interrupt-remapping\n"); + pr_info("Enabled Interrupt-remapping\n"); } else - printk(KERN_ERR - "Failed to enable Interrupt-remapping and x2apic\n"); + pr_err("Failed to enable Interrupt-remapping and x2apic\n"); #else if (!cpu_has_x2apic) return; @@ -1410,8 +1392,8 @@ end: panic("x2apic enabled prior OS handover," " enable CONFIG_INTR_REMAP"); - printk(KERN_INFO "Enable CONFIG_INTR_REMAP for enabling intr-remapping " - " and x2apic\n"); + pr_info("Enable CONFIG_INTR_REMAP for enabling intr-remapping " + " and x2apic\n"); #endif return; @@ -1428,7 +1410,7 @@ end: static int __init detect_init_APIC(void) { if (!cpu_has_apic) { - printk(KERN_INFO "No local APIC present\n"); + pr_info("No local APIC present\n"); return -1; } @@ -1469,8 +1451,8 @@ static int __init detect_init_APIC(void) * "lapic" specified. */ if (!force_enable_local_apic) { - printk(KERN_INFO "Local APIC disabled by BIOS -- " - "you can enable it with \"lapic\"\n"); + pr_info("Local APIC disabled by BIOS -- " + "you can enable it with \"lapic\"\n"); return -1; } /* @@ -1480,8 +1462,7 @@ static int __init detect_init_APIC(void) */ rdmsr(MSR_IA32_APICBASE, l, h); if (!(l & MSR_IA32_APICBASE_ENABLE)) { - printk(KERN_INFO - "Local APIC disabled by BIOS -- reenabling.\n"); + pr_info("Local APIC disabled by BIOS -- reenabling.\n"); l &= ~MSR_IA32_APICBASE_BASE; l |= MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE; wrmsr(MSR_IA32_APICBASE, l, h); @@ -1494,7 +1475,7 @@ static int __init detect_init_APIC(void) */ features = cpuid_edx(1); if (!(features & (1 << X86_FEATURE_APIC))) { - printk(KERN_WARNING "Could not enable APIC!\n"); + pr_warning("Could not enable APIC!\n"); return -1; } set_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC); @@ -1505,14 +1486,14 @@ static int __init detect_init_APIC(void) if (l & MSR_IA32_APICBASE_ENABLE) mp_lapic_addr = l & MSR_IA32_APICBASE_BASE; - printk(KERN_INFO "Found and enabled local APIC!\n"); + pr_info("Found and enabled local APIC!\n"); apic_pm_activate(); return 0; no_apic: - printk(KERN_INFO "No local APIC present or hardware disabled\n"); + pr_info("No local APIC present or hardware disabled\n"); return -1; } #endif @@ -1588,12 +1569,12 @@ int __init APIC_init_uniprocessor(void) { #ifdef CONFIG_X86_64 if (disable_apic) { - printk(KERN_INFO "Apic disabled\n"); + pr_info("Apic disabled\n"); return -1; } if (!cpu_has_apic) { disable_apic = 1; - printk(KERN_INFO "Apic disabled by BIOS\n"); + pr_info("Apic disabled by BIOS\n"); return -1; } #else @@ -1605,8 +1586,8 @@ int __init APIC_init_uniprocessor(void) */ if (!cpu_has_apic && APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) { - printk(KERN_ERR "BIOS bug, local APIC 0x%x not detected!...\n", - boot_cpu_physical_apicid); + pr_err("BIOS bug, local APIC 0x%x not detected!...\n", + boot_cpu_physical_apicid); clear_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC); return -1; } @@ -1682,9 +1663,7 @@ void smp_spurious_interrupt(struct pt_regs *regs) { u32 v; 
-#ifdef CONFIG_X86_64 exit_idle(); -#endif irq_enter(); /* * Check if this really is a spurious interrupt and ACK it @@ -1695,14 +1674,11 @@ void smp_spurious_interrupt(struct pt_regs *regs) if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f))) ack_APIC_irq(); -#ifdef CONFIG_X86_64 - add_pda(irq_spurious_count, 1); -#else + inc_irq_stat(irq_spurious_count); + /* see sw-dev-man vol 3, chapter 7.4.13.5 */ - printk(KERN_INFO "spurious APIC interrupt on CPU#%d, " - "should never happen.\n", smp_processor_id()); - __get_cpu_var(irq_stat).irq_spurious_count++; -#endif + pr_info("spurious APIC interrupt on CPU#%d, " + "should never happen.\n", smp_processor_id()); irq_exit(); } @@ -1713,9 +1689,7 @@ void smp_error_interrupt(struct pt_regs *regs) { u32 v, v1; -#ifdef CONFIG_X86_64 exit_idle(); -#endif irq_enter(); /* First tickle the hardware, only then report what went on. -- REW */ v = apic_read(APIC_ESR); @@ -1724,17 +1698,18 @@ void smp_error_interrupt(struct pt_regs *regs) ack_APIC_irq(); atomic_inc(&irq_err_count); - /* Here is what the APIC error bits mean: - 0: Send CS error - 1: Receive CS error - 2: Send accept error - 3: Receive accept error - 4: Reserved - 5: Send illegal vector - 6: Received illegal vector - 7: Illegal register address - */ - printk(KERN_DEBUG "APIC error on CPU%d: %02x(%02x)\n", + /* + * Here is what the APIC error bits mean: + * 0: Send CS error + * 1: Receive CS error + * 2: Send accept error + * 3: Receive accept error + * 4: Reserved + * 5: Send illegal vector + * 6: Received illegal vector + * 7: Illegal register address + */ + pr_debug("APIC error on CPU%d: %02x(%02x)\n", smp_processor_id(), v , v1); irq_exit(); } @@ -1838,15 +1813,15 @@ void __cpuinit generic_processor_info(int apicid, int version) * Validate version */ if (version == 0x0) { - printk(KERN_WARNING "BIOS bug, APIC version is 0 for CPU#%d! " - "fixing up to 0x10. (tell your hw vendor)\n", - version); + pr_warning("BIOS bug, APIC version is 0 for CPU#%d! " + "fixing up to 0x10. (tell your hw vendor)\n", + version); version = 0x10; } apic_version[apicid] = version; if (num_processors >= NR_CPUS) { - printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached." + pr_warning("WARNING: NR_CPUS limit of %i reached." 
" Processor ignored.\n", NR_CPUS); return; } @@ -2209,7 +2184,7 @@ static int __init apic_set_verbosity(char *arg) else if (strcmp("verbose", arg) == 0) apic_verbosity = APIC_VERBOSE; else { - printk(KERN_WARNING "APIC Verbosity level %s not recognised" + pr_warning("APIC Verbosity level %s not recognised" " use apic=verbose or apic=debug\n", arg); return -EINVAL; } diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c index 5145a6e72bbb..3a26525a3f31 100644 --- a/arch/x86/kernel/apm_32.c +++ b/arch/x86/kernel/apm_32.c @@ -391,11 +391,7 @@ static int power_off; #else static int power_off = 1; #endif -#ifdef CONFIG_APM_REAL_MODE_POWER_OFF -static int realmode_power_off = 1; -#else static int realmode_power_off; -#endif #ifdef CONFIG_APM_ALLOW_INTS static int allow_ints = 1; #else diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c index 6649d09ad88f..ee4df08feee6 100644 --- a/arch/x86/kernel/asm-offsets_32.c +++ b/arch/x86/kernel/asm-offsets_32.c @@ -11,7 +11,7 @@ #include <linux/suspend.h> #include <linux/kbuild.h> #include <asm/ucontext.h> -#include "sigframe.h" +#include <asm/sigframe.h> #include <asm/pgtable.h> #include <asm/fixmap.h> #include <asm/processor.h> diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c index 7fcf63d22f8b..1d41d3f1edbc 100644 --- a/arch/x86/kernel/asm-offsets_64.c +++ b/arch/x86/kernel/asm-offsets_64.c @@ -20,6 +20,8 @@ #include <xen/interface/xen.h> +#include <asm/sigframe.h> + #define __NO_STUBS 1 #undef __SYSCALL #undef _ASM_X86_UNISTD_64_H @@ -87,7 +89,7 @@ int main(void) BLANK(); #undef ENTRY DEFINE(IA32_RT_SIGFRAME_sigcontext, - offsetof (struct rt_sigframe32, uc.uc_mcontext)); + offsetof (struct rt_sigframe_ia32, uc.uc_mcontext)); BLANK(); #endif DEFINE(pbe_address, offsetof(struct pbe, address)); diff --git a/arch/x86/kernel/bios_uv.c b/arch/x86/kernel/bios_uv.c index f0dfe6f17e7e..2a0a2a3cac26 100644 --- a/arch/x86/kernel/bios_uv.c +++ b/arch/x86/kernel/bios_uv.c @@ -69,10 +69,10 @@ s64 uv_bios_call_reentrant(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, long sn_partition_id; EXPORT_SYMBOL_GPL(sn_partition_id); -long uv_coherency_id; -EXPORT_SYMBOL_GPL(uv_coherency_id); -long uv_region_size; -EXPORT_SYMBOL_GPL(uv_region_size); +long sn_coherency_id; +EXPORT_SYMBOL_GPL(sn_coherency_id); +long sn_region_size; +EXPORT_SYMBOL_GPL(sn_region_size); int uv_type; @@ -100,6 +100,56 @@ s64 uv_bios_get_sn_info(int fc, int *uvtype, long *partid, long *coher, return ret; } +int +uv_bios_mq_watchlist_alloc(int blade, unsigned long addr, unsigned int mq_size, + unsigned long *intr_mmr_offset) +{ + union uv_watchlist_u size_blade; + u64 watchlist; + s64 ret; + + size_blade.size = mq_size; + size_blade.blade = blade; + + /* + * bios returns watchlist number or negative error number. 
+ */ + ret = (int)uv_bios_call_irqsave(UV_BIOS_WATCHLIST_ALLOC, addr, + size_blade.val, (u64)intr_mmr_offset, + (u64)&watchlist, 0); + if (ret < BIOS_STATUS_SUCCESS) + return ret; + + return watchlist; +} +EXPORT_SYMBOL_GPL(uv_bios_mq_watchlist_alloc); + +int +uv_bios_mq_watchlist_free(int blade, int watchlist_num) +{ + return (int)uv_bios_call_irqsave(UV_BIOS_WATCHLIST_FREE, + blade, watchlist_num, 0, 0, 0); +} +EXPORT_SYMBOL_GPL(uv_bios_mq_watchlist_free); + +s64 +uv_bios_change_memprotect(u64 paddr, u64 len, enum uv_memprotect perms) +{ + return uv_bios_call_irqsave(UV_BIOS_MEMPROTECT, paddr, len, + perms, 0, 0); +} +EXPORT_SYMBOL_GPL(uv_bios_change_memprotect); + +s64 +uv_bios_reserved_page_pa(u64 buf, u64 *cookie, u64 *addr, u64 *len) +{ + s64 ret; + + ret = uv_bios_call_irqsave(UV_BIOS_GET_PARTITION_ADDR, (u64)cookie, + (u64)addr, buf, (u64)len, 0); + return ret; +} +EXPORT_SYMBOL_GPL(uv_bios_reserved_page_pa); s64 uv_bios_freq_base(u64 clock_type, u64 *ticks_per_second) { diff --git a/arch/x86/kernel/check.c b/arch/x86/kernel/check.c new file mode 100644 index 000000000000..2ac0ab71412a --- /dev/null +++ b/arch/x86/kernel/check.c @@ -0,0 +1,161 @@ +#include <linux/module.h> +#include <linux/sched.h> +#include <linux/kthread.h> +#include <linux/workqueue.h> +#include <asm/e820.h> +#include <asm/proto.h> + +/* + * Some BIOSes seem to corrupt the low 64k of memory during events + * like suspend/resume and unplugging an HDMI cable. Reserve all + * remaining free memory in that area and fill it with a distinct + * pattern. + */ +#define MAX_SCAN_AREAS 8 + +static int __read_mostly memory_corruption_check = -1; + +static unsigned __read_mostly corruption_check_size = 64*1024; +static unsigned __read_mostly corruption_check_period = 60; /* seconds */ + +static struct e820entry scan_areas[MAX_SCAN_AREAS]; +static int num_scan_areas; + + +static __init int set_corruption_check(char *arg) +{ + char *end; + + memory_corruption_check = simple_strtol(arg, &end, 10); + + return (*end == 0) ? 0 : -EINVAL; +} +early_param("memory_corruption_check", set_corruption_check); + +static __init int set_corruption_check_period(char *arg) +{ + char *end; + + corruption_check_period = simple_strtoul(arg, &end, 10); + + return (*end == 0) ? 0 : -EINVAL; +} +early_param("memory_corruption_check_period", set_corruption_check_period); + +static __init int set_corruption_check_size(char *arg) +{ + char *end; + unsigned size; + + size = memparse(arg, &end); + + if (*end == '\0') + corruption_check_size = size; + + return (size == corruption_check_size) ? 
0 : -EINVAL; +} +early_param("memory_corruption_check_size", set_corruption_check_size); + + +void __init setup_bios_corruption_check(void) +{ + u64 addr = PAGE_SIZE; /* assume first page is reserved anyway */ + + if (memory_corruption_check == -1) { + memory_corruption_check = +#ifdef CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK + 1 +#else + 0 +#endif + ; + } + + if (corruption_check_size == 0) + memory_corruption_check = 0; + + if (!memory_corruption_check) + return; + + corruption_check_size = round_up(corruption_check_size, PAGE_SIZE); + + while (addr < corruption_check_size && num_scan_areas < MAX_SCAN_AREAS) { + u64 size; + addr = find_e820_area_size(addr, &size, PAGE_SIZE); + + if (addr == 0) + break; + + if ((addr + size) > corruption_check_size) + size = corruption_check_size - addr; + + if (size == 0) + break; + + e820_update_range(addr, size, E820_RAM, E820_RESERVED); + scan_areas[num_scan_areas].addr = addr; + scan_areas[num_scan_areas].size = size; + num_scan_areas++; + + /* Assume we've already mapped this early memory */ + memset(__va(addr), 0, size); + + addr += size; + } + + printk(KERN_INFO "Scanning %d areas for low memory corruption\n", + num_scan_areas); + update_e820(); +} + + +void check_for_bios_corruption(void) +{ + int i; + int corruption = 0; + + if (!memory_corruption_check) + return; + + for (i = 0; i < num_scan_areas; i++) { + unsigned long *addr = __va(scan_areas[i].addr); + unsigned long size = scan_areas[i].size; + + for (; size; addr++, size -= sizeof(unsigned long)) { + if (!*addr) + continue; + printk(KERN_ERR "Corrupted low memory at %p (%lx phys) = %08lx\n", + addr, __pa(addr), *addr); + corruption = 1; + *addr = 0; + } + } + + WARN_ONCE(corruption, KERN_ERR "Memory corruption detected in low memory\n"); +} + +static void check_corruption(struct work_struct *dummy); +static DECLARE_DELAYED_WORK(bios_check_work, check_corruption); + +static void check_corruption(struct work_struct *dummy) +{ + check_for_bios_corruption(); + schedule_delayed_work(&bios_check_work, + round_jiffies_relative(corruption_check_period*HZ)); +} + +static int start_periodic_check_for_corruption(void) +{ + if (!memory_corruption_check || corruption_check_period == 0) + return 0; + + printk(KERN_INFO "Scanning for low memory corruption every %d seconds\n", + corruption_check_period); + + /* First time we run the checks right away */ + schedule_delayed_work(&bios_check_work, 0); + return 0; +} + +module_init(start_periodic_check_for_corruption); + diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile index 82ec6075c057..82db7f45e2de 100644 --- a/arch/x86/kernel/cpu/Makefile +++ b/arch/x86/kernel/cpu/Makefile @@ -2,8 +2,14 @@ # Makefile for x86-compatible CPU details and quirks # +# Don't trace early stages of a secondary CPU boot +ifdef CONFIG_FUNCTION_TRACER +CFLAGS_REMOVE_common.o = -pg +endif + obj-y := intel_cacheinfo.o addon_cpuid_features.o obj-y += proc.o capflags.o powerflags.o common.o +obj-y += vmware.o hypervisor.o obj-$(CONFIG_X86_32) += bugs.o cmpxchg.o obj-$(CONFIG_X86_64) += bugs_64.o diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c index ef8f831af823..2cf23634b6d9 100644 --- a/arch/x86/kernel/cpu/addon_cpuid_features.c +++ b/arch/x86/kernel/cpu/addon_cpuid_features.c @@ -120,9 +120,17 @@ void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c) c->cpu_core_id = phys_pkg_id(c->initial_apicid, ht_mask_width) & core_select_mask; c->phys_proc_id = phys_pkg_id(c->initial_apicid, 
core_plus_mask_width); + /* + * Reinit the apicid, now that we have extended initial_apicid. + */ + c->apicid = phys_pkg_id(c->initial_apicid, 0); #else c->cpu_core_id = phys_pkg_id(ht_mask_width) & core_select_mask; c->phys_proc_id = phys_pkg_id(core_plus_mask_width); + /* + * Reinit the apicid, now that we have extended initial_apicid. + */ + c->apicid = phys_pkg_id(0); #endif c->x86_max_cores = (core_level_siblings / smp_num_siblings); diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 8f1e31db2ad5..7c878f6aa919 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -283,9 +283,14 @@ static void __cpuinit early_init_amd(struct cpuinfo_x86 *c) { early_init_amd_mc(c); - /* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */ - if (c->x86_power & (1<<8)) + /* + * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate + * with P/T states and does not stop in deep C-states + */ + if (c->x86_power & (1 << 8)) { set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); + set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); + } #ifdef CONFIG_X86_64 set_cpu_cap(c, X86_FEATURE_SYSCALL32); diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index b9c9ea0217a9..42e0853030cb 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -36,6 +36,7 @@ #include <asm/proto.h> #include <asm/sections.h> #include <asm/setup.h> +#include <asm/hypervisor.h> #include "cpu.h" @@ -703,6 +704,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c) detect_ht(c); #endif + init_hypervisor(c); /* * On SMP, boot_cpu_data holds the common feature set between * all CPUs; so make sure that we indicate which features are @@ -862,7 +864,7 @@ EXPORT_SYMBOL(_cpu_pda); struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table }; -char boot_cpu_stack[IRQSTACKSIZE] __page_aligned_bss; +static char boot_cpu_stack[IRQSTACKSIZE] __page_aligned_bss; void __cpuinit pda_init(int cpu) { @@ -903,8 +905,8 @@ void __cpuinit pda_init(int cpu) } } -char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + - DEBUG_STKSZ] __page_aligned_bss; +static char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + + DEBUG_STKSZ] __page_aligned_bss; extern asmlinkage void ignore_sysret(void); diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c index 8e48c5d4467d..88ea02dcb622 100644 --- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c +++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c @@ -33,6 +33,7 @@ #include <linux/cpufreq.h> #include <linux/compiler.h> #include <linux/dmi.h> +#include <linux/ftrace.h> #include <linux/acpi.h> #include <acpi/processor.h> @@ -391,6 +392,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, unsigned int next_perf_state = 0; /* Index into perf table */ unsigned int i; int result = 0; + struct power_trace it; dprintk("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu); @@ -427,6 +429,8 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, } } + trace_power_mark(&it, POWER_PSTATE, next_perf_state); + switch (data->cpu_feature) { case SYSTEM_INTEL_MSR_CAPABLE: cmd.type = SYSTEM_INTEL_MSR_CAPABLE; diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c new file mode 100644 index 000000000000..fb5b86af0b01 --- /dev/null +++ b/arch/x86/kernel/cpu/hypervisor.c @@ -0,0 +1,58 @@ +/* + * Common hypervisor code + * + * Copyright (C) 2008, VMware, Inc. 
+ * Author : Alok N Kataria <akataria@vmware.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + */ + +#include <asm/processor.h> +#include <asm/vmware.h> +#include <asm/hypervisor.h> + +static inline void __cpuinit +detect_hypervisor_vendor(struct cpuinfo_x86 *c) +{ + if (vmware_platform()) { + c->x86_hyper_vendor = X86_HYPER_VENDOR_VMWARE; + } else { + c->x86_hyper_vendor = X86_HYPER_VENDOR_NONE; + } +} + +unsigned long get_hypervisor_tsc_freq(void) +{ + if (boot_cpu_data.x86_hyper_vendor == X86_HYPER_VENDOR_VMWARE) + return vmware_get_tsc_khz(); + return 0; +} + +static inline void __cpuinit +hypervisor_set_feature_bits(struct cpuinfo_x86 *c) +{ + if (boot_cpu_data.x86_hyper_vendor == X86_HYPER_VENDOR_VMWARE) { + vmware_set_feature_bits(c); + return; + } +} + +void __cpuinit init_hypervisor(struct cpuinfo_x86 *c) +{ + detect_hypervisor_vendor(c); + hypervisor_set_feature_bits(c); +} diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index cce0b6118d55..8ea6929e974c 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -11,7 +11,6 @@ #include <asm/pgtable.h> #include <asm/msr.h> #include <asm/uaccess.h> -#include <asm/ptrace.h> #include <asm/ds.h> #include <asm/bugs.h> @@ -41,6 +40,16 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) if (c->x86 == 15 && c->x86_cache_alignment == 64) c->x86_cache_alignment = 128; #endif + + /* + * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate + * with P/T states and does not stop in deep C-states + */ + if (c->x86_power & (1 << 8)) { + set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); + set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); + } + } #ifdef CONFIG_X86_32 @@ -242,6 +251,13 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c) intel_workarounds(c); + /* + * Detect the extended topology information if available. 
This + * will reinitialise the initial_apicid which will be used + * in init_intel_cacheinfo() + */ + detect_extended_topology(c); + l2 = init_intel_cacheinfo(c); if (c->cpuid_level > 9) { unsigned eax = cpuid_eax(10); @@ -307,13 +323,8 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c) set_cpu_cap(c, X86_FEATURE_P4); if (c->x86 == 6) set_cpu_cap(c, X86_FEATURE_P3); - - if (cpu_has_bts) - ptrace_bts_init_intel(c); - #endif - detect_extended_topology(c); if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) { /* * let's use the legacy cpuid vector 0x1 and 0x4 for topology diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index 3f46afbb1cf1..68b5d8681cbb 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c @@ -644,20 +644,17 @@ static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf) return show_shared_cpu_map_func(leaf, 1, buf); } -static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf) { - switch(this_leaf->eax.split.type) { - case CACHE_TYPE_DATA: +static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf) +{ + switch (this_leaf->eax.split.type) { + case CACHE_TYPE_DATA: return sprintf(buf, "Data\n"); - break; - case CACHE_TYPE_INST: + case CACHE_TYPE_INST: return sprintf(buf, "Instruction\n"); - break; - case CACHE_TYPE_UNIFIED: + case CACHE_TYPE_UNIFIED: return sprintf(buf, "Unified\n"); - break; - default: + default: return sprintf(buf, "Unknown\n"); - break; } } diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c index 5eb390a4b2e9..748c8f9e7a05 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c @@ -237,7 +237,7 @@ asmlinkage void mce_threshold_interrupt(void) } } out: - add_pda(irq_threshold_count, 1); + inc_irq_stat(irq_threshold_count); irq_exit(); } diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c index c17eaf5dd6dd..4b48f251fd39 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c +++ b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c @@ -26,7 +26,7 @@ asmlinkage void smp_thermal_interrupt(void) if (therm_throt_process(msr_val & 1)) mce_log_therm_throt_event(smp_processor_id(), msr_val); - add_pda(irq_thermal_count, 1); + inc_irq_stat(irq_thermal_count); irq_exit(); } diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c index c78c04821ea1..1159e269e596 100644 --- a/arch/x86/kernel/cpu/mtrr/main.c +++ b/arch/x86/kernel/cpu/mtrr/main.c @@ -803,6 +803,7 @@ x86_get_mtrr_mem_range(struct res_range *range, int nr_range, } static struct res_range __initdata range[RANGE_NUM]; +static int __initdata nr_range; #ifdef CONFIG_MTRR_SANITIZER @@ -1206,39 +1207,43 @@ struct mtrr_cleanup_result { #define PSHIFT (PAGE_SHIFT - 10) static struct mtrr_cleanup_result __initdata result[NUM_RESULT]; -static struct res_range __initdata range_new[RANGE_NUM]; static unsigned long __initdata min_loss_pfn[RANGE_NUM]; -static int __init mtrr_cleanup(unsigned address_bits) +static void __init print_out_mtrr_range_state(void) { - unsigned long extra_remove_base, extra_remove_size; - unsigned long base, size, def, dummy; - mtrr_type type; - int nr_range, nr_range_new; - u64 chunk_size, gran_size; - unsigned long range_sums, range_sums_new; - int index_good; - int num_reg_good; int i; + char start_factor = 'K', size_factor = 'K'; + unsigned long start_base, size_base; + mtrr_type type; - /* extra one for all 0 */ - int 
num[MTRR_NUM_TYPES + 1]; + for (i = 0; i < num_var_ranges; i++) { - if (!is_cpu(INTEL) || enable_mtrr_cleanup < 1) - return 0; - rdmsr(MTRRdefType_MSR, def, dummy); - def &= 0xff; - if (def != MTRR_TYPE_UNCACHABLE) - return 0; + size_base = range_state[i].size_pfn << (PAGE_SHIFT - 10); + if (!size_base) + continue; - /* get it and store it aside */ - memset(range_state, 0, sizeof(range_state)); - for (i = 0; i < num_var_ranges; i++) { - mtrr_if->get(i, &base, &size, &type); - range_state[i].base_pfn = base; - range_state[i].size_pfn = size; - range_state[i].type = type; + size_base = to_size_factor(size_base, &size_factor), + start_base = range_state[i].base_pfn << (PAGE_SHIFT - 10); + start_base = to_size_factor(start_base, &start_factor), + type = range_state[i].type; + + printk(KERN_DEBUG "reg %d, base: %ld%cB, range: %ld%cB, type %s\n", + i, start_base, start_factor, + size_base, size_factor, + (type == MTRR_TYPE_UNCACHABLE) ? "UC" : + ((type == MTRR_TYPE_WRPROT) ? "WP" : + ((type == MTRR_TYPE_WRBACK) ? "WB" : "Other")) + ); } +} + +static int __init mtrr_need_cleanup(void) +{ + int i; + mtrr_type type; + unsigned long size; + /* extra one for all 0 */ + int num[MTRR_NUM_TYPES + 1]; /* check entries number */ memset(num, 0, sizeof(num)); @@ -1263,29 +1268,133 @@ static int __init mtrr_cleanup(unsigned address_bits) num_var_ranges - num[MTRR_NUM_TYPES]) return 0; - /* print original var MTRRs at first, for debugging: */ - printk(KERN_DEBUG "original variable MTRRs\n"); - for (i = 0; i < num_var_ranges; i++) { - char start_factor = 'K', size_factor = 'K'; - unsigned long start_base, size_base; + return 1; +} - size_base = range_state[i].size_pfn << (PAGE_SHIFT - 10); - if (!size_base) - continue; +static unsigned long __initdata range_sums; +static void __init mtrr_calc_range_state(u64 chunk_size, u64 gran_size, + unsigned long extra_remove_base, + unsigned long extra_remove_size, + int i) +{ + int num_reg; + static struct res_range range_new[RANGE_NUM]; + static int nr_range_new; + unsigned long range_sums_new; + + /* convert ranges to var ranges state */ + num_reg = x86_setup_var_mtrrs(range, nr_range, + chunk_size, gran_size); + + /* we got new setting in range_state, check it */ + memset(range_new, 0, sizeof(range_new)); + nr_range_new = x86_get_mtrr_mem_range(range_new, 0, + extra_remove_base, extra_remove_size); + range_sums_new = sum_ranges(range_new, nr_range_new); + + result[i].chunk_sizek = chunk_size >> 10; + result[i].gran_sizek = gran_size >> 10; + result[i].num_reg = num_reg; + if (range_sums < range_sums_new) { + result[i].lose_cover_sizek = + (range_sums_new - range_sums) << PSHIFT; + result[i].bad = 1; + } else + result[i].lose_cover_sizek = + (range_sums - range_sums_new) << PSHIFT; - size_base = to_size_factor(size_base, &size_factor), - start_base = range_state[i].base_pfn << (PAGE_SHIFT - 10); - start_base = to_size_factor(start_base, &start_factor), - type = range_state[i].type; + /* double check it */ + if (!result[i].bad && !result[i].lose_cover_sizek) { + if (nr_range_new != nr_range || + memcmp(range, range_new, sizeof(range))) + result[i].bad = 1; + } - printk(KERN_DEBUG "reg %d, base: %ld%cB, range: %ld%cB, type %s\n", - i, start_base, start_factor, - size_base, size_factor, - (type == MTRR_TYPE_UNCACHABLE) ? "UC" : - ((type == MTRR_TYPE_WRPROT) ? "WP" : - ((type == MTRR_TYPE_WRBACK) ? 
"WB" : "Other")) - ); + if (!result[i].bad && (range_sums - range_sums_new < + min_loss_pfn[num_reg])) { + min_loss_pfn[num_reg] = + range_sums - range_sums_new; } +} + +static void __init mtrr_print_out_one_result(int i) +{ + char gran_factor, chunk_factor, lose_factor; + unsigned long gran_base, chunk_base, lose_base; + + gran_base = to_size_factor(result[i].gran_sizek, &gran_factor), + chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor), + lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor), + printk(KERN_INFO "%sgran_size: %ld%c \tchunk_size: %ld%c \t", + result[i].bad ? "*BAD*" : " ", + gran_base, gran_factor, chunk_base, chunk_factor); + printk(KERN_CONT "num_reg: %d \tlose cover RAM: %s%ld%c\n", + result[i].num_reg, result[i].bad ? "-" : "", + lose_base, lose_factor); +} + +static int __init mtrr_search_optimal_index(void) +{ + int i; + int num_reg_good; + int index_good; + + if (nr_mtrr_spare_reg >= num_var_ranges) + nr_mtrr_spare_reg = num_var_ranges - 1; + num_reg_good = -1; + for (i = num_var_ranges - nr_mtrr_spare_reg; i > 0; i--) { + if (!min_loss_pfn[i]) + num_reg_good = i; + } + + index_good = -1; + if (num_reg_good != -1) { + for (i = 0; i < NUM_RESULT; i++) { + if (!result[i].bad && + result[i].num_reg == num_reg_good && + !result[i].lose_cover_sizek) { + index_good = i; + break; + } + } + } + + return index_good; +} + + +static int __init mtrr_cleanup(unsigned address_bits) +{ + unsigned long extra_remove_base, extra_remove_size; + unsigned long base, size, def, dummy; + mtrr_type type; + u64 chunk_size, gran_size; + int index_good; + int i; + + if (!is_cpu(INTEL) || enable_mtrr_cleanup < 1) + return 0; + rdmsr(MTRRdefType_MSR, def, dummy); + def &= 0xff; + if (def != MTRR_TYPE_UNCACHABLE) + return 0; + + /* get it and store it aside */ + memset(range_state, 0, sizeof(range_state)); + for (i = 0; i < num_var_ranges; i++) { + mtrr_if->get(i, &base, &size, &type); + range_state[i].base_pfn = base; + range_state[i].size_pfn = size; + range_state[i].type = type; + } + + /* check if we need handle it and can handle it */ + if (!mtrr_need_cleanup()) + return 0; + + /* print original var MTRRs at first, for debugging: */ + printk(KERN_DEBUG "original variable MTRRs\n"); + print_out_mtrr_range_state(); memset(range, 0, sizeof(range)); extra_remove_size = 0; @@ -1309,176 +1418,64 @@ static int __init mtrr_cleanup(unsigned address_bits) range_sums >> (20 - PAGE_SHIFT)); if (mtrr_chunk_size && mtrr_gran_size) { - int num_reg; - char gran_factor, chunk_factor, lose_factor; - unsigned long gran_base, chunk_base, lose_base; - - debug_print++; - /* convert ranges to var ranges state */ - num_reg = x86_setup_var_mtrrs(range, nr_range, mtrr_chunk_size, - mtrr_gran_size); + i = 0; + mtrr_calc_range_state(mtrr_chunk_size, mtrr_gran_size, + extra_remove_base, extra_remove_size, i); - /* we got new setting in range_state, check it */ - memset(range_new, 0, sizeof(range_new)); - nr_range_new = x86_get_mtrr_mem_range(range_new, 0, - extra_remove_base, - extra_remove_size); - range_sums_new = sum_ranges(range_new, nr_range_new); + mtrr_print_out_one_result(i); - i = 0; - result[i].chunk_sizek = mtrr_chunk_size >> 10; - result[i].gran_sizek = mtrr_gran_size >> 10; - result[i].num_reg = num_reg; - if (range_sums < range_sums_new) { - result[i].lose_cover_sizek = - (range_sums_new - range_sums) << PSHIFT; - result[i].bad = 1; - } else - result[i].lose_cover_sizek = - (range_sums - range_sums_new) << PSHIFT; - - gran_base = to_size_factor(result[i].gran_sizek, 
&gran_factor), - chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor), - lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor), - printk(KERN_INFO "%sgran_size: %ld%c \tchunk_size: %ld%c \t", - result[i].bad?"*BAD*":" ", - gran_base, gran_factor, chunk_base, chunk_factor); - printk(KERN_CONT "num_reg: %d \tlose cover RAM: %s%ld%c\n", - result[i].num_reg, result[i].bad?"-":"", - lose_base, lose_factor); if (!result[i].bad) { set_var_mtrr_all(address_bits); return 1; } printk(KERN_INFO "invalid mtrr_gran_size or mtrr_chunk_size, " "will find optimal one\n"); - debug_print--; - memset(result, 0, sizeof(result[0])); } i = 0; memset(min_loss_pfn, 0xff, sizeof(min_loss_pfn)); memset(result, 0, sizeof(result)); for (gran_size = (1ULL<<16); gran_size < (1ULL<<32); gran_size <<= 1) { - char gran_factor; - unsigned long gran_base; - - if (debug_print) - gran_base = to_size_factor(gran_size >> 10, &gran_factor); for (chunk_size = gran_size; chunk_size < (1ULL<<32); chunk_size <<= 1) { - int num_reg; - if (debug_print) { - char chunk_factor; - unsigned long chunk_base; - - chunk_base = to_size_factor(chunk_size>>10, &chunk_factor), - printk(KERN_INFO "\n"); - printk(KERN_INFO "gran_size: %ld%c chunk_size: %ld%c \n", - gran_base, gran_factor, chunk_base, chunk_factor); - } if (i >= NUM_RESULT) continue; - /* convert ranges to var ranges state */ - num_reg = x86_setup_var_mtrrs(range, nr_range, - chunk_size, gran_size); - - /* we got new setting in range_state, check it */ - memset(range_new, 0, sizeof(range_new)); - nr_range_new = x86_get_mtrr_mem_range(range_new, 0, - extra_remove_base, extra_remove_size); - range_sums_new = sum_ranges(range_new, nr_range_new); - - result[i].chunk_sizek = chunk_size >> 10; - result[i].gran_sizek = gran_size >> 10; - result[i].num_reg = num_reg; - if (range_sums < range_sums_new) { - result[i].lose_cover_sizek = - (range_sums_new - range_sums) << PSHIFT; - result[i].bad = 1; - } else - result[i].lose_cover_sizek = - (range_sums - range_sums_new) << PSHIFT; - - /* double check it */ - if (!result[i].bad && !result[i].lose_cover_sizek) { - if (nr_range_new != nr_range || - memcmp(range, range_new, sizeof(range))) - result[i].bad = 1; + mtrr_calc_range_state(chunk_size, gran_size, + extra_remove_base, extra_remove_size, i); + if (debug_print) { + mtrr_print_out_one_result(i); + printk(KERN_INFO "\n"); } - if (!result[i].bad && (range_sums - range_sums_new < - min_loss_pfn[num_reg])) { - min_loss_pfn[num_reg] = - range_sums - range_sums_new; - } i++; } } - /* print out all */ - for (i = 0; i < NUM_RESULT; i++) { - char gran_factor, chunk_factor, lose_factor; - unsigned long gran_base, chunk_base, lose_base; - - gran_base = to_size_factor(result[i].gran_sizek, &gran_factor), - chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor), - lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor), - printk(KERN_INFO "%sgran_size: %ld%c \tchunk_size: %ld%c \t", - result[i].bad?"*BAD*":" ", - gran_base, gran_factor, chunk_base, chunk_factor); - printk(KERN_CONT "num_reg: %d \tlose cover RAM: %s%ld%c\n", - result[i].num_reg, result[i].bad?"-":"", - lose_base, lose_factor); - } - /* try to find the optimal index */ - if (nr_mtrr_spare_reg >= num_var_ranges) - nr_mtrr_spare_reg = num_var_ranges - 1; - num_reg_good = -1; - for (i = num_var_ranges - nr_mtrr_spare_reg; i > 0; i--) { - if (!min_loss_pfn[i]) - num_reg_good = i; - } - - index_good = -1; - if (num_reg_good != -1) { - for (i = 0; i < NUM_RESULT; i++) { - if (!result[i].bad && 
- result[i].num_reg == num_reg_good && - !result[i].lose_cover_sizek) { - index_good = i; - break; - } - } - } + index_good = mtrr_search_optimal_index(); if (index_good != -1) { - char gran_factor, chunk_factor, lose_factor; - unsigned long gran_base, chunk_base, lose_base; - printk(KERN_INFO "Found optimal setting for mtrr clean up\n"); i = index_good; - gran_base = to_size_factor(result[i].gran_sizek, &gran_factor), - chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor), - lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor), - printk(KERN_INFO "gran_size: %ld%c \tchunk_size: %ld%c \t", - gran_base, gran_factor, chunk_base, chunk_factor); - printk(KERN_CONT "num_reg: %d \tlose RAM: %ld%c\n", - result[i].num_reg, lose_base, lose_factor); + mtrr_print_out_one_result(i); + /* convert ranges to var ranges state */ chunk_size = result[i].chunk_sizek; chunk_size <<= 10; gran_size = result[i].gran_sizek; gran_size <<= 10; - debug_print++; x86_setup_var_mtrrs(range, nr_range, chunk_size, gran_size); - debug_print--; set_var_mtrr_all(address_bits); + printk(KERN_DEBUG "New variable MTRRs\n"); + print_out_mtrr_range_state(); return 1; + } else { + /* print out all */ + for (i = 0; i < NUM_RESULT; i++) + mtrr_print_out_one_result(i); } printk(KERN_INFO "mtrr_cleanup: can not find optimal value\n"); @@ -1562,7 +1559,6 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn) { unsigned long i, base, size, highest_pfn = 0, def, dummy; mtrr_type type; - int nr_range; u64 total_trim_size; /* extra one for all 0 */ diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c new file mode 100644 index 000000000000..284c399e3234 --- /dev/null +++ b/arch/x86/kernel/cpu/vmware.c @@ -0,0 +1,112 @@ +/* + * VMware Detection code. + * + * Copyright (C) 2008, VMware, Inc. + * Author : Alok N Kataria <akataria@vmware.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + */ + +#include <linux/dmi.h> +#include <asm/div64.h> +#include <asm/vmware.h> + +#define CPUID_VMWARE_INFO_LEAF 0x40000000 +#define VMWARE_HYPERVISOR_MAGIC 0x564D5868 +#define VMWARE_HYPERVISOR_PORT 0x5658 + +#define VMWARE_PORT_CMD_GETVERSION 10 +#define VMWARE_PORT_CMD_GETHZ 45 + +#define VMWARE_PORT(cmd, eax, ebx, ecx, edx) \ + __asm__("inl (%%dx)" : \ + "=a"(eax), "=c"(ecx), "=d"(edx), "=b"(ebx) : \ + "0"(VMWARE_HYPERVISOR_MAGIC), \ + "1"(VMWARE_PORT_CMD_##cmd), \ + "2"(VMWARE_HYPERVISOR_PORT), "3"(UINT_MAX) : \ + "memory"); + +static inline int __vmware_platform(void) +{ + uint32_t eax, ebx, ecx, edx; + VMWARE_PORT(GETVERSION, eax, ebx, ecx, edx); + return eax != (uint32_t)-1 && ebx == VMWARE_HYPERVISOR_MAGIC; +} + +static unsigned long __vmware_get_tsc_khz(void) +{ + uint64_t tsc_hz; + uint32_t eax, ebx, ecx, edx; + + VMWARE_PORT(GETHZ, eax, ebx, ecx, edx); + + if (ebx == UINT_MAX) + return 0; + tsc_hz = eax | (((uint64_t)ebx) << 32); + do_div(tsc_hz, 1000); + BUG_ON(tsc_hz >> 32); + return tsc_hz; +} + +/* + * While checking the dmi string infomation, just checking the product + * serial key should be enough, as this will always have a VMware + * specific string when running under VMware hypervisor. + */ +int vmware_platform(void) +{ + if (cpu_has_hypervisor) { + unsigned int eax, ebx, ecx, edx; + char hyper_vendor_id[13]; + + cpuid(CPUID_VMWARE_INFO_LEAF, &eax, &ebx, &ecx, &edx); + memcpy(hyper_vendor_id + 0, &ebx, 4); + memcpy(hyper_vendor_id + 4, &ecx, 4); + memcpy(hyper_vendor_id + 8, &edx, 4); + hyper_vendor_id[12] = '\0'; + if (!strcmp(hyper_vendor_id, "VMwareVMware")) + return 1; + } else if (dmi_available && dmi_name_in_serial("VMware") && + __vmware_platform()) + return 1; + + return 0; +} + +unsigned long vmware_get_tsc_khz(void) +{ + BUG_ON(!vmware_platform()); + return __vmware_get_tsc_khz(); +} + +/* + * VMware hypervisor takes care of exporting a reliable TSC to the guest. + * Still, due to timing difference when running on virtual cpus, the TSC can + * be marked as unstable in some cases. For example, the TSC sync check at + * bootup can fail due to a marginal offset between vcpus' TSCs (though the + * TSCs do not drift from each other). Also, the ACPI PM timer clocksource + * is not suitable as a watchdog when running on a hypervisor because the + * kernel may miss a wrap of the counter if the vcpu is descheduled for a + * long time. To skip these checks at runtime we set these capability bits, + * so that the kernel could just trust the hypervisor with providing a + * reliable virtual TSC that is suitable for timekeeping. + */ +void __cpuinit vmware_set_feature_bits(struct cpuinfo_x86 *c) +{ + set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); + set_cpu_cap(c, X86_FEATURE_TSC_RELIABLE); +} diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c index 268553817909..d84a852e4cd7 100644 --- a/arch/x86/kernel/crash.c +++ b/arch/x86/kernel/crash.c @@ -29,34 +29,17 @@ #include <mach_ipi.h> -/* This keeps a track of which one is crashing cpu. 
*/ -static int crashing_cpu; #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC) -static atomic_t waiting_for_crash_ipi; -static int crash_nmi_callback(struct notifier_block *self, - unsigned long val, void *data) +static void kdump_nmi_callback(int cpu, struct die_args *args) { struct pt_regs *regs; #ifdef CONFIG_X86_32 struct pt_regs fixed_regs; #endif - int cpu; - if (val != DIE_NMI_IPI) - return NOTIFY_OK; - - regs = ((struct die_args *)data)->regs; - cpu = raw_smp_processor_id(); - - /* Don't do anything if this handler is invoked on crashing cpu. - * Otherwise, system will completely hang. Crashing cpu can get - * an NMI if system was initially booted with nmi_watchdog parameter. - */ - if (cpu == crashing_cpu) - return NOTIFY_STOP; - local_irq_disable(); + regs = args->regs; #ifdef CONFIG_X86_32 if (!user_mode_vm(regs)) { @@ -65,54 +48,19 @@ static int crash_nmi_callback(struct notifier_block *self, } #endif crash_save_cpu(regs, cpu); - disable_local_APIC(); - atomic_dec(&waiting_for_crash_ipi); - /* Assume hlt works */ - halt(); - for (;;) - cpu_relax(); - - return 1; -} -static void smp_send_nmi_allbutself(void) -{ - cpumask_t mask = cpu_online_map; - cpu_clear(safe_smp_processor_id(), mask); - if (!cpus_empty(mask)) - send_IPI_mask(mask, NMI_VECTOR); + disable_local_APIC(); } -static struct notifier_block crash_nmi_nb = { - .notifier_call = crash_nmi_callback, -}; - -static void nmi_shootdown_cpus(void) +static void kdump_nmi_shootdown_cpus(void) { - unsigned long msecs; - - atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1); - /* Would it be better to replace the trap vector here? */ - if (register_die_notifier(&crash_nmi_nb)) - return; /* return what? */ - /* Ensure the new callback function is set before sending - * out the NMI - */ - wmb(); + nmi_shootdown_cpus(kdump_nmi_callback); - smp_send_nmi_allbutself(); - - msecs = 1000; /* Wait at most a second for the other cpus to stop */ - while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) { - mdelay(1); - msecs--; - } - - /* Leave the nmi callback set */ disable_local_APIC(); } + #else -static void nmi_shootdown_cpus(void) +static void kdump_nmi_shootdown_cpus(void) { /* There are no cpus to shootdown */ } @@ -131,9 +79,7 @@ void native_machine_crash_shutdown(struct pt_regs *regs) /* The kernel is broken so disable interrupts */ local_irq_disable(); - /* Make a note of crashing cpu. Will be used in NMI callback.*/ - crashing_cpu = safe_smp_processor_id(); - nmi_shootdown_cpus(); + kdump_nmi_shootdown_cpus(); lapic_shutdown(); #if defined(CONFIG_X86_IO_APIC) disable_IO_APIC(); diff --git a/arch/x86/kernel/ds.c b/arch/x86/kernel/ds.c index a2d1176c38ee..da91701a2348 100644 --- a/arch/x86/kernel/ds.c +++ b/arch/x86/kernel/ds.c @@ -6,14 +6,13 @@ * precise-event based sampling (PEBS). * * It manages: - * - per-thread and per-cpu allocation of BTS and PEBS - * - buffer memory allocation (optional) - * - buffer overflow handling + * - DS and BTS hardware configuration + * - buffer overflow handling (to be done) * - buffer access * - * It assumes: - * - get_task_struct on all parameter tasks - * - current is allowed to trace parameter tasks + * It does not do: + * - security checking (is the caller allowed to trace the task) + * - buffer allocation (memory accounting) * * * Copyright (C) 2007-2008 Intel Corporation. @@ -28,22 +27,69 @@ #include <linux/slab.h> #include <linux/sched.h> #include <linux/mm.h> +#include <linux/kernel.h> /* * The configuration for a particular DS hardware implementation. 
*/ struct ds_configuration { - /* the size of the DS structure in bytes */ - unsigned char sizeof_ds; - /* the size of one pointer-typed field in the DS structure in bytes; - this covers the first 8 fields related to buffer management. */ + /* the name of the configuration */ + const char *name; + /* the size of one pointer-typed field in the DS structure and + in the BTS and PEBS buffers in bytes; + this covers the first 8 DS fields related to buffer management. */ unsigned char sizeof_field; /* the size of a BTS/PEBS record in bytes */ unsigned char sizeof_rec[2]; + /* a series of bit-masks to control various features indexed + * by enum ds_feature */ + unsigned long ctl[dsf_ctl_max]; }; -static struct ds_configuration ds_cfg; +static DEFINE_PER_CPU(struct ds_configuration, ds_cfg_array); +#define ds_cfg per_cpu(ds_cfg_array, smp_processor_id()) + +#define MAX_SIZEOF_DS (12 * 8) /* maximal size of a DS configuration */ +#define MAX_SIZEOF_BTS (3 * 8) /* maximal size of a BTS record */ +#define DS_ALIGNMENT (1 << 3) /* BTS and PEBS buffer alignment */ + +#define BTS_CONTROL \ + (ds_cfg.ctl[dsf_bts] | ds_cfg.ctl[dsf_bts_kernel] | ds_cfg.ctl[dsf_bts_user] |\ + ds_cfg.ctl[dsf_bts_overflow]) + + +/* + * A BTS or PEBS tracer. + * + * This holds the configuration of the tracer and serves as a handle + * to identify tracers. + */ +struct ds_tracer { + /* the DS context (partially) owned by this tracer */ + struct ds_context *context; + /* the buffer provided on ds_request() and its size in bytes */ + void *buffer; + size_t size; +}; + +struct bts_tracer { + /* the common DS part */ + struct ds_tracer ds; + /* the trace including the DS configuration */ + struct bts_trace trace; + /* buffer overflow notification function */ + bts_ovfl_callback_t ovfl; +}; + +struct pebs_tracer { + /* the common DS part */ + struct ds_tracer ds; + /* the trace including the DS configuration */ + struct pebs_trace trace; + /* buffer overflow notification function */ + pebs_ovfl_callback_t ovfl; +}; /* * Debug Store (DS) save area configuration (see Intel64 and IA32 @@ -109,32 +155,9 @@ static inline void ds_set(unsigned char *base, enum ds_qualifier qual, /* - * Locking is done only for allocating BTS or PEBS resources and for - * guarding context and buffer memory allocation. - * - * Most functions require the current task to own the ds context part - * they are going to access. All the locking is done when validating - * access to the context. + * Locking is done only for allocating BTS or PEBS resources. */ -static spinlock_t ds_lock = __SPIN_LOCK_UNLOCKED(ds_lock); - -/* - * Validate that the current task is allowed to access the BTS/PEBS - * buffer of the parameter task. - * - * Returns 0, if access is granted; -Eerrno, otherwise. - */ -static inline int ds_validate_access(struct ds_context *context, - enum ds_qualifier qual) -{ - if (!context) - return -EPERM; - - if (context->owner[qual] == current) - return 0; - - return -EPERM; -} +static DEFINE_SPINLOCK(ds_lock); /* @@ -150,27 +173,32 @@ static inline int ds_validate_access(struct ds_context *context, * >0 number of per-thread tracers * <0 number of per-cpu tracers * - * The below functions to get and put tracers and to check the - * allocation type require the ds_lock to be held by the caller. - * * Tracers essentially gives the number of ds contexts for a certain * type of allocation. */ -static long tracers; +static atomic_t tracers = ATOMIC_INIT(0); static inline void get_tracer(struct task_struct *task) { - tracers += (task ? 
1 : -1); + if (task) + atomic_inc(&tracers); + else + atomic_dec(&tracers); } static inline void put_tracer(struct task_struct *task) { - tracers -= (task ? 1 : -1); + if (task) + atomic_dec(&tracers); + else + atomic_inc(&tracers); } static inline int check_tracer(struct task_struct *task) { - return (task ? (tracers >= 0) : (tracers <= 0)); + return task ? + (atomic_read(&tracers) >= 0) : + (atomic_read(&tracers) <= 0); } @@ -183,99 +211,70 @@ static inline int check_tracer(struct task_struct *task) * * Contexts are use-counted. They are allocated on first access and * deallocated when the last user puts the context. - * - * We distinguish between an allocating and a non-allocating get of a - * context: - * - the allocating get is used for requesting BTS/PEBS resources. It - * requires the caller to hold the global ds_lock. - * - the non-allocating get is used for all other cases. A - * non-existing context indicates an error. It acquires and releases - * the ds_lock itself for obtaining the context. - * - * A context and its DS configuration are allocated and deallocated - * together. A context always has a DS configuration of the - * appropriate size. - */ -static DEFINE_PER_CPU(struct ds_context *, system_context); - -#define this_system_context per_cpu(system_context, smp_processor_id()) - -/* - * Returns the pointer to the parameter task's context or to the - * system-wide context, if task is NULL. - * - * Increases the use count of the returned context, if not NULL. */ -static inline struct ds_context *ds_get_context(struct task_struct *task) -{ - struct ds_context *context; - unsigned long irq; +struct ds_context { + /* pointer to the DS configuration; goes into MSR_IA32_DS_AREA */ + unsigned char ds[MAX_SIZEOF_DS]; + /* the owner of the BTS and PEBS configuration, respectively */ + struct bts_tracer *bts_master; + struct pebs_tracer *pebs_master; + /* use count */ + unsigned long count; + /* a pointer to the context location inside the thread_struct + * or the per_cpu context array */ + struct ds_context **this; + /* a pointer to the task owning this context, or NULL, if the + * context is owned by a cpu */ + struct task_struct *task; +}; - spin_lock_irqsave(&ds_lock, irq); +static DEFINE_PER_CPU(struct ds_context *, system_context_array); - context = (task ? task->thread.ds_ctx : this_system_context); - if (context) - context->count++; +#define system_context per_cpu(system_context_array, smp_processor_id()) - spin_unlock_irqrestore(&ds_lock, irq); - - return context; -} -/* - * Same as ds_get_context, but allocates the context and it's DS - * structure, if necessary; returns NULL; if out of memory. - */ -static inline struct ds_context *ds_alloc_context(struct task_struct *task) +static inline struct ds_context *ds_get_context(struct task_struct *task) { struct ds_context **p_context = - (task ? &task->thread.ds_ctx : &this_system_context); - struct ds_context *context = *p_context; + (task ? &task->thread.ds_ctx : &system_context); + struct ds_context *context = NULL; + struct ds_context *new_context = NULL; unsigned long irq; - if (!context) { - context = kzalloc(sizeof(*context), GFP_KERNEL); - if (!context) - return NULL; - - context->ds = kzalloc(ds_cfg.sizeof_ds, GFP_KERNEL); - if (!context->ds) { - kfree(context); - return NULL; - } + /* Chances are small that we already have a context. 
*/ + new_context = kzalloc(sizeof(*new_context), GFP_KERNEL); + if (!new_context) + return NULL; - spin_lock_irqsave(&ds_lock, irq); + spin_lock_irqsave(&ds_lock, irq); - if (*p_context) { - kfree(context->ds); - kfree(context); + context = *p_context; + if (!context) { + context = new_context; - context = *p_context; - } else { - *p_context = context; + context->this = p_context; + context->task = task; + context->count = 0; - context->this = p_context; - context->task = task; + if (task) + set_tsk_thread_flag(task, TIF_DS_AREA_MSR); - if (task) - set_tsk_thread_flag(task, TIF_DS_AREA_MSR); + if (!task || (task == current)) + wrmsrl(MSR_IA32_DS_AREA, (unsigned long)context->ds); - if (!task || (task == current)) - wrmsrl(MSR_IA32_DS_AREA, - (unsigned long)context->ds); - } - spin_unlock_irqrestore(&ds_lock, irq); + *p_context = context; } context->count++; + spin_unlock_irqrestore(&ds_lock, irq); + + if (context != new_context) + kfree(new_context); + return context; } -/* - * Decreases the use count of the parameter context, if not NULL. - * Deallocates the context, if the use count reaches zero. - */ static inline void ds_put_context(struct ds_context *context) { unsigned long irq; @@ -285,8 +284,10 @@ static inline void ds_put_context(struct ds_context *context) spin_lock_irqsave(&ds_lock, irq); - if (--context->count) - goto out; + if (--context->count) { + spin_unlock_irqrestore(&ds_lock, irq); + return; + } *(context->this) = NULL; @@ -296,135 +297,263 @@ static inline void ds_put_context(struct ds_context *context) if (!context->task || (context->task == current)) wrmsrl(MSR_IA32_DS_AREA, 0); - put_tracer(context->task); + spin_unlock_irqrestore(&ds_lock, irq); - /* free any leftover buffers from tracers that did not - * deallocate them properly. */ - kfree(context->buffer[ds_bts]); - kfree(context->buffer[ds_pebs]); - kfree(context->ds); kfree(context); - out: - spin_unlock_irqrestore(&ds_lock, irq); } /* - * Handle a buffer overflow + * Call the tracer's callback on a buffer overflow. * - * task: the task whose buffers are overflowing; - * NULL for a buffer overflow on the current cpu * context: the ds context * qual: the buffer type */ -static void ds_overflow(struct task_struct *task, struct ds_context *context, - enum ds_qualifier qual) +static void ds_overflow(struct ds_context *context, enum ds_qualifier qual) { - if (!context) - return; - - if (context->callback[qual]) - (*context->callback[qual])(task); - - /* todo: do some more overflow handling */ + switch (qual) { + case ds_bts: + if (context->bts_master && + context->bts_master->ovfl) + context->bts_master->ovfl(context->bts_master); + break; + case ds_pebs: + if (context->pebs_master && + context->pebs_master->ovfl) + context->pebs_master->ovfl(context->pebs_master); + break; + } } /* - * Allocate a non-pageable buffer of the parameter size. - * Checks the memory and the locked memory rlimit. + * Write raw data into the BTS or PEBS buffer. * - * Returns the buffer, if successful; - * NULL, if out of memory or rlimit exceeded. + * The remainder of any partially written record is zeroed out. 
* - * size: the requested buffer size in bytes - * pages (out): if not NULL, contains the number of pages reserved + * context: the DS context + * qual: the buffer type + * record: the data to write + * size: the size of the data */ -static inline void *ds_allocate_buffer(size_t size, unsigned int *pages) +static int ds_write(struct ds_context *context, enum ds_qualifier qual, + const void *record, size_t size) { - unsigned long rlim, vm, pgsz; - void *buffer; + int bytes_written = 0; - pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT; + if (!record) + return -EINVAL; - rlim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT; - vm = current->mm->total_vm + pgsz; - if (rlim < vm) - return NULL; + while (size) { + unsigned long base, index, end, write_end, int_th; + unsigned long write_size, adj_write_size; - rlim = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT; - vm = current->mm->locked_vm + pgsz; - if (rlim < vm) - return NULL; + /* + * write as much as possible without producing an + * overflow interrupt. + * + * interrupt_threshold must either be + * - bigger than absolute_maximum or + * - point to a record between buffer_base and absolute_maximum + * + * index points to a valid record. + */ + base = ds_get(context->ds, qual, ds_buffer_base); + index = ds_get(context->ds, qual, ds_index); + end = ds_get(context->ds, qual, ds_absolute_maximum); + int_th = ds_get(context->ds, qual, ds_interrupt_threshold); - buffer = kzalloc(size, GFP_KERNEL); - if (!buffer) - return NULL; + write_end = min(end, int_th); - current->mm->total_vm += pgsz; - current->mm->locked_vm += pgsz; + /* if we are already beyond the interrupt threshold, + * we fill the entire buffer */ + if (write_end <= index) + write_end = end; - if (pages) - *pages = pgsz; + if (write_end <= index) + break; + + write_size = min((unsigned long) size, write_end - index); + memcpy((void *)index, record, write_size); - return buffer; + record = (const char *)record + write_size; + size -= write_size; + bytes_written += write_size; + + adj_write_size = write_size / ds_cfg.sizeof_rec[qual]; + adj_write_size *= ds_cfg.sizeof_rec[qual]; + + /* zero out trailing bytes */ + memset((char *)index + write_size, 0, + adj_write_size - write_size); + index += adj_write_size; + + if (index >= end) + index = base; + ds_set(context->ds, qual, ds_index, index); + + if (index >= int_th) + ds_overflow(context, qual); + } + + return bytes_written; } -static int ds_request(struct task_struct *task, void *base, size_t size, - ds_ovfl_callback_t ovfl, enum ds_qualifier qual) + +/* + * Branch Trace Store (BTS) uses the following format. Different + * architectures vary in the size of those fields. + * - source linear address + * - destination linear address + * - flags + * + * Later architectures use 64bit pointers throughout, whereas earlier + * architectures use 32bit pointers in 32bit mode. + * + * We compute the base address for the first 8 fields based on: + * - the field size stored in the DS configuration + * - the relative field position + * + * In order to store additional information in the BTS buffer, we use + * a special source address to indicate that the record requires + * special interpretation. + * + * Netburst indicated via a bit in the flags field whether the branch + * was predicted; this is ignored. 
+ * + * We use two levels of abstraction: + * - the raw data level defined here + * - an arch-independent level defined in ds.h + */ + +enum bts_field { + bts_from, + bts_to, + bts_flags, + + bts_qual = bts_from, + bts_jiffies = bts_to, + bts_pid = bts_flags, + + bts_qual_mask = (bts_qual_max - 1), + bts_escape = ((unsigned long)-1 & ~bts_qual_mask) +}; + +static inline unsigned long bts_get(const char *base, enum bts_field field) { - struct ds_context *context; - unsigned long buffer, adj; - const unsigned long alignment = (1 << 3); - unsigned long irq; - int error = 0; + base += (ds_cfg.sizeof_field * field); + return *(unsigned long *)base; +} + +static inline void bts_set(char *base, enum bts_field field, unsigned long val) +{ + base += (ds_cfg.sizeof_field * field);; + (*(unsigned long *)base) = val; +} - if (!ds_cfg.sizeof_ds) - return -EOPNOTSUPP; - /* we require some space to do alignment adjustments below */ - if (size < (alignment + ds_cfg.sizeof_rec[qual])) +/* + * The raw BTS data is architecture dependent. + * + * For higher-level users, we give an arch-independent view. + * - ds.h defines struct bts_struct + * - bts_read translates one raw bts record into a bts_struct + * - bts_write translates one bts_struct into the raw format and + * writes it into the top of the parameter tracer's buffer. + * + * return: bytes read/written on success; -Eerrno, otherwise + */ +static int bts_read(struct bts_tracer *tracer, const void *at, + struct bts_struct *out) +{ + if (!tracer) return -EINVAL; - /* buffer overflow notification is not yet implemented */ - if (ovfl) - return -EOPNOTSUPP; + if (at < tracer->trace.ds.begin) + return -EINVAL; + if (tracer->trace.ds.end < (at + tracer->trace.ds.size)) + return -EINVAL; - context = ds_alloc_context(task); - if (!context) - return -ENOMEM; + memset(out, 0, sizeof(*out)); + if ((bts_get(at, bts_qual) & ~bts_qual_mask) == bts_escape) { + out->qualifier = (bts_get(at, bts_qual) & bts_qual_mask); + out->variant.timestamp.jiffies = bts_get(at, bts_jiffies); + out->variant.timestamp.pid = bts_get(at, bts_pid); + } else { + out->qualifier = bts_branch; + out->variant.lbr.from = bts_get(at, bts_from); + out->variant.lbr.to = bts_get(at, bts_to); + + if (!out->variant.lbr.from && !out->variant.lbr.to) + out->qualifier = bts_invalid; + } - spin_lock_irqsave(&ds_lock, irq); + return ds_cfg.sizeof_rec[ds_bts]; +} - error = -EPERM; - if (!check_tracer(task)) - goto out_unlock; +static int bts_write(struct bts_tracer *tracer, const struct bts_struct *in) +{ + unsigned char raw[MAX_SIZEOF_BTS]; - get_tracer(task); + if (!tracer) + return -EINVAL; - error = -EALREADY; - if (context->owner[qual] == current) - goto out_put_tracer; - error = -EPERM; - if (context->owner[qual] != NULL) - goto out_put_tracer; - context->owner[qual] = current; + if (MAX_SIZEOF_BTS < ds_cfg.sizeof_rec[ds_bts]) + return -EOVERFLOW; - spin_unlock_irqrestore(&ds_lock, irq); + switch (in->qualifier) { + case bts_invalid: + bts_set(raw, bts_from, 0); + bts_set(raw, bts_to, 0); + bts_set(raw, bts_flags, 0); + break; + case bts_branch: + bts_set(raw, bts_from, in->variant.lbr.from); + bts_set(raw, bts_to, in->variant.lbr.to); + bts_set(raw, bts_flags, 0); + break; + case bts_task_arrives: + case bts_task_departs: + bts_set(raw, bts_qual, (bts_escape | in->qualifier)); + bts_set(raw, bts_jiffies, in->variant.timestamp.jiffies); + bts_set(raw, bts_pid, in->variant.timestamp.pid); + break; + default: + return -EINVAL; + } + return ds_write(tracer->ds.context, ds_bts, raw, + 
ds_cfg.sizeof_rec[ds_bts]); +} - error = -ENOMEM; - if (!base) { - base = ds_allocate_buffer(size, &context->pages[qual]); - if (!base) - goto out_release; - context->buffer[qual] = base; - } - error = 0; +static void ds_write_config(struct ds_context *context, + struct ds_trace *cfg, enum ds_qualifier qual) +{ + unsigned char *ds = context->ds; + + ds_set(ds, qual, ds_buffer_base, (unsigned long)cfg->begin); + ds_set(ds, qual, ds_index, (unsigned long)cfg->top); + ds_set(ds, qual, ds_absolute_maximum, (unsigned long)cfg->end); + ds_set(ds, qual, ds_interrupt_threshold, (unsigned long)cfg->ith); +} + +static void ds_read_config(struct ds_context *context, + struct ds_trace *cfg, enum ds_qualifier qual) +{ + unsigned char *ds = context->ds; - context->callback[qual] = ovfl; + cfg->begin = (void *)ds_get(ds, qual, ds_buffer_base); + cfg->top = (void *)ds_get(ds, qual, ds_index); + cfg->end = (void *)ds_get(ds, qual, ds_absolute_maximum); + cfg->ith = (void *)ds_get(ds, qual, ds_interrupt_threshold); +} + +static void ds_init_ds_trace(struct ds_trace *trace, enum ds_qualifier qual, + void *base, size_t size, size_t ith, + unsigned int flags) { + unsigned long buffer, adj; /* adjust the buffer address and size to meet alignment * constraints: @@ -436,410 +565,383 @@ static int ds_request(struct task_struct *task, void *base, size_t size, */ buffer = (unsigned long)base; - adj = ALIGN(buffer, alignment) - buffer; + adj = ALIGN(buffer, DS_ALIGNMENT) - buffer; buffer += adj; size -= adj; - size /= ds_cfg.sizeof_rec[qual]; - size *= ds_cfg.sizeof_rec[qual]; - - ds_set(context->ds, qual, ds_buffer_base, buffer); - ds_set(context->ds, qual, ds_index, buffer); - ds_set(context->ds, qual, ds_absolute_maximum, buffer + size); + trace->n = size / ds_cfg.sizeof_rec[qual]; + trace->size = ds_cfg.sizeof_rec[qual]; - if (ovfl) { - /* todo: select a suitable interrupt threshold */ - } else - ds_set(context->ds, qual, - ds_interrupt_threshold, buffer + size + 1); + size = (trace->n * trace->size); - /* we keep the context until ds_release */ - return error; - - out_release: - context->owner[qual] = NULL; - ds_put_context(context); - put_tracer(task); - return error; - - out_put_tracer: - spin_unlock_irqrestore(&ds_lock, irq); - ds_put_context(context); - put_tracer(task); - return error; + trace->begin = (void *)buffer; + trace->top = trace->begin; + trace->end = (void *)(buffer + size); + /* The value for 'no threshold' is -1, which will set the + * threshold outside of the buffer, just like we want it. 
+ */ + trace->ith = (void *)(buffer + size - ith); - out_unlock: - spin_unlock_irqrestore(&ds_lock, irq); - ds_put_context(context); - return error; + trace->flags = flags; } -int ds_request_bts(struct task_struct *task, void *base, size_t size, - ds_ovfl_callback_t ovfl) -{ - return ds_request(task, base, size, ovfl, ds_bts); -} -int ds_request_pebs(struct task_struct *task, void *base, size_t size, - ds_ovfl_callback_t ovfl) -{ - return ds_request(task, base, size, ovfl, ds_pebs); -} - -static int ds_release(struct task_struct *task, enum ds_qualifier qual) +static int ds_request(struct ds_tracer *tracer, struct ds_trace *trace, + enum ds_qualifier qual, struct task_struct *task, + void *base, size_t size, size_t th, unsigned int flags) { struct ds_context *context; int error; - context = ds_get_context(task); - error = ds_validate_access(context, qual); - if (error < 0) + error = -EINVAL; + if (!base) goto out; - kfree(context->buffer[qual]); - context->buffer[qual] = NULL; - - current->mm->total_vm -= context->pages[qual]; - current->mm->locked_vm -= context->pages[qual]; - context->pages[qual] = 0; - context->owner[qual] = NULL; - - /* - * we put the context twice: - * once for the ds_get_context - * once for the corresponding ds_request - */ - ds_put_context(context); - out: - ds_put_context(context); - return error; -} + /* we require some space to do alignment adjustments below */ + error = -EINVAL; + if (size < (DS_ALIGNMENT + ds_cfg.sizeof_rec[qual])) + goto out; -int ds_release_bts(struct task_struct *task) -{ - return ds_release(task, ds_bts); -} + if (th != (size_t)-1) { + th *= ds_cfg.sizeof_rec[qual]; -int ds_release_pebs(struct task_struct *task) -{ - return ds_release(task, ds_pebs); -} + error = -EINVAL; + if (size <= th) + goto out; + } -static int ds_get_index(struct task_struct *task, size_t *pos, - enum ds_qualifier qual) -{ - struct ds_context *context; - unsigned long base, index; - int error; + tracer->buffer = base; + tracer->size = size; + error = -ENOMEM; context = ds_get_context(task); - error = ds_validate_access(context, qual); - if (error < 0) + if (!context) goto out; + tracer->context = context; - base = ds_get(context->ds, qual, ds_buffer_base); - index = ds_get(context->ds, qual, ds_index); + ds_init_ds_trace(trace, qual, base, size, th, flags); - error = ((index - base) / ds_cfg.sizeof_rec[qual]); - if (pos) - *pos = error; + error = 0; out: - ds_put_context(context); return error; } -int ds_get_bts_index(struct task_struct *task, size_t *pos) -{ - return ds_get_index(task, pos, ds_bts); -} - -int ds_get_pebs_index(struct task_struct *task, size_t *pos) +struct bts_tracer *ds_request_bts(struct task_struct *task, + void *base, size_t size, + bts_ovfl_callback_t ovfl, size_t th, + unsigned int flags) { - return ds_get_index(task, pos, ds_pebs); -} - -static int ds_get_end(struct task_struct *task, size_t *pos, - enum ds_qualifier qual) -{ - struct ds_context *context; - unsigned long base, end; + struct bts_tracer *tracer; + unsigned long irq; int error; - context = ds_get_context(task); - error = ds_validate_access(context, qual); - if (error < 0) + error = -EOPNOTSUPP; + if (!ds_cfg.ctl[dsf_bts]) goto out; - base = ds_get(context->ds, qual, ds_buffer_base); - end = ds_get(context->ds, qual, ds_absolute_maximum); + /* buffer overflow notification is not yet implemented */ + error = -EOPNOTSUPP; + if (ovfl) + goto out; - error = ((end - base) / ds_cfg.sizeof_rec[qual]); - if (pos) - *pos = error; - out: - ds_put_context(context); - return error; -} + 
error = -ENOMEM; + tracer = kzalloc(sizeof(*tracer), GFP_KERNEL); + if (!tracer) + goto out; + tracer->ovfl = ovfl; -int ds_get_bts_end(struct task_struct *task, size_t *pos) -{ - return ds_get_end(task, pos, ds_bts); -} + error = ds_request(&tracer->ds, &tracer->trace.ds, + ds_bts, task, base, size, th, flags); + if (error < 0) + goto out_tracer; -int ds_get_pebs_end(struct task_struct *task, size_t *pos) -{ - return ds_get_end(task, pos, ds_pebs); -} -static int ds_access(struct task_struct *task, size_t index, - const void **record, enum ds_qualifier qual) -{ - struct ds_context *context; - unsigned long base, idx; - int error; + spin_lock_irqsave(&ds_lock, irq); - if (!record) - return -EINVAL; + error = -EPERM; + if (!check_tracer(task)) + goto out_unlock; + get_tracer(task); - context = ds_get_context(task); - error = ds_validate_access(context, qual); - if (error < 0) - goto out; + error = -EPERM; + if (tracer->ds.context->bts_master) + goto out_put_tracer; + tracer->ds.context->bts_master = tracer; - base = ds_get(context->ds, qual, ds_buffer_base); - idx = base + (index * ds_cfg.sizeof_rec[qual]); + spin_unlock_irqrestore(&ds_lock, irq); - error = -EINVAL; - if (idx > ds_get(context->ds, qual, ds_absolute_maximum)) - goto out; - *record = (const void *)idx; - error = ds_cfg.sizeof_rec[qual]; - out: - ds_put_context(context); - return error; -} + tracer->trace.read = bts_read; + tracer->trace.write = bts_write; -int ds_access_bts(struct task_struct *task, size_t index, const void **record) -{ - return ds_access(task, index, record, ds_bts); -} + ds_write_config(tracer->ds.context, &tracer->trace.ds, ds_bts); + ds_resume_bts(tracer); -int ds_access_pebs(struct task_struct *task, size_t index, const void **record) -{ - return ds_access(task, index, record, ds_pebs); + return tracer; + + out_put_tracer: + put_tracer(task); + out_unlock: + spin_unlock_irqrestore(&ds_lock, irq); + ds_put_context(tracer->ds.context); + out_tracer: + kfree(tracer); + out: + return ERR_PTR(error); } -static int ds_write(struct task_struct *task, const void *record, size_t size, - enum ds_qualifier qual, int force) +struct pebs_tracer *ds_request_pebs(struct task_struct *task, + void *base, size_t size, + pebs_ovfl_callback_t ovfl, size_t th, + unsigned int flags) { - struct ds_context *context; + struct pebs_tracer *tracer; + unsigned long irq; int error; - if (!record) - return -EINVAL; + /* buffer overflow notification is not yet implemented */ + error = -EOPNOTSUPP; + if (ovfl) + goto out; - error = -EPERM; - context = ds_get_context(task); - if (!context) + error = -ENOMEM; + tracer = kzalloc(sizeof(*tracer), GFP_KERNEL); + if (!tracer) goto out; + tracer->ovfl = ovfl; - if (!force) { - error = ds_validate_access(context, qual); - if (error < 0) - goto out; - } + error = ds_request(&tracer->ds, &tracer->trace.ds, + ds_pebs, task, base, size, th, flags); + if (error < 0) + goto out_tracer; - error = 0; - while (size) { - unsigned long base, index, end, write_end, int_th; - unsigned long write_size, adj_write_size; + spin_lock_irqsave(&ds_lock, irq); - /* - * write as much as possible without producing an - * overflow interrupt. - * - * interrupt_threshold must either be - * - bigger than absolute_maximum or - * - point to a record between buffer_base and absolute_maximum - * - * index points to a valid record. 
- */ - base = ds_get(context->ds, qual, ds_buffer_base); - index = ds_get(context->ds, qual, ds_index); - end = ds_get(context->ds, qual, ds_absolute_maximum); - int_th = ds_get(context->ds, qual, ds_interrupt_threshold); + error = -EPERM; + if (!check_tracer(task)) + goto out_unlock; + get_tracer(task); - write_end = min(end, int_th); + error = -EPERM; + if (tracer->ds.context->pebs_master) + goto out_put_tracer; + tracer->ds.context->pebs_master = tracer; - /* if we are already beyond the interrupt threshold, - * we fill the entire buffer */ - if (write_end <= index) - write_end = end; + spin_unlock_irqrestore(&ds_lock, irq); - if (write_end <= index) - goto out; + ds_write_config(tracer->ds.context, &tracer->trace.ds, ds_bts); + ds_resume_pebs(tracer); - write_size = min((unsigned long) size, write_end - index); - memcpy((void *)index, record, write_size); + return tracer; - record = (const char *)record + write_size; - size -= write_size; - error += write_size; + out_put_tracer: + put_tracer(task); + out_unlock: + spin_unlock_irqrestore(&ds_lock, irq); + ds_put_context(tracer->ds.context); + out_tracer: + kfree(tracer); + out: + return ERR_PTR(error); +} - adj_write_size = write_size / ds_cfg.sizeof_rec[qual]; - adj_write_size *= ds_cfg.sizeof_rec[qual]; +void ds_release_bts(struct bts_tracer *tracer) +{ + if (!tracer) + return; - /* zero out trailing bytes */ - memset((char *)index + write_size, 0, - adj_write_size - write_size); - index += adj_write_size; + ds_suspend_bts(tracer); - if (index >= end) - index = base; - ds_set(context->ds, qual, ds_index, index); + WARN_ON_ONCE(tracer->ds.context->bts_master != tracer); + tracer->ds.context->bts_master = NULL; - if (index >= int_th) - ds_overflow(task, context, qual); - } + put_tracer(tracer->ds.context->task); + ds_put_context(tracer->ds.context); - out: - ds_put_context(context); - return error; + kfree(tracer); } -int ds_write_bts(struct task_struct *task, const void *record, size_t size) +void ds_suspend_bts(struct bts_tracer *tracer) { - return ds_write(task, record, size, ds_bts, /* force = */ 0); -} + struct task_struct *task; -int ds_write_pebs(struct task_struct *task, const void *record, size_t size) -{ - return ds_write(task, record, size, ds_pebs, /* force = */ 0); -} + if (!tracer) + return; -int ds_unchecked_write_bts(struct task_struct *task, - const void *record, size_t size) -{ - return ds_write(task, record, size, ds_bts, /* force = */ 1); -} + task = tracer->ds.context->task; -int ds_unchecked_write_pebs(struct task_struct *task, - const void *record, size_t size) -{ - return ds_write(task, record, size, ds_pebs, /* force = */ 1); + if (!task || (task == current)) + update_debugctlmsr(get_debugctlmsr() & ~BTS_CONTROL); + + if (task) { + task->thread.debugctlmsr &= ~BTS_CONTROL; + + if (!task->thread.debugctlmsr) + clear_tsk_thread_flag(task, TIF_DEBUGCTLMSR); + } } -static int ds_reset_or_clear(struct task_struct *task, - enum ds_qualifier qual, int clear) +void ds_resume_bts(struct bts_tracer *tracer) { - struct ds_context *context; - unsigned long base, end; - int error; + struct task_struct *task; + unsigned long control; - context = ds_get_context(task); - error = ds_validate_access(context, qual); - if (error < 0) - goto out; + if (!tracer) + return; - base = ds_get(context->ds, qual, ds_buffer_base); - end = ds_get(context->ds, qual, ds_absolute_maximum); + task = tracer->ds.context->task; - if (clear) - memset((void *)base, 0, end - base); + control = ds_cfg.ctl[dsf_bts]; + if (!(tracer->trace.ds.flags & 
BTS_KERNEL)) + control |= ds_cfg.ctl[dsf_bts_kernel]; + if (!(tracer->trace.ds.flags & BTS_USER)) + control |= ds_cfg.ctl[dsf_bts_user]; - ds_set(context->ds, qual, ds_index, base); + if (task) { + task->thread.debugctlmsr |= control; + set_tsk_thread_flag(task, TIF_DEBUGCTLMSR); + } - error = 0; - out: - ds_put_context(context); - return error; + if (!task || (task == current)) + update_debugctlmsr(get_debugctlmsr() | control); } -int ds_reset_bts(struct task_struct *task) +void ds_release_pebs(struct pebs_tracer *tracer) { - return ds_reset_or_clear(task, ds_bts, /* clear = */ 0); + if (!tracer) + return; + + ds_suspend_pebs(tracer); + + WARN_ON_ONCE(tracer->ds.context->pebs_master != tracer); + tracer->ds.context->pebs_master = NULL; + + put_tracer(tracer->ds.context->task); + ds_put_context(tracer->ds.context); + + kfree(tracer); } -int ds_reset_pebs(struct task_struct *task) +void ds_suspend_pebs(struct pebs_tracer *tracer) { - return ds_reset_or_clear(task, ds_pebs, /* clear = */ 0); + } -int ds_clear_bts(struct task_struct *task) +void ds_resume_pebs(struct pebs_tracer *tracer) { - return ds_reset_or_clear(task, ds_bts, /* clear = */ 1); + } -int ds_clear_pebs(struct task_struct *task) +const struct bts_trace *ds_read_bts(struct bts_tracer *tracer) { - return ds_reset_or_clear(task, ds_pebs, /* clear = */ 1); + if (!tracer) + return NULL; + + ds_read_config(tracer->ds.context, &tracer->trace.ds, ds_bts); + return &tracer->trace; } -int ds_get_pebs_reset(struct task_struct *task, u64 *value) +const struct pebs_trace *ds_read_pebs(struct pebs_tracer *tracer) { - struct ds_context *context; - int error; + if (!tracer) + return NULL; + + ds_read_config(tracer->ds.context, &tracer->trace.ds, ds_pebs); + tracer->trace.reset_value = + *(u64 *)(tracer->ds.context->ds + (ds_cfg.sizeof_field * 8)); - if (!value) + return &tracer->trace; +} + +int ds_reset_bts(struct bts_tracer *tracer) +{ + if (!tracer) return -EINVAL; - context = ds_get_context(task); - error = ds_validate_access(context, ds_pebs); - if (error < 0) - goto out; + tracer->trace.ds.top = tracer->trace.ds.begin; - *value = *(u64 *)(context->ds + (ds_cfg.sizeof_field * 8)); + ds_set(tracer->ds.context->ds, ds_bts, ds_index, + (unsigned long)tracer->trace.ds.top); - error = 0; - out: - ds_put_context(context); - return error; + return 0; } -int ds_set_pebs_reset(struct task_struct *task, u64 value) +int ds_reset_pebs(struct pebs_tracer *tracer) { - struct ds_context *context; - int error; + if (!tracer) + return -EINVAL; - context = ds_get_context(task); - error = ds_validate_access(context, ds_pebs); - if (error < 0) - goto out; + tracer->trace.ds.top = tracer->trace.ds.begin; - *(u64 *)(context->ds + (ds_cfg.sizeof_field * 8)) = value; + ds_set(tracer->ds.context->ds, ds_bts, ds_index, + (unsigned long)tracer->trace.ds.top); - error = 0; - out: - ds_put_context(context); - return error; + return 0; +} + +int ds_set_pebs_reset(struct pebs_tracer *tracer, u64 value) +{ + if (!tracer) + return -EINVAL; + + *(u64 *)(tracer->ds.context->ds + (ds_cfg.sizeof_field * 8)) = value; + + return 0; } -static const struct ds_configuration ds_cfg_var = { - .sizeof_ds = sizeof(long) * 12, - .sizeof_field = sizeof(long), - .sizeof_rec[ds_bts] = sizeof(long) * 3, +static const struct ds_configuration ds_cfg_netburst = { + .name = "netburst", + .ctl[dsf_bts] = (1 << 2) | (1 << 3), + .ctl[dsf_bts_kernel] = (1 << 5), + .ctl[dsf_bts_user] = (1 << 6), + + .sizeof_field = sizeof(long), + .sizeof_rec[ds_bts] = sizeof(long) * 3, #ifdef __i386__ - 
.sizeof_rec[ds_pebs] = sizeof(long) * 10 + .sizeof_rec[ds_pebs] = sizeof(long) * 10, #else - .sizeof_rec[ds_pebs] = sizeof(long) * 18 + .sizeof_rec[ds_pebs] = sizeof(long) * 18, #endif }; -static const struct ds_configuration ds_cfg_64 = { - .sizeof_ds = 8 * 12, - .sizeof_field = 8, - .sizeof_rec[ds_bts] = 8 * 3, +static const struct ds_configuration ds_cfg_pentium_m = { + .name = "pentium m", + .ctl[dsf_bts] = (1 << 6) | (1 << 7), + + .sizeof_field = sizeof(long), + .sizeof_rec[ds_bts] = sizeof(long) * 3, #ifdef __i386__ - .sizeof_rec[ds_pebs] = 8 * 10 + .sizeof_rec[ds_pebs] = sizeof(long) * 10, #else - .sizeof_rec[ds_pebs] = 8 * 18 + .sizeof_rec[ds_pebs] = sizeof(long) * 18, #endif }; +static const struct ds_configuration ds_cfg_core2 = { + .name = "core 2", + .ctl[dsf_bts] = (1 << 6) | (1 << 7), + .ctl[dsf_bts_kernel] = (1 << 9), + .ctl[dsf_bts_user] = (1 << 10), + + .sizeof_field = 8, + .sizeof_rec[ds_bts] = 8 * 3, + .sizeof_rec[ds_pebs] = 8 * 18, +}; -static inline void +static void ds_configure(const struct ds_configuration *cfg) { + memset(&ds_cfg, 0, sizeof(ds_cfg)); ds_cfg = *cfg; + + printk(KERN_INFO "[ds] using %s configuration\n", ds_cfg.name); + + if (!cpu_has_bts) { + ds_cfg.ctl[dsf_bts] = 0; + printk(KERN_INFO "[ds] bts not available\n"); + } + if (!cpu_has_pebs) + printk(KERN_INFO "[ds] pebs not available\n"); + + WARN_ON_ONCE(MAX_SIZEOF_DS < (12 * ds_cfg.sizeof_field)); } void __cpuinit ds_init_intel(struct cpuinfo_x86 *c) @@ -847,16 +949,15 @@ void __cpuinit ds_init_intel(struct cpuinfo_x86 *c) switch (c->x86) { case 0x6: switch (c->x86_model) { + case 0 ... 0xC: + /* sorry, don't know about them */ + break; case 0xD: case 0xE: /* Pentium M */ - ds_configure(&ds_cfg_var); + ds_configure(&ds_cfg_pentium_m); break; - case 0xF: /* Core2 */ - case 0x1C: /* Atom */ - ds_configure(&ds_cfg_64); - break; - default: - /* sorry, don't know about them */ + default: /* Core2, Atom, ... */ + ds_configure(&ds_cfg_core2); break; } break; @@ -865,7 +966,7 @@ void __cpuinit ds_init_intel(struct cpuinfo_x86 *c) case 0x0: case 0x1: case 0x2: /* Netburst */ - ds_configure(&ds_cfg_var); + ds_configure(&ds_cfg_netburst); break; default: /* sorry, don't know about them */ @@ -878,12 +979,52 @@ void __cpuinit ds_init_intel(struct cpuinfo_x86 *c) } } -void ds_free(struct ds_context *context) +/* + * Change the DS configuration from tracing prev to tracing next. 
+ */ +void ds_switch_to(struct task_struct *prev, struct task_struct *next) +{ + struct ds_context *prev_ctx = prev->thread.ds_ctx; + struct ds_context *next_ctx = next->thread.ds_ctx; + + if (prev_ctx) { + update_debugctlmsr(0); + + if (prev_ctx->bts_master && + (prev_ctx->bts_master->trace.ds.flags & BTS_TIMESTAMPS)) { + struct bts_struct ts = { + .qualifier = bts_task_departs, + .variant.timestamp.jiffies = jiffies_64, + .variant.timestamp.pid = prev->pid + }; + bts_write(prev_ctx->bts_master, &ts); + } + } + + if (next_ctx) { + if (next_ctx->bts_master && + (next_ctx->bts_master->trace.ds.flags & BTS_TIMESTAMPS)) { + struct bts_struct ts = { + .qualifier = bts_task_arrives, + .variant.timestamp.jiffies = jiffies_64, + .variant.timestamp.pid = next->pid + }; + bts_write(next_ctx->bts_master, &ts); + } + + wrmsrl(MSR_IA32_DS_AREA, (unsigned long)next_ctx->ds); + } + + update_debugctlmsr(next->thread.debugctlmsr); +} + +void ds_copy_thread(struct task_struct *tsk, struct task_struct *father) +{ + clear_tsk_thread_flag(tsk, TIF_DS_AREA_MSR); + tsk->thread.ds_ctx = NULL; +} + +void ds_exit_thread(struct task_struct *tsk) { - /* This is called when the task owning the parameter context - * is dying. There should not be any user of that context left - * to disturb us, anymore. */ - unsigned long leftovers = context->count; - while (leftovers--) - ds_put_context(context); + WARN_ON(tsk->thread.ds_ctx); } diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c new file mode 100644 index 000000000000..6b1f6f6f8661 --- /dev/null +++ b/arch/x86/kernel/dumpstack.c @@ -0,0 +1,351 @@ +/* + * Copyright (C) 1991, 1992 Linus Torvalds + * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs + */ +#include <linux/kallsyms.h> +#include <linux/kprobes.h> +#include <linux/uaccess.h> +#include <linux/utsname.h> +#include <linux/hardirq.h> +#include <linux/kdebug.h> +#include <linux/module.h> +#include <linux/ptrace.h> +#include <linux/kexec.h> +#include <linux/bug.h> +#include <linux/nmi.h> +#include <linux/sysfs.h> + +#include <asm/stacktrace.h> + +#include "dumpstack.h" + +int panic_on_unrecovered_nmi; +unsigned int code_bytes = 64; +int kstack_depth_to_print = 3 * STACKSLOTS_PER_LINE; +static int die_counter; + +void printk_address(unsigned long address, int reliable) +{ + printk(" [<%p>] %s%pS\n", (void *) address, + reliable ? "" : "? 
", (void *) address); +} + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +static void +print_ftrace_graph_addr(unsigned long addr, void *data, + const struct stacktrace_ops *ops, + struct thread_info *tinfo, int *graph) +{ + struct task_struct *task = tinfo->task; + unsigned long ret_addr; + int index = task->curr_ret_stack; + + if (addr != (unsigned long)return_to_handler) + return; + + if (!task->ret_stack || index < *graph) + return; + + index -= *graph; + ret_addr = task->ret_stack[index].ret; + + ops->address(data, ret_addr, 1); + + (*graph)++; +} +#else +static inline void +print_ftrace_graph_addr(unsigned long addr, void *data, + const struct stacktrace_ops *ops, + struct thread_info *tinfo, int *graph) +{ } +#endif + +/* + * x86-64 can have up to three kernel stacks: + * process stack + * interrupt stack + * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack + */ + +static inline int valid_stack_ptr(struct thread_info *tinfo, + void *p, unsigned int size, void *end) +{ + void *t = tinfo; + if (end) { + if (p < end && p >= (end-THREAD_SIZE)) + return 1; + else + return 0; + } + return p > t && p < t + THREAD_SIZE - size; +} + +unsigned long +print_context_stack(struct thread_info *tinfo, + unsigned long *stack, unsigned long bp, + const struct stacktrace_ops *ops, void *data, + unsigned long *end, int *graph) +{ + struct stack_frame *frame = (struct stack_frame *)bp; + + while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) { + unsigned long addr; + + addr = *stack; + if (__kernel_text_address(addr)) { + if ((unsigned long) stack == bp + sizeof(long)) { + ops->address(data, addr, 1); + frame = frame->next_frame; + bp = (unsigned long) frame; + } else { + ops->address(data, addr, bp == 0); + } + print_ftrace_graph_addr(addr, data, ops, tinfo, graph); + } + stack++; + } + return bp; +} + + +static void +print_trace_warning_symbol(void *data, char *msg, unsigned long symbol) +{ + printk(data); + print_symbol(msg, symbol); + printk("\n"); +} + +static void print_trace_warning(void *data, char *msg) +{ + printk("%s%s\n", (char *)data, msg); +} + +static int print_trace_stack(void *data, char *name) +{ + printk("%s <%s> ", (char *)data, name); + return 0; +} + +/* + * Print one address/symbol entries per line. 
+ */ +static void print_trace_address(void *data, unsigned long addr, int reliable) +{ + touch_nmi_watchdog(); + printk(data); + printk_address(addr, reliable); +} + +static const struct stacktrace_ops print_trace_ops = { + .warning = print_trace_warning, + .warning_symbol = print_trace_warning_symbol, + .stack = print_trace_stack, + .address = print_trace_address, +}; + +void +show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, + unsigned long *stack, unsigned long bp, char *log_lvl) +{ + printk("%sCall Trace:\n", log_lvl); + dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl); +} + +void show_trace(struct task_struct *task, struct pt_regs *regs, + unsigned long *stack, unsigned long bp) +{ + show_trace_log_lvl(task, regs, stack, bp, ""); +} + +void show_stack(struct task_struct *task, unsigned long *sp) +{ + show_stack_log_lvl(task, NULL, sp, 0, ""); +} + +/* + * The architecture-independent dump_stack generator + */ +void dump_stack(void) +{ + unsigned long bp = 0; + unsigned long stack; + +#ifdef CONFIG_FRAME_POINTER + if (!bp) + get_bp(bp); +#endif + + printk("Pid: %d, comm: %.20s %s %s %.*s\n", + current->pid, current->comm, print_tainted(), + init_utsname()->release, + (int)strcspn(init_utsname()->version, " "), + init_utsname()->version); + show_trace(NULL, NULL, &stack, bp); +} +EXPORT_SYMBOL(dump_stack); + +static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED; +static int die_owner = -1; +static unsigned int die_nest_count; + +unsigned __kprobes long oops_begin(void) +{ + int cpu; + unsigned long flags; + + oops_enter(); + + /* racy, but better than risking deadlock. */ + raw_local_irq_save(flags); + cpu = smp_processor_id(); + if (!__raw_spin_trylock(&die_lock)) { + if (cpu == die_owner) + /* nested oops. should stop eventually */; + else + __raw_spin_lock(&die_lock); + } + die_nest_count++; + die_owner = cpu; + console_verbose(); + bust_spinlocks(1); + return flags; +} + +void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr) +{ + if (regs && kexec_should_crash(current)) + crash_kexec(regs); + + bust_spinlocks(0); + die_owner = -1; + add_taint(TAINT_DIE); + die_nest_count--; + if (!die_nest_count) + /* Nest count reaches zero, release the lock. 
*/ + __raw_spin_unlock(&die_lock); + raw_local_irq_restore(flags); + oops_exit(); + + if (!signr) + return; + if (in_interrupt()) + panic("Fatal exception in interrupt"); + if (panic_on_oops) + panic("Fatal exception"); + do_exit(signr); +} + +int __kprobes __die(const char *str, struct pt_regs *regs, long err) +{ +#ifdef CONFIG_X86_32 + unsigned short ss; + unsigned long sp; +#endif + printk(KERN_EMERG "%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter); +#ifdef CONFIG_PREEMPT + printk("PREEMPT "); +#endif +#ifdef CONFIG_SMP + printk("SMP "); +#endif +#ifdef CONFIG_DEBUG_PAGEALLOC + printk("DEBUG_PAGEALLOC"); +#endif + printk("\n"); + sysfs_printk_last_file(); + if (notify_die(DIE_OOPS, str, regs, err, + current->thread.trap_no, SIGSEGV) == NOTIFY_STOP) + return 1; + + show_registers(regs); +#ifdef CONFIG_X86_32 + sp = (unsigned long) (&regs->sp); + savesegment(ss, ss); + if (user_mode(regs)) { + sp = regs->sp; + ss = regs->ss & 0xffff; + } + printk(KERN_EMERG "EIP: [<%08lx>] ", regs->ip); + print_symbol("%s", regs->ip); + printk(" SS:ESP %04x:%08lx\n", ss, sp); +#else + /* Executive summary in case the oops scrolled away */ + printk(KERN_ALERT "RIP "); + printk_address(regs->ip, 1); + printk(" RSP <%016lx>\n", regs->sp); +#endif + return 0; +} + +/* + * This is gone through when something in the kernel has done something bad + * and is about to be terminated: + */ +void die(const char *str, struct pt_regs *regs, long err) +{ + unsigned long flags = oops_begin(); + int sig = SIGSEGV; + + if (!user_mode_vm(regs)) + report_bug(regs->ip, regs); + + if (__die(str, regs, err)) + sig = 0; + oops_end(flags, regs, sig); +} + +void notrace __kprobes +die_nmi(char *str, struct pt_regs *regs, int do_panic) +{ + unsigned long flags; + + if (notify_die(DIE_NMIWATCHDOG, str, regs, 0, 2, SIGINT) == NOTIFY_STOP) + return; + + /* + * We are in trouble anyway, lets at least try + * to get a message out. 
+ */ + flags = oops_begin(); + printk(KERN_EMERG "%s", str); + printk(" on CPU%d, ip %08lx, registers:\n", + smp_processor_id(), regs->ip); + show_registers(regs); + oops_end(flags, regs, 0); + if (do_panic || panic_on_oops) + panic("Non maskable interrupt"); + nmi_exit(); + local_irq_enable(); + do_exit(SIGBUS); +} + +static int __init oops_setup(char *s) +{ + if (!s) + return -EINVAL; + if (!strcmp(s, "panic")) + panic_on_oops = 1; + return 0; +} +early_param("oops", oops_setup); + +static int __init kstack_setup(char *s) +{ + if (!s) + return -EINVAL; + kstack_depth_to_print = simple_strtoul(s, NULL, 0); + return 0; +} +early_param("kstack", kstack_setup); + +static int __init code_bytes_setup(char *s) +{ + code_bytes = simple_strtoul(s, NULL, 0); + if (code_bytes > 8192) + code_bytes = 8192; + + return 1; +} +__setup("code_bytes=", code_bytes_setup); diff --git a/arch/x86/kernel/dumpstack.h b/arch/x86/kernel/dumpstack.h new file mode 100644 index 000000000000..da87590b8698 --- /dev/null +++ b/arch/x86/kernel/dumpstack.h @@ -0,0 +1,39 @@ +/* + * Copyright (C) 1991, 1992 Linus Torvalds + * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs + */ + +#ifndef DUMPSTACK_H +#define DUMPSTACK_H + +#ifdef CONFIG_X86_32 +#define STACKSLOTS_PER_LINE 8 +#define get_bp(bp) asm("movl %%ebp, %0" : "=r" (bp) :) +#else +#define STACKSLOTS_PER_LINE 4 +#define get_bp(bp) asm("movq %%rbp, %0" : "=r" (bp) :) +#endif + +extern unsigned long +print_context_stack(struct thread_info *tinfo, + unsigned long *stack, unsigned long bp, + const struct stacktrace_ops *ops, void *data, + unsigned long *end, int *graph); + +extern void +show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, + unsigned long *stack, unsigned long bp, char *log_lvl); + +extern void +show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs, + unsigned long *sp, unsigned long bp, char *log_lvl); + +extern unsigned int code_bytes; +extern int kstack_depth_to_print; + +/* The form of the top of the frame on the stack */ +struct stack_frame { + struct stack_frame *next_frame; + unsigned long return_address; +}; +#endif diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c index b3614752197b..d593cd1f58dc 100644 --- a/arch/x86/kernel/dumpstack_32.c +++ b/arch/x86/kernel/dumpstack_32.c @@ -17,69 +17,14 @@ #include <asm/stacktrace.h> -#define STACKSLOTS_PER_LINE 8 -#define get_bp(bp) asm("movl %%ebp, %0" : "=r" (bp) :) - -int panic_on_unrecovered_nmi; -int kstack_depth_to_print = 3 * STACKSLOTS_PER_LINE; -static unsigned int code_bytes = 64; -static int die_counter; - -void printk_address(unsigned long address, int reliable) -{ - printk(" [<%p>] %s%pS\n", (void *) address, - reliable ? "" : "? 
", (void *) address); -} - -static inline int valid_stack_ptr(struct thread_info *tinfo, - void *p, unsigned int size, void *end) -{ - void *t = tinfo; - if (end) { - if (p < end && p >= (end-THREAD_SIZE)) - return 1; - else - return 0; - } - return p > t && p < t + THREAD_SIZE - size; -} - -/* The form of the top of the frame on the stack */ -struct stack_frame { - struct stack_frame *next_frame; - unsigned long return_address; -}; - -static inline unsigned long -print_context_stack(struct thread_info *tinfo, - unsigned long *stack, unsigned long bp, - const struct stacktrace_ops *ops, void *data, - unsigned long *end) -{ - struct stack_frame *frame = (struct stack_frame *)bp; - - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) { - unsigned long addr; - - addr = *stack; - if (__kernel_text_address(addr)) { - if ((unsigned long) stack == bp + sizeof(long)) { - ops->address(data, addr, 1); - frame = frame->next_frame; - bp = (unsigned long) frame; - } else { - ops->address(data, addr, bp == 0); - } - } - stack++; - } - return bp; -} +#include "dumpstack.h" void dump_trace(struct task_struct *task, struct pt_regs *regs, unsigned long *stack, unsigned long bp, const struct stacktrace_ops *ops, void *data) { + int graph = 0; + if (!task) task = current; @@ -107,7 +52,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, context = (struct thread_info *) ((unsigned long)stack & (~(THREAD_SIZE - 1))); - bp = print_context_stack(context, stack, bp, ops, data, NULL); + bp = print_context_stack(context, stack, bp, ops, + data, NULL, &graph); stack = (unsigned long *)context->previous_esp; if (!stack) @@ -119,57 +65,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, } EXPORT_SYMBOL(dump_trace); -static void -print_trace_warning_symbol(void *data, char *msg, unsigned long symbol) -{ - printk(data); - print_symbol(msg, symbol); - printk("\n"); -} - -static void print_trace_warning(void *data, char *msg) -{ - printk("%s%s\n", (char *)data, msg); -} - -static int print_trace_stack(void *data, char *name) -{ - printk("%s <%s> ", (char *)data, name); - return 0; -} - -/* - * Print one address/symbol entries per line. 
- */ -static void print_trace_address(void *data, unsigned long addr, int reliable) -{ - touch_nmi_watchdog(); - printk(data); - printk_address(addr, reliable); -} - -static const struct stacktrace_ops print_trace_ops = { - .warning = print_trace_warning, - .warning_symbol = print_trace_warning_symbol, - .stack = print_trace_stack, - .address = print_trace_address, -}; - -static void -show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, - unsigned long *stack, unsigned long bp, char *log_lvl) -{ - printk("%sCall Trace:\n", log_lvl); - dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl); -} - -void show_trace(struct task_struct *task, struct pt_regs *regs, - unsigned long *stack, unsigned long bp) -{ - show_trace_log_lvl(task, regs, stack, bp, ""); -} - -static void +void show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs, unsigned long *sp, unsigned long bp, char *log_lvl) { @@ -196,33 +92,6 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs, show_trace_log_lvl(task, regs, sp, bp, log_lvl); } -void show_stack(struct task_struct *task, unsigned long *sp) -{ - show_stack_log_lvl(task, NULL, sp, 0, ""); -} - -/* - * The architecture-independent dump_stack generator - */ -void dump_stack(void) -{ - unsigned long bp = 0; - unsigned long stack; - -#ifdef CONFIG_FRAME_POINTER - if (!bp) - get_bp(bp); -#endif - - printk("Pid: %d, comm: %.20s %s %s %.*s\n", - current->pid, current->comm, print_tainted(), - init_utsname()->release, - (int)strcspn(init_utsname()->version, " "), - init_utsname()->version); - show_trace(NULL, NULL, &stack, bp); -} - -EXPORT_SYMBOL(dump_stack); void show_registers(struct pt_regs *regs) { @@ -283,167 +152,3 @@ int is_valid_bugaddr(unsigned long ip) return ud2 == 0x0b0f; } -static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED; -static int die_owner = -1; -static unsigned int die_nest_count; - -unsigned __kprobes long oops_begin(void) -{ - unsigned long flags; - - oops_enter(); - - if (die_owner != raw_smp_processor_id()) { - console_verbose(); - raw_local_irq_save(flags); - __raw_spin_lock(&die_lock); - die_owner = smp_processor_id(); - die_nest_count = 0; - bust_spinlocks(1); - } else { - raw_local_irq_save(flags); - } - die_nest_count++; - return flags; -} - -void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr) -{ - bust_spinlocks(0); - die_owner = -1; - add_taint(TAINT_DIE); - __raw_spin_unlock(&die_lock); - raw_local_irq_restore(flags); - - if (!regs) - return; - - if (kexec_should_crash(current)) - crash_kexec(regs); - if (in_interrupt()) - panic("Fatal exception in interrupt"); - if (panic_on_oops) - panic("Fatal exception"); - oops_exit(); - do_exit(signr); -} - -int __kprobes __die(const char *str, struct pt_regs *regs, long err) -{ - unsigned short ss; - unsigned long sp; - - printk(KERN_EMERG "%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter); -#ifdef CONFIG_PREEMPT - printk("PREEMPT "); -#endif -#ifdef CONFIG_SMP - printk("SMP "); -#endif -#ifdef CONFIG_DEBUG_PAGEALLOC - printk("DEBUG_PAGEALLOC"); -#endif - printk("\n"); - sysfs_printk_last_file(); - if (notify_die(DIE_OOPS, str, regs, err, - current->thread.trap_no, SIGSEGV) == NOTIFY_STOP) - return 1; - - show_registers(regs); - /* Executive summary in case the oops scrolled away */ - sp = (unsigned long) (&regs->sp); - savesegment(ss, ss); - if (user_mode(regs)) { - sp = regs->sp; - ss = regs->ss & 0xffff; - } - printk(KERN_EMERG "EIP: [<%08lx>] ", regs->ip); - print_symbol("%s", regs->ip); - printk(" SS:ESP %04x:%08lx\n", 
ss, sp); - return 0; -} - -/* - * This is gone through when something in the kernel has done something bad - * and is about to be terminated: - */ -void die(const char *str, struct pt_regs *regs, long err) -{ - unsigned long flags = oops_begin(); - - if (die_nest_count < 3) { - report_bug(regs->ip, regs); - - if (__die(str, regs, err)) - regs = NULL; - } else { - printk(KERN_EMERG "Recursive die() failure, output suppressed\n"); - } - - oops_end(flags, regs, SIGSEGV); -} - -static DEFINE_SPINLOCK(nmi_print_lock); - -void notrace __kprobes -die_nmi(char *str, struct pt_regs *regs, int do_panic) -{ - if (notify_die(DIE_NMIWATCHDOG, str, regs, 0, 2, SIGINT) == NOTIFY_STOP) - return; - - spin_lock(&nmi_print_lock); - /* - * We are in trouble anyway, lets at least try - * to get a message out: - */ - bust_spinlocks(1); - printk(KERN_EMERG "%s", str); - printk(" on CPU%d, ip %08lx, registers:\n", - smp_processor_id(), regs->ip); - show_registers(regs); - if (do_panic) - panic("Non maskable interrupt"); - console_silent(); - spin_unlock(&nmi_print_lock); - - /* - * If we are in kernel we are probably nested up pretty bad - * and might aswell get out now while we still can: - */ - if (!user_mode_vm(regs)) { - current->thread.trap_no = 2; - crash_kexec(regs); - } - - bust_spinlocks(0); - do_exit(SIGSEGV); -} - -static int __init oops_setup(char *s) -{ - if (!s) - return -EINVAL; - if (!strcmp(s, "panic")) - panic_on_oops = 1; - return 0; -} -early_param("oops", oops_setup); - -static int __init kstack_setup(char *s) -{ - if (!s) - return -EINVAL; - kstack_depth_to_print = simple_strtoul(s, NULL, 0); - return 0; -} -early_param("kstack", kstack_setup); - -static int __init code_bytes_setup(char *s) -{ - code_bytes = simple_strtoul(s, NULL, 0); - if (code_bytes > 8192) - code_bytes = 8192; - - return 1; -} -__setup("code_bytes=", code_bytes_setup); diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c index 96a5db7da8a7..c302d0707048 100644 --- a/arch/x86/kernel/dumpstack_64.c +++ b/arch/x86/kernel/dumpstack_64.c @@ -17,19 +17,7 @@ #include <asm/stacktrace.h> -#define STACKSLOTS_PER_LINE 4 -#define get_bp(bp) asm("movq %%rbp, %0" : "=r" (bp) :) - -int panic_on_unrecovered_nmi; -int kstack_depth_to_print = 3 * STACKSLOTS_PER_LINE; -static unsigned int code_bytes = 64; -static int die_counter; - -void printk_address(unsigned long address, int reliable) -{ - printk(" [<%p>] %s%pS\n", (void *) address, - reliable ? "" : "? 
", (void *) address); -} +#include "dumpstack.h" static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack, unsigned *usedp, char **idp) @@ -113,51 +101,6 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack, * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack */ -static inline int valid_stack_ptr(struct thread_info *tinfo, - void *p, unsigned int size, void *end) -{ - void *t = tinfo; - if (end) { - if (p < end && p >= (end-THREAD_SIZE)) - return 1; - else - return 0; - } - return p > t && p < t + THREAD_SIZE - size; -} - -/* The form of the top of the frame on the stack */ -struct stack_frame { - struct stack_frame *next_frame; - unsigned long return_address; -}; - -static inline unsigned long -print_context_stack(struct thread_info *tinfo, - unsigned long *stack, unsigned long bp, - const struct stacktrace_ops *ops, void *data, - unsigned long *end) -{ - struct stack_frame *frame = (struct stack_frame *)bp; - - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) { - unsigned long addr; - - addr = *stack; - if (__kernel_text_address(addr)) { - if ((unsigned long) stack == bp + sizeof(long)) { - ops->address(data, addr, 1); - frame = frame->next_frame; - bp = (unsigned long) frame; - } else { - ops->address(data, addr, bp == 0); - } - } - stack++; - } - return bp; -} - void dump_trace(struct task_struct *task, struct pt_regs *regs, unsigned long *stack, unsigned long bp, const struct stacktrace_ops *ops, void *data) @@ -166,6 +109,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr; unsigned used = 0; struct thread_info *tinfo; + int graph = 0; if (!task) task = current; @@ -206,7 +150,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, break; bp = print_context_stack(tinfo, stack, bp, ops, - data, estack_end); + data, estack_end, &graph); ops->stack(data, "<EOE>"); /* * We link to the next stack via the @@ -225,7 +169,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, if (ops->stack(data, "IRQ") < 0) break; bp = print_context_stack(tinfo, stack, bp, - ops, data, irqstack_end); + ops, data, irqstack_end, &graph); /* * We link to the next stack (which would be * the process stack normally) the last @@ -243,62 +187,12 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, /* * This handles the process stack: */ - bp = print_context_stack(tinfo, stack, bp, ops, data, NULL); + bp = print_context_stack(tinfo, stack, bp, ops, data, NULL, &graph); put_cpu(); } EXPORT_SYMBOL(dump_trace); -static void -print_trace_warning_symbol(void *data, char *msg, unsigned long symbol) -{ - printk(data); - print_symbol(msg, symbol); - printk("\n"); -} - -static void print_trace_warning(void *data, char *msg) -{ - printk("%s%s\n", (char *)data, msg); -} - -static int print_trace_stack(void *data, char *name) -{ - printk("%s <%s> ", (char *)data, name); - return 0; -} - -/* - * Print one address/symbol entries per line. 
- */ -static void print_trace_address(void *data, unsigned long addr, int reliable) -{ - touch_nmi_watchdog(); - printk(data); - printk_address(addr, reliable); -} - -static const struct stacktrace_ops print_trace_ops = { - .warning = print_trace_warning, - .warning_symbol = print_trace_warning_symbol, - .stack = print_trace_stack, - .address = print_trace_address, -}; - -static void -show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, - unsigned long *stack, unsigned long bp, char *log_lvl) -{ - printk("%sCall Trace:\n", log_lvl); - dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl); -} - -void show_trace(struct task_struct *task, struct pt_regs *regs, - unsigned long *stack, unsigned long bp) -{ - show_trace_log_lvl(task, regs, stack, bp, ""); -} - -static void +void show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs, unsigned long *sp, unsigned long bp, char *log_lvl) { @@ -342,33 +236,6 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs, show_trace_log_lvl(task, regs, sp, bp, log_lvl); } -void show_stack(struct task_struct *task, unsigned long *sp) -{ - show_stack_log_lvl(task, NULL, sp, 0, ""); -} - -/* - * The architecture-independent dump_stack generator - */ -void dump_stack(void) -{ - unsigned long bp = 0; - unsigned long stack; - -#ifdef CONFIG_FRAME_POINTER - if (!bp) - get_bp(bp); -#endif - - printk("Pid: %d, comm: %.20s %s %s %.*s\n", - current->pid, current->comm, print_tainted(), - init_utsname()->release, - (int)strcspn(init_utsname()->version, " "), - init_utsname()->version); - show_trace(NULL, NULL, &stack, bp); -} -EXPORT_SYMBOL(dump_stack); - void show_registers(struct pt_regs *regs) { int i; @@ -429,147 +296,3 @@ int is_valid_bugaddr(unsigned long ip) return ud2 == 0x0b0f; } -static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED; -static int die_owner = -1; -static unsigned int die_nest_count; - -unsigned __kprobes long oops_begin(void) -{ - int cpu; - unsigned long flags; - - oops_enter(); - - /* racy, but better than risking deadlock. */ - raw_local_irq_save(flags); - cpu = smp_processor_id(); - if (!__raw_spin_trylock(&die_lock)) { - if (cpu == die_owner) - /* nested oops. should stop eventually */; - else - __raw_spin_lock(&die_lock); - } - die_nest_count++; - die_owner = cpu; - console_verbose(); - bust_spinlocks(1); - return flags; -} - -void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr) -{ - die_owner = -1; - bust_spinlocks(0); - die_nest_count--; - if (!die_nest_count) - /* Nest count reaches zero, release the lock. 
*/ - __raw_spin_unlock(&die_lock); - raw_local_irq_restore(flags); - if (!regs) { - oops_exit(); - return; - } - if (in_interrupt()) - panic("Fatal exception in interrupt"); - if (panic_on_oops) - panic("Fatal exception"); - oops_exit(); - do_exit(signr); -} - -int __kprobes __die(const char *str, struct pt_regs *regs, long err) -{ - printk(KERN_EMERG "%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter); -#ifdef CONFIG_PREEMPT - printk("PREEMPT "); -#endif -#ifdef CONFIG_SMP - printk("SMP "); -#endif -#ifdef CONFIG_DEBUG_PAGEALLOC - printk("DEBUG_PAGEALLOC"); -#endif - printk("\n"); - sysfs_printk_last_file(); - if (notify_die(DIE_OOPS, str, regs, err, - current->thread.trap_no, SIGSEGV) == NOTIFY_STOP) - return 1; - - show_registers(regs); - add_taint(TAINT_DIE); - /* Executive summary in case the oops scrolled away */ - printk(KERN_ALERT "RIP "); - printk_address(regs->ip, 1); - printk(" RSP <%016lx>\n", regs->sp); - if (kexec_should_crash(current)) - crash_kexec(regs); - return 0; -} - -void die(const char *str, struct pt_regs *regs, long err) -{ - unsigned long flags = oops_begin(); - - if (!user_mode(regs)) - report_bug(regs->ip, regs); - - if (__die(str, regs, err)) - regs = NULL; - oops_end(flags, regs, SIGSEGV); -} - -notrace __kprobes void -die_nmi(char *str, struct pt_regs *regs, int do_panic) -{ - unsigned long flags; - - if (notify_die(DIE_NMIWATCHDOG, str, regs, 0, 2, SIGINT) == NOTIFY_STOP) - return; - - flags = oops_begin(); - /* - * We are in trouble anyway, lets at least try - * to get a message out. - */ - printk(KERN_EMERG "%s", str); - printk(" on CPU%d, ip %08lx, registers:\n", - smp_processor_id(), regs->ip); - show_registers(regs); - if (kexec_should_crash(current)) - crash_kexec(regs); - if (do_panic || panic_on_oops) - panic("Non maskable interrupt"); - oops_end(flags, NULL, SIGBUS); - nmi_exit(); - local_irq_enable(); - do_exit(SIGBUS); -} - -static int __init oops_setup(char *s) -{ - if (!s) - return -EINVAL; - if (!strcmp(s, "panic")) - panic_on_oops = 1; - return 0; -} -early_param("oops", oops_setup); - -static int __init kstack_setup(char *s) -{ - if (!s) - return -EINVAL; - kstack_depth_to_print = simple_strtoul(s, NULL, 0); - return 0; -} -early_param("kstack", kstack_setup); - -static int __init code_bytes_setup(char *s) -{ - code_bytes = simple_strtoul(s, NULL, 0); - if (code_bytes > 8192) - code_bytes = 8192; - - return 1; -} -__setup("code_bytes=", code_bytes_setup); diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index 7aafeb5263ef..65a13943e098 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c @@ -677,22 +677,6 @@ struct early_res { }; static struct early_res early_res[MAX_EARLY_RES] __initdata = { { 0, PAGE_SIZE, "BIOS data page" }, /* BIOS data page */ -#if defined(CONFIG_X86_64) && defined(CONFIG_X86_TRAMPOLINE) - { TRAMPOLINE_BASE, TRAMPOLINE_BASE + 2 * PAGE_SIZE, "TRAMPOLINE" }, -#endif -#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) - /* - * But first pinch a few for the stack/trampoline stuff - * FIXME: Don't need the extra page at 4K, but need to fix - * trampoline before removing it. (see the GDT stuff) - */ - { PAGE_SIZE, PAGE_SIZE + PAGE_SIZE, "EX TRAMPOLINE" }, - /* - * Has to be in very low memory so we can execute - * real-mode AP code. 
- */ - { TRAMPOLINE_BASE, TRAMPOLINE_BASE + PAGE_SIZE, "TRAMPOLINE" }, -#endif {} }; diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c index 1b894b72c0f5..744aa7fc49d5 100644 --- a/arch/x86/kernel/early-quirks.c +++ b/arch/x86/kernel/early-quirks.c @@ -17,6 +17,7 @@ #include <asm/io_apic.h> #include <asm/apic.h> #include <asm/iommu.h> +#include <asm/gart.h> static void __init fix_hypertransport_config(int num, int slot, int func) { diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c index 34ad997d3834..23b138e31e9c 100644 --- a/arch/x86/kernel/early_printk.c +++ b/arch/x86/kernel/early_printk.c @@ -875,49 +875,6 @@ static struct console early_dbgp_console = { }; #endif -/* Console interface to a host file on AMD's SimNow! */ - -static int simnow_fd; - -enum { - MAGIC1 = 0xBACCD00A, - MAGIC2 = 0xCA110000, - XOPEN = 5, - XWRITE = 4, -}; - -static noinline long simnow(long cmd, long a, long b, long c) -{ - long ret; - - asm volatile("cpuid" : - "=a" (ret) : - "b" (a), "c" (b), "d" (c), "0" (MAGIC1), "D" (cmd + MAGIC2)); - return ret; -} - -static void __init simnow_init(char *str) -{ - char *fn = "klog"; - - if (*str == '=') - fn = ++str; - /* error ignored */ - simnow_fd = simnow(XOPEN, (unsigned long)fn, O_WRONLY|O_APPEND|O_CREAT, 0644); -} - -static void simnow_write(struct console *con, const char *s, unsigned n) -{ - simnow(XWRITE, simnow_fd, (unsigned long)s, n); -} - -static struct console simnow_console = { - .name = "simnow", - .write = simnow_write, - .flags = CON_PRINTBUFFER, - .index = -1, -}; - /* Direct interface for emergencies */ static struct console *early_console = &early_vga_console; static int __initdata early_console_initialized; @@ -960,10 +917,6 @@ static int __init setup_early_printk(char *buf) max_ypos = boot_params.screen_info.orig_video_lines; current_ypos = boot_params.screen_info.orig_y; early_console = &early_vga_console; - } else if (!strncmp(buf, "simnow", 6)) { - simnow_init(buf + 6); - early_console = &simnow_console; - keep_early = 1; #ifdef CONFIG_EARLY_PRINTK_DBGP } else if (!strncmp(buf, "dbgp", 4)) { if (early_dbgp_init(buf+4) < 0) diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index 28b597ef9ca1..d6f0490a7391 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S @@ -619,28 +619,37 @@ END(syscall_badsys) 27:; /* - * Build the entry stubs and pointer table with - * some assembler magic. + * Build the entry stubs and pointer table with some assembler magic. + * We pack 7 stubs into a single 32-byte chunk, which will fit in a + * single cache line on all modern x86 implementations. 
*/ -.section .rodata,"a" +.section .init.rodata,"a" ENTRY(interrupt) .text - + .p2align 5 + .p2align CONFIG_X86_L1_CACHE_SHIFT ENTRY(irq_entries_start) RING0_INT_FRAME -vector=0 -.rept NR_VECTORS - ALIGN - .if vector +vector=FIRST_EXTERNAL_VECTOR +.rept (NR_VECTORS-FIRST_EXTERNAL_VECTOR+6)/7 + .balign 32 + .rept 7 + .if vector < NR_VECTORS + .if vector <> FIRST_EXTERNAL_VECTOR CFI_ADJUST_CFA_OFFSET -4 - .endif -1: pushl $~(vector) + .endif +1: pushl $(~vector+0x80) /* Note: always in signed byte range */ CFI_ADJUST_CFA_OFFSET 4 - jmp common_interrupt - .previous + .if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6 + jmp 2f + .endif + .previous .long 1b - .text + .text vector=vector+1 + .endif + .endr +2: jmp common_interrupt .endr END(irq_entries_start) @@ -652,8 +661,9 @@ END(interrupt) * the CPU automatically disables interrupts when executing an IRQ vector, * so IRQ-flags tracing has to follow that: */ - ALIGN + .p2align CONFIG_X86_L1_CACHE_SHIFT common_interrupt: + addl $-0x80,(%esp) /* Adjust vector into the [-256,-1] range */ SAVE_ALL TRACE_IRQS_OFF movl %esp,%eax @@ -678,65 +688,6 @@ ENDPROC(name) /* The include is where all of the SMP etc. interrupts come from */ #include "entry_arch.h" -KPROBE_ENTRY(page_fault) - RING0_EC_FRAME - pushl $do_page_fault - CFI_ADJUST_CFA_OFFSET 4 - ALIGN -error_code: - /* the function address is in %fs's slot on the stack */ - pushl %es - CFI_ADJUST_CFA_OFFSET 4 - /*CFI_REL_OFFSET es, 0*/ - pushl %ds - CFI_ADJUST_CFA_OFFSET 4 - /*CFI_REL_OFFSET ds, 0*/ - pushl %eax - CFI_ADJUST_CFA_OFFSET 4 - CFI_REL_OFFSET eax, 0 - pushl %ebp - CFI_ADJUST_CFA_OFFSET 4 - CFI_REL_OFFSET ebp, 0 - pushl %edi - CFI_ADJUST_CFA_OFFSET 4 - CFI_REL_OFFSET edi, 0 - pushl %esi - CFI_ADJUST_CFA_OFFSET 4 - CFI_REL_OFFSET esi, 0 - pushl %edx - CFI_ADJUST_CFA_OFFSET 4 - CFI_REL_OFFSET edx, 0 - pushl %ecx - CFI_ADJUST_CFA_OFFSET 4 - CFI_REL_OFFSET ecx, 0 - pushl %ebx - CFI_ADJUST_CFA_OFFSET 4 - CFI_REL_OFFSET ebx, 0 - cld - pushl %fs - CFI_ADJUST_CFA_OFFSET 4 - /*CFI_REL_OFFSET fs, 0*/ - movl $(__KERNEL_PERCPU), %ecx - movl %ecx, %fs - UNWIND_ESPFIX_STACK - popl %ecx - CFI_ADJUST_CFA_OFFSET -4 - /*CFI_REGISTER es, ecx*/ - movl PT_FS(%esp), %edi # get the function address - movl PT_ORIG_EAX(%esp), %edx # get the error code - movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart - mov %ecx, PT_FS(%esp) - /*CFI_REL_OFFSET fs, ES*/ - movl $(__USER_DS), %ecx - movl %ecx, %ds - movl %ecx, %es - TRACE_IRQS_OFF - movl %esp,%eax # pt_regs pointer - call *%edi - jmp ret_from_exception - CFI_ENDPROC -KPROBE_END(page_fault) - ENTRY(coprocessor_error) RING0_INT_FRAME pushl $0 @@ -767,140 +718,6 @@ ENTRY(device_not_available) CFI_ENDPROC END(device_not_available) -/* - * Debug traps and NMI can happen at the one SYSENTER instruction - * that sets up the real kernel stack. Check here, since we can't - * allow the wrong stack to be used. - * - * "TSS_sysenter_sp0+12" is because the NMI/debug handler will have - * already pushed 3 words if it hits on the sysenter instruction: - * eflags, cs and eip. - * - * We just load the right stack, and push the three (known) values - * by hand onto the new stack - while updating the return eip past - * the instruction that would have done it for sysenter. 
- */ -#define FIX_STACK(offset, ok, label) \ - cmpw $__KERNEL_CS,4(%esp); \ - jne ok; \ -label: \ - movl TSS_sysenter_sp0+offset(%esp),%esp; \ - CFI_DEF_CFA esp, 0; \ - CFI_UNDEFINED eip; \ - pushfl; \ - CFI_ADJUST_CFA_OFFSET 4; \ - pushl $__KERNEL_CS; \ - CFI_ADJUST_CFA_OFFSET 4; \ - pushl $sysenter_past_esp; \ - CFI_ADJUST_CFA_OFFSET 4; \ - CFI_REL_OFFSET eip, 0 - -KPROBE_ENTRY(debug) - RING0_INT_FRAME - cmpl $ia32_sysenter_target,(%esp) - jne debug_stack_correct - FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn) -debug_stack_correct: - pushl $-1 # mark this as an int - CFI_ADJUST_CFA_OFFSET 4 - SAVE_ALL - TRACE_IRQS_OFF - xorl %edx,%edx # error code 0 - movl %esp,%eax # pt_regs pointer - call do_debug - jmp ret_from_exception - CFI_ENDPROC -KPROBE_END(debug) - -/* - * NMI is doubly nasty. It can happen _while_ we're handling - * a debug fault, and the debug fault hasn't yet been able to - * clear up the stack. So we first check whether we got an - * NMI on the sysenter entry path, but after that we need to - * check whether we got an NMI on the debug path where the debug - * fault happened on the sysenter path. - */ -KPROBE_ENTRY(nmi) - RING0_INT_FRAME - pushl %eax - CFI_ADJUST_CFA_OFFSET 4 - movl %ss, %eax - cmpw $__ESPFIX_SS, %ax - popl %eax - CFI_ADJUST_CFA_OFFSET -4 - je nmi_espfix_stack - cmpl $ia32_sysenter_target,(%esp) - je nmi_stack_fixup - pushl %eax - CFI_ADJUST_CFA_OFFSET 4 - movl %esp,%eax - /* Do not access memory above the end of our stack page, - * it might not exist. - */ - andl $(THREAD_SIZE-1),%eax - cmpl $(THREAD_SIZE-20),%eax - popl %eax - CFI_ADJUST_CFA_OFFSET -4 - jae nmi_stack_correct - cmpl $ia32_sysenter_target,12(%esp) - je nmi_debug_stack_check -nmi_stack_correct: - /* We have a RING0_INT_FRAME here */ - pushl %eax - CFI_ADJUST_CFA_OFFSET 4 - SAVE_ALL - TRACE_IRQS_OFF - xorl %edx,%edx # zero error code - movl %esp,%eax # pt_regs pointer - call do_nmi - jmp restore_nocheck_notrace - CFI_ENDPROC - -nmi_stack_fixup: - RING0_INT_FRAME - FIX_STACK(12,nmi_stack_correct, 1) - jmp nmi_stack_correct - -nmi_debug_stack_check: - /* We have a RING0_INT_FRAME here */ - cmpw $__KERNEL_CS,16(%esp) - jne nmi_stack_correct - cmpl $debug,(%esp) - jb nmi_stack_correct - cmpl $debug_esp_fix_insn,(%esp) - ja nmi_stack_correct - FIX_STACK(24,nmi_stack_correct, 1) - jmp nmi_stack_correct - -nmi_espfix_stack: - /* We have a RING0_INT_FRAME here. 
- * - * create the pointer to lss back - */ - pushl %ss - CFI_ADJUST_CFA_OFFSET 4 - pushl %esp - CFI_ADJUST_CFA_OFFSET 4 - addw $4, (%esp) - /* copy the iret frame of 12 bytes */ - .rept 3 - pushl 16(%esp) - CFI_ADJUST_CFA_OFFSET 4 - .endr - pushl %eax - CFI_ADJUST_CFA_OFFSET 4 - SAVE_ALL - TRACE_IRQS_OFF - FIXUP_ESPFIX_STACK # %eax == %esp - xorl %edx,%edx # zero error code - call do_nmi - RESTORE_REGS - lss 12+4(%esp), %esp # back to espfix stack - CFI_ADJUST_CFA_OFFSET -24 - jmp irq_return - CFI_ENDPROC -KPROBE_END(nmi) - #ifdef CONFIG_PARAVIRT ENTRY(native_iret) iret @@ -916,19 +733,6 @@ ENTRY(native_irq_enable_sysexit) END(native_irq_enable_sysexit) #endif -KPROBE_ENTRY(int3) - RING0_INT_FRAME - pushl $-1 # mark this as an int - CFI_ADJUST_CFA_OFFSET 4 - SAVE_ALL - TRACE_IRQS_OFF - xorl %edx,%edx # zero error code - movl %esp,%eax # pt_regs pointer - call do_int3 - jmp ret_from_exception - CFI_ENDPROC -KPROBE_END(int3) - ENTRY(overflow) RING0_INT_FRAME pushl $0 @@ -993,14 +797,6 @@ ENTRY(stack_segment) CFI_ENDPROC END(stack_segment) -KPROBE_ENTRY(general_protection) - RING0_EC_FRAME - pushl $do_general_protection - CFI_ADJUST_CFA_OFFSET 4 - jmp error_code - CFI_ENDPROC -KPROBE_END(general_protection) - ENTRY(alignment_check) RING0_EC_FRAME pushl $do_alignment_check @@ -1051,6 +847,7 @@ ENTRY(kernel_thread_helper) push %eax CFI_ADJUST_CFA_OFFSET 4 call do_exit + ud2 # padding for call trace CFI_ENDPROC ENDPROC(kernel_thread_helper) @@ -1157,6 +954,9 @@ ENTRY(mcount) END(mcount) ENTRY(ftrace_caller) + cmpl $0, function_trace_stop + jne ftrace_stub + pushl %eax pushl %ecx pushl %edx @@ -1171,6 +971,11 @@ ftrace_call: popl %edx popl %ecx popl %eax +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +.globl ftrace_graph_call +ftrace_graph_call: + jmp ftrace_stub +#endif .globl ftrace_stub ftrace_stub: @@ -1180,8 +985,18 @@ END(ftrace_caller) #else /* ! 
CONFIG_DYNAMIC_FTRACE */ ENTRY(mcount) + cmpl $0, function_trace_stop + jne ftrace_stub + cmpl $ftrace_stub, ftrace_trace_function jnz trace +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + cmpl $ftrace_stub, ftrace_graph_return + jnz ftrace_graph_caller + + cmpl $ftrace_graph_entry_stub, ftrace_graph_entry + jnz ftrace_graph_caller +#endif .globl ftrace_stub ftrace_stub: ret @@ -1200,13 +1015,268 @@ trace: popl %edx popl %ecx popl %eax - jmp ftrace_stub END(mcount) #endif /* CONFIG_DYNAMIC_FTRACE */ #endif /* CONFIG_FUNCTION_TRACER */ +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +ENTRY(ftrace_graph_caller) + cmpl $0, function_trace_stop + jne ftrace_stub + + pushl %eax + pushl %ecx + pushl %edx + movl 0xc(%esp), %edx + lea 0x4(%ebp), %eax + subl $MCOUNT_INSN_SIZE, %edx + call prepare_ftrace_return + popl %edx + popl %ecx + popl %eax + ret +END(ftrace_graph_caller) + +.globl return_to_handler +return_to_handler: + pushl $0 + pushl %eax + pushl %ecx + pushl %edx + call ftrace_return_to_handler + movl %eax, 0xc(%esp) + popl %edx + popl %ecx + popl %eax + ret +#endif + .section .rodata,"a" #include "syscall_table_32.S" syscall_table_size=(.-sys_call_table) + +/* + * Some functions should be protected against kprobes + */ + .pushsection .kprobes.text, "ax" + +ENTRY(page_fault) + RING0_EC_FRAME + pushl $do_page_fault + CFI_ADJUST_CFA_OFFSET 4 + ALIGN +error_code: + /* the function address is in %fs's slot on the stack */ + pushl %es + CFI_ADJUST_CFA_OFFSET 4 + /*CFI_REL_OFFSET es, 0*/ + pushl %ds + CFI_ADJUST_CFA_OFFSET 4 + /*CFI_REL_OFFSET ds, 0*/ + pushl %eax + CFI_ADJUST_CFA_OFFSET 4 + CFI_REL_OFFSET eax, 0 + pushl %ebp + CFI_ADJUST_CFA_OFFSET 4 + CFI_REL_OFFSET ebp, 0 + pushl %edi + CFI_ADJUST_CFA_OFFSET 4 + CFI_REL_OFFSET edi, 0 + pushl %esi + CFI_ADJUST_CFA_OFFSET 4 + CFI_REL_OFFSET esi, 0 + pushl %edx + CFI_ADJUST_CFA_OFFSET 4 + CFI_REL_OFFSET edx, 0 + pushl %ecx + CFI_ADJUST_CFA_OFFSET 4 + CFI_REL_OFFSET ecx, 0 + pushl %ebx + CFI_ADJUST_CFA_OFFSET 4 + CFI_REL_OFFSET ebx, 0 + cld + pushl %fs + CFI_ADJUST_CFA_OFFSET 4 + /*CFI_REL_OFFSET fs, 0*/ + movl $(__KERNEL_PERCPU), %ecx + movl %ecx, %fs + UNWIND_ESPFIX_STACK + popl %ecx + CFI_ADJUST_CFA_OFFSET -4 + /*CFI_REGISTER es, ecx*/ + movl PT_FS(%esp), %edi # get the function address + movl PT_ORIG_EAX(%esp), %edx # get the error code + movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart + mov %ecx, PT_FS(%esp) + /*CFI_REL_OFFSET fs, ES*/ + movl $(__USER_DS), %ecx + movl %ecx, %ds + movl %ecx, %es + TRACE_IRQS_OFF + movl %esp,%eax # pt_regs pointer + call *%edi + jmp ret_from_exception + CFI_ENDPROC +END(page_fault) + +/* + * Debug traps and NMI can happen at the one SYSENTER instruction + * that sets up the real kernel stack. Check here, since we can't + * allow the wrong stack to be used. + * + * "TSS_sysenter_sp0+12" is because the NMI/debug handler will have + * already pushed 3 words if it hits on the sysenter instruction: + * eflags, cs and eip. + * + * We just load the right stack, and push the three (known) values + * by hand onto the new stack - while updating the return eip past + * the instruction that would have done it for sysenter. 
+ */ +#define FIX_STACK(offset, ok, label) \ + cmpw $__KERNEL_CS,4(%esp); \ + jne ok; \ +label: \ + movl TSS_sysenter_sp0+offset(%esp),%esp; \ + CFI_DEF_CFA esp, 0; \ + CFI_UNDEFINED eip; \ + pushfl; \ + CFI_ADJUST_CFA_OFFSET 4; \ + pushl $__KERNEL_CS; \ + CFI_ADJUST_CFA_OFFSET 4; \ + pushl $sysenter_past_esp; \ + CFI_ADJUST_CFA_OFFSET 4; \ + CFI_REL_OFFSET eip, 0 + +ENTRY(debug) + RING0_INT_FRAME + cmpl $ia32_sysenter_target,(%esp) + jne debug_stack_correct + FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn) +debug_stack_correct: + pushl $-1 # mark this as an int + CFI_ADJUST_CFA_OFFSET 4 + SAVE_ALL + TRACE_IRQS_OFF + xorl %edx,%edx # error code 0 + movl %esp,%eax # pt_regs pointer + call do_debug + jmp ret_from_exception + CFI_ENDPROC +END(debug) + +/* + * NMI is doubly nasty. It can happen _while_ we're handling + * a debug fault, and the debug fault hasn't yet been able to + * clear up the stack. So we first check whether we got an + * NMI on the sysenter entry path, but after that we need to + * check whether we got an NMI on the debug path where the debug + * fault happened on the sysenter path. + */ +ENTRY(nmi) + RING0_INT_FRAME + pushl %eax + CFI_ADJUST_CFA_OFFSET 4 + movl %ss, %eax + cmpw $__ESPFIX_SS, %ax + popl %eax + CFI_ADJUST_CFA_OFFSET -4 + je nmi_espfix_stack + cmpl $ia32_sysenter_target,(%esp) + je nmi_stack_fixup + pushl %eax + CFI_ADJUST_CFA_OFFSET 4 + movl %esp,%eax + /* Do not access memory above the end of our stack page, + * it might not exist. + */ + andl $(THREAD_SIZE-1),%eax + cmpl $(THREAD_SIZE-20),%eax + popl %eax + CFI_ADJUST_CFA_OFFSET -4 + jae nmi_stack_correct + cmpl $ia32_sysenter_target,12(%esp) + je nmi_debug_stack_check +nmi_stack_correct: + /* We have a RING0_INT_FRAME here */ + pushl %eax + CFI_ADJUST_CFA_OFFSET 4 + SAVE_ALL + TRACE_IRQS_OFF + xorl %edx,%edx # zero error code + movl %esp,%eax # pt_regs pointer + call do_nmi + jmp restore_nocheck_notrace + CFI_ENDPROC + +nmi_stack_fixup: + RING0_INT_FRAME + FIX_STACK(12,nmi_stack_correct, 1) + jmp nmi_stack_correct + +nmi_debug_stack_check: + /* We have a RING0_INT_FRAME here */ + cmpw $__KERNEL_CS,16(%esp) + jne nmi_stack_correct + cmpl $debug,(%esp) + jb nmi_stack_correct + cmpl $debug_esp_fix_insn,(%esp) + ja nmi_stack_correct + FIX_STACK(24,nmi_stack_correct, 1) + jmp nmi_stack_correct + +nmi_espfix_stack: + /* We have a RING0_INT_FRAME here. 
+ * + * create the pointer to lss back + */ + pushl %ss + CFI_ADJUST_CFA_OFFSET 4 + pushl %esp + CFI_ADJUST_CFA_OFFSET 4 + addw $4, (%esp) + /* copy the iret frame of 12 bytes */ + .rept 3 + pushl 16(%esp) + CFI_ADJUST_CFA_OFFSET 4 + .endr + pushl %eax + CFI_ADJUST_CFA_OFFSET 4 + SAVE_ALL + TRACE_IRQS_OFF + FIXUP_ESPFIX_STACK # %eax == %esp + xorl %edx,%edx # zero error code + call do_nmi + RESTORE_REGS + lss 12+4(%esp), %esp # back to espfix stack + CFI_ADJUST_CFA_OFFSET -24 + jmp irq_return + CFI_ENDPROC +END(nmi) + +ENTRY(int3) + RING0_INT_FRAME + pushl $-1 # mark this as an int + CFI_ADJUST_CFA_OFFSET 4 + SAVE_ALL + TRACE_IRQS_OFF + xorl %edx,%edx # zero error code + movl %esp,%eax # pt_regs pointer + call do_int3 + jmp ret_from_exception + CFI_ENDPROC +END(int3) + +ENTRY(general_protection) + RING0_EC_FRAME + pushl $do_general_protection + CFI_ADJUST_CFA_OFFSET 4 + jmp error_code + CFI_ENDPROC +END(general_protection) + +/* + * End of kprobes section + */ + .popsection diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index b86f332c96a6..e28c7a987793 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -11,15 +11,15 @@ * * NOTE: This code handles signal-recognition, which happens every time * after an interrupt and after each system call. - * - * Normal syscalls and interrupts don't save a full stack frame, this is + * + * Normal syscalls and interrupts don't save a full stack frame, this is * only done for syscall tracing, signals or fork/exec et.al. - * - * A note on terminology: - * - top of stack: Architecture defined interrupt frame from SS to RIP - * at the top of the kernel process stack. + * + * A note on terminology: + * - top of stack: Architecture defined interrupt frame from SS to RIP + * at the top of the kernel process stack. * - partial stack frame: partially saved registers upto R11. - * - full stack frame: Like partial stack frame, but all register saved. + * - full stack frame: Like partial stack frame, but all register saved. * * Some macro usage: * - CFI macros are used to generate dwarf2 unwind information for better @@ -60,7 +60,6 @@ #define __AUDIT_ARCH_LE 0x40000000 .code64 - #ifdef CONFIG_FUNCTION_TRACER #ifdef CONFIG_DYNAMIC_FTRACE ENTRY(mcount) @@ -68,16 +67,10 @@ ENTRY(mcount) END(mcount) ENTRY(ftrace_caller) + cmpl $0, function_trace_stop + jne ftrace_stub - /* taken from glibc */ - subq $0x38, %rsp - movq %rax, (%rsp) - movq %rcx, 8(%rsp) - movq %rdx, 16(%rsp) - movq %rsi, 24(%rsp) - movq %rdi, 32(%rsp) - movq %r8, 40(%rsp) - movq %r9, 48(%rsp) + MCOUNT_SAVE_FRAME movq 0x38(%rsp), %rdi movq 8(%rbp), %rsi @@ -87,14 +80,13 @@ ENTRY(ftrace_caller) ftrace_call: call ftrace_stub - movq 48(%rsp), %r9 - movq 40(%rsp), %r8 - movq 32(%rsp), %rdi - movq 24(%rsp), %rsi - movq 16(%rsp), %rdx - movq 8(%rsp), %rcx - movq (%rsp), %rax - addq $0x38, %rsp + MCOUNT_RESTORE_FRAME + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +.globl ftrace_graph_call +ftrace_graph_call: + jmp ftrace_stub +#endif .globl ftrace_stub ftrace_stub: @@ -103,15 +95,63 @@ END(ftrace_caller) #else /* ! 
CONFIG_DYNAMIC_FTRACE */ ENTRY(mcount) + cmpl $0, function_trace_stop + jne ftrace_stub + cmpq $ftrace_stub, ftrace_trace_function jnz trace + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + cmpq $ftrace_stub, ftrace_graph_return + jnz ftrace_graph_caller + + cmpq $ftrace_graph_entry_stub, ftrace_graph_entry + jnz ftrace_graph_caller +#endif + .globl ftrace_stub ftrace_stub: retq trace: - /* taken from glibc */ - subq $0x38, %rsp + MCOUNT_SAVE_FRAME + + movq 0x38(%rsp), %rdi + movq 8(%rbp), %rsi + subq $MCOUNT_INSN_SIZE, %rdi + + call *ftrace_trace_function + + MCOUNT_RESTORE_FRAME + + jmp ftrace_stub +END(mcount) +#endif /* CONFIG_DYNAMIC_FTRACE */ +#endif /* CONFIG_FUNCTION_TRACER */ + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +ENTRY(ftrace_graph_caller) + cmpl $0, function_trace_stop + jne ftrace_stub + + MCOUNT_SAVE_FRAME + + leaq 8(%rbp), %rdi + movq 0x38(%rsp), %rsi + subq $MCOUNT_INSN_SIZE, %rsi + + call prepare_ftrace_return + + MCOUNT_RESTORE_FRAME + + retq +END(ftrace_graph_caller) + + +.globl return_to_handler +return_to_handler: + subq $80, %rsp + movq %rax, (%rsp) movq %rcx, 8(%rsp) movq %rdx, 16(%rsp) @@ -119,13 +159,14 @@ trace: movq %rdi, 32(%rsp) movq %r8, 40(%rsp) movq %r9, 48(%rsp) + movq %r10, 56(%rsp) + movq %r11, 64(%rsp) - movq 0x38(%rsp), %rdi - movq 8(%rbp), %rsi - subq $MCOUNT_INSN_SIZE, %rdi - - call *ftrace_trace_function + call ftrace_return_to_handler + movq %rax, 72(%rsp) + movq 64(%rsp), %r11 + movq 56(%rsp), %r10 movq 48(%rsp), %r9 movq 40(%rsp), %r8 movq 32(%rsp), %rdi @@ -133,16 +174,14 @@ trace: movq 16(%rsp), %rdx movq 8(%rsp), %rcx movq (%rsp), %rax - addq $0x38, %rsp + addq $72, %rsp + retq +#endif - jmp ftrace_stub -END(mcount) -#endif /* CONFIG_DYNAMIC_FTRACE */ -#endif /* CONFIG_FUNCTION_TRACER */ #ifndef CONFIG_PREEMPT #define retint_kernel retint_restore_args -#endif +#endif #ifdef CONFIG_PARAVIRT ENTRY(native_usergs_sysret64) @@ -161,29 +200,29 @@ ENTRY(native_usergs_sysret64) .endm /* - * C code is not supposed to know about undefined top of stack. Every time - * a C function with an pt_regs argument is called from the SYSCALL based + * C code is not supposed to know about undefined top of stack. Every time + * a C function with an pt_regs argument is called from the SYSCALL based * fast path FIXUP_TOP_OF_STACK is needed. * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs * manipulation. 
- */ - - /* %rsp:at FRAMEEND */ - .macro FIXUP_TOP_OF_STACK tmp - movq %gs:pda_oldrsp,\tmp - movq \tmp,RSP(%rsp) - movq $__USER_DS,SS(%rsp) - movq $__USER_CS,CS(%rsp) - movq $-1,RCX(%rsp) - movq R11(%rsp),\tmp /* get eflags */ - movq \tmp,EFLAGS(%rsp) + */ + + /* %rsp:at FRAMEEND */ + .macro FIXUP_TOP_OF_STACK tmp offset=0 + movq %gs:pda_oldrsp,\tmp + movq \tmp,RSP+\offset(%rsp) + movq $__USER_DS,SS+\offset(%rsp) + movq $__USER_CS,CS+\offset(%rsp) + movq $-1,RCX+\offset(%rsp) + movq R11+\offset(%rsp),\tmp /* get eflags */ + movq \tmp,EFLAGS+\offset(%rsp) .endm - .macro RESTORE_TOP_OF_STACK tmp,offset=0 - movq RSP-\offset(%rsp),\tmp - movq \tmp,%gs:pda_oldrsp - movq EFLAGS-\offset(%rsp),\tmp - movq \tmp,R11-\offset(%rsp) + .macro RESTORE_TOP_OF_STACK tmp offset=0 + movq RSP+\offset(%rsp),\tmp + movq \tmp,%gs:pda_oldrsp + movq EFLAGS+\offset(%rsp),\tmp + movq \tmp,R11+\offset(%rsp) .endm .macro FAKE_STACK_FRAME child_rip @@ -195,7 +234,7 @@ ENTRY(native_usergs_sysret64) pushq %rax /* rsp */ CFI_ADJUST_CFA_OFFSET 8 CFI_REL_OFFSET rsp,0 - pushq $(1<<9) /* eflags - interrupts on */ + pushq $X86_EFLAGS_IF /* eflags - interrupts on */ CFI_ADJUST_CFA_OFFSET 8 /*CFI_REL_OFFSET rflags,0*/ pushq $__KERNEL_CS /* cs */ @@ -213,62 +252,184 @@ ENTRY(native_usergs_sysret64) CFI_ADJUST_CFA_OFFSET -(6*8) .endm - .macro CFI_DEFAULT_STACK start=1 +/* + * initial frame state for interrupts (and exceptions without error code) + */ + .macro EMPTY_FRAME start=1 offset=0 .if \start - CFI_STARTPROC simple + CFI_STARTPROC simple CFI_SIGNAL_FRAME - CFI_DEF_CFA rsp,SS+8 + CFI_DEF_CFA rsp,8+\offset .else - CFI_DEF_CFA_OFFSET SS+8 + CFI_DEF_CFA_OFFSET 8+\offset .endif - CFI_REL_OFFSET r15,R15 - CFI_REL_OFFSET r14,R14 - CFI_REL_OFFSET r13,R13 - CFI_REL_OFFSET r12,R12 - CFI_REL_OFFSET rbp,RBP - CFI_REL_OFFSET rbx,RBX - CFI_REL_OFFSET r11,R11 - CFI_REL_OFFSET r10,R10 - CFI_REL_OFFSET r9,R9 - CFI_REL_OFFSET r8,R8 - CFI_REL_OFFSET rax,RAX - CFI_REL_OFFSET rcx,RCX - CFI_REL_OFFSET rdx,RDX - CFI_REL_OFFSET rsi,RSI - CFI_REL_OFFSET rdi,RDI - CFI_REL_OFFSET rip,RIP - /*CFI_REL_OFFSET cs,CS*/ - /*CFI_REL_OFFSET rflags,EFLAGS*/ - CFI_REL_OFFSET rsp,RSP - /*CFI_REL_OFFSET ss,SS*/ .endm + +/* + * initial frame state for interrupts (and exceptions without error code) + */ + .macro INTR_FRAME start=1 offset=0 + EMPTY_FRAME \start, SS+8+\offset-RIP + /*CFI_REL_OFFSET ss, SS+\offset-RIP*/ + CFI_REL_OFFSET rsp, RSP+\offset-RIP + /*CFI_REL_OFFSET rflags, EFLAGS+\offset-RIP*/ + /*CFI_REL_OFFSET cs, CS+\offset-RIP*/ + CFI_REL_OFFSET rip, RIP+\offset-RIP + .endm + +/* + * initial frame state for exceptions with error code (and interrupts + * with vector already pushed) + */ + .macro XCPT_FRAME start=1 offset=0 + INTR_FRAME \start, RIP+\offset-ORIG_RAX + /*CFI_REL_OFFSET orig_rax, ORIG_RAX-ORIG_RAX*/ + .endm + /* - * A newly forked process directly context switches into this. - */ -/* rdi: prev */ + * frame that enables calling into C. + */ + .macro PARTIAL_FRAME start=1 offset=0 + XCPT_FRAME \start, ORIG_RAX+\offset-ARGOFFSET + CFI_REL_OFFSET rdi, RDI+\offset-ARGOFFSET + CFI_REL_OFFSET rsi, RSI+\offset-ARGOFFSET + CFI_REL_OFFSET rdx, RDX+\offset-ARGOFFSET + CFI_REL_OFFSET rcx, RCX+\offset-ARGOFFSET + CFI_REL_OFFSET rax, RAX+\offset-ARGOFFSET + CFI_REL_OFFSET r8, R8+\offset-ARGOFFSET + CFI_REL_OFFSET r9, R9+\offset-ARGOFFSET + CFI_REL_OFFSET r10, R10+\offset-ARGOFFSET + CFI_REL_OFFSET r11, R11+\offset-ARGOFFSET + .endm + +/* + * frame that enables passing a complete pt_regs to a C function. 
+ */ + .macro DEFAULT_FRAME start=1 offset=0 + PARTIAL_FRAME \start, R11+\offset-R15 + CFI_REL_OFFSET rbx, RBX+\offset + CFI_REL_OFFSET rbp, RBP+\offset + CFI_REL_OFFSET r12, R12+\offset + CFI_REL_OFFSET r13, R13+\offset + CFI_REL_OFFSET r14, R14+\offset + CFI_REL_OFFSET r15, R15+\offset + .endm + +/* save partial stack frame */ +ENTRY(save_args) + XCPT_FRAME + cld + movq_cfi rdi, RDI+16-ARGOFFSET + movq_cfi rsi, RSI+16-ARGOFFSET + movq_cfi rdx, RDX+16-ARGOFFSET + movq_cfi rcx, RCX+16-ARGOFFSET + movq_cfi rax, RAX+16-ARGOFFSET + movq_cfi r8, R8+16-ARGOFFSET + movq_cfi r9, R9+16-ARGOFFSET + movq_cfi r10, R10+16-ARGOFFSET + movq_cfi r11, R11+16-ARGOFFSET + + leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */ + movq_cfi rbp, 8 /* push %rbp */ + leaq 8(%rsp), %rbp /* mov %rsp, %ebp */ + testl $3, CS(%rdi) + je 1f + SWAPGS + /* + * irqcount is used to check if a CPU is already on an interrupt stack + * or not. While this is essentially redundant with preempt_count it is + * a little cheaper to use a separate counter in the PDA (short of + * moving irq_enter into assembly, which would be too much work) + */ +1: incl %gs:pda_irqcount + jne 2f + popq_cfi %rax /* move return address... */ + mov %gs:pda_irqstackptr,%rsp + EMPTY_FRAME 0 + pushq_cfi %rax /* ... to the new stack */ + /* + * We entered an interrupt context - irqs are off: + */ +2: TRACE_IRQS_OFF + ret + CFI_ENDPROC +END(save_args) + +ENTRY(save_rest) + PARTIAL_FRAME 1 REST_SKIP+8 + movq 5*8+16(%rsp), %r11 /* save return address */ + movq_cfi rbx, RBX+16 + movq_cfi rbp, RBP+16 + movq_cfi r12, R12+16 + movq_cfi r13, R13+16 + movq_cfi r14, R14+16 + movq_cfi r15, R15+16 + movq %r11, 8(%rsp) /* return address */ + FIXUP_TOP_OF_STACK %r11, 16 + ret + CFI_ENDPROC +END(save_rest) + +/* save complete stack frame */ +ENTRY(save_paranoid) + XCPT_FRAME 1 RDI+8 + cld + movq_cfi rdi, RDI+8 + movq_cfi rsi, RSI+8 + movq_cfi rdx, RDX+8 + movq_cfi rcx, RCX+8 + movq_cfi rax, RAX+8 + movq_cfi r8, R8+8 + movq_cfi r9, R9+8 + movq_cfi r10, R10+8 + movq_cfi r11, R11+8 + movq_cfi rbx, RBX+8 + movq_cfi rbp, RBP+8 + movq_cfi r12, R12+8 + movq_cfi r13, R13+8 + movq_cfi r14, R14+8 + movq_cfi r15, R15+8 + movl $1,%ebx + movl $MSR_GS_BASE,%ecx + rdmsr + testl %edx,%edx + js 1f /* negative -> in kernel */ + SWAPGS + xorl %ebx,%ebx +1: ret + CFI_ENDPROC +END(save_paranoid) + +/* + * A newly forked process directly context switches into this address. + * + * rdi: prev task we switched from + */ ENTRY(ret_from_fork) - CFI_DEFAULT_STACK + DEFAULT_FRAME + push kernel_eflags(%rip) CFI_ADJUST_CFA_OFFSET 8 - popf # reset kernel eflags + popf # reset kernel eflags CFI_ADJUST_CFA_OFFSET -8 - call schedule_tail + + call schedule_tail # rdi: 'prev' task parameter + GET_THREAD_INFO(%rcx) - testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%rcx) - jnz rff_trace -rff_action: + + CFI_REMEMBER_STATE RESTORE_REST - testl $3,CS-ARGOFFSET(%rsp) # from kernel_thread? + + testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread? 
je int_ret_from_sys_call - testl $_TIF_IA32,TI_flags(%rcx) + + testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET jnz int_ret_from_sys_call - RESTORE_TOP_OF_STACK %rdi,ARGOFFSET - jmp ret_from_sys_call -rff_trace: - movq %rsp,%rdi - call syscall_trace_leave - GET_THREAD_INFO(%rcx) - jmp rff_action + + RESTORE_TOP_OF_STACK %rdi, -ARGOFFSET + jmp ret_from_sys_call # go to the SYSRET fastpath + + CFI_RESTORE_STATE CFI_ENDPROC END(ret_from_fork) @@ -278,20 +439,20 @@ END(ret_from_fork) * SYSCALL does not save anything on the stack and does not change the * stack pointer. */ - + /* - * Register setup: + * Register setup: * rax system call number * rdi arg0 - * rcx return address for syscall/sysret, C arg3 + * rcx return address for syscall/sysret, C arg3 * rsi arg1 - * rdx arg2 + * rdx arg2 * r10 arg3 (--> moved to rcx for C) * r8 arg4 * r9 arg5 * r11 eflags for syscall/sysret, temporary for C - * r12-r15,rbp,rbx saved by C code, not touched. - * + * r12-r15,rbp,rbx saved by C code, not touched. + * * Interrupts are off on entry. * Only called from user space. * @@ -301,7 +462,7 @@ END(ret_from_fork) * When user can change the frames always force IRET. That is because * it deals with uncanonical addresses better. SYSRET has trouble * with them due to bugs in both AMD and Intel CPUs. - */ + */ ENTRY(system_call) CFI_STARTPROC simple @@ -317,7 +478,7 @@ ENTRY(system_call) */ ENTRY(system_call_after_swapgs) - movq %rsp,%gs:pda_oldrsp + movq %rsp,%gs:pda_oldrsp movq %gs:pda_kernelstack,%rsp /* * No need to follow this irqs off/on section - it's straight @@ -325,7 +486,7 @@ ENTRY(system_call_after_swapgs) */ ENABLE_INTERRUPTS(CLBR_NONE) SAVE_ARGS 8,1 - movq %rax,ORIG_RAX-ARGOFFSET(%rsp) + movq %rax,ORIG_RAX-ARGOFFSET(%rsp) movq %rcx,RIP-ARGOFFSET(%rsp) CFI_REL_OFFSET rip,RIP-ARGOFFSET GET_THREAD_INFO(%rcx) @@ -339,19 +500,19 @@ system_call_fastpath: movq %rax,RAX-ARGOFFSET(%rsp) /* * Syscall return path ending with SYSRET (fast path) - * Has incomplete stack frame and undefined top of stack. - */ + * Has incomplete stack frame and undefined top of stack. + */ ret_from_sys_call: movl $_TIF_ALLWORK_MASK,%edi /* edi: flagmask */ -sysret_check: +sysret_check: LOCKDEP_SYS_EXIT GET_THREAD_INFO(%rcx) DISABLE_INTERRUPTS(CLBR_NONE) TRACE_IRQS_OFF movl TI_flags(%rcx),%edx andl %edi,%edx - jnz sysret_careful + jnz sysret_careful CFI_REMEMBER_STATE /* * sysretq will re-enable interrupts: @@ -366,7 +527,7 @@ sysret_check: CFI_RESTORE_STATE /* Handle reschedules */ - /* edx: work, edi: workmask */ + /* edx: work, edi: workmask */ sysret_careful: bt $TIF_NEED_RESCHED,%edx jnc sysret_signal @@ -379,7 +540,7 @@ sysret_careful: CFI_ADJUST_CFA_OFFSET -8 jmp sysret_check - /* Handle a signal */ + /* Handle a signal */ sysret_signal: TRACE_IRQS_ON ENABLE_INTERRUPTS(CLBR_NONE) @@ -388,17 +549,20 @@ sysret_signal: jc sysret_audit #endif /* edx: work flags (arg3) */ - leaq do_notify_resume(%rip),%rax leaq -ARGOFFSET(%rsp),%rdi # &pt_regs -> arg1 xorl %esi,%esi # oldset -> arg2 - call ptregscall_common + SAVE_REST + FIXUP_TOP_OF_STACK %r11 + call do_notify_resume + RESTORE_TOP_OF_STACK %r11 + RESTORE_REST movl $_TIF_WORK_MASK,%edi /* Use IRET because user could have changed frame. This works because ptregscall_common has called FIXUP_TOP_OF_STACK. 
*/ DISABLE_INTERRUPTS(CLBR_NONE) TRACE_IRQS_OFF jmp int_with_check - + badsys: movq $-ENOSYS,RAX-ARGOFFSET(%rsp) jmp ret_from_sys_call @@ -437,7 +601,7 @@ sysret_audit: #endif /* CONFIG_AUDITSYSCALL */ /* Do syscall tracing */ -tracesys: +tracesys: #ifdef CONFIG_AUDITSYSCALL testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx) jz auditsys @@ -460,8 +624,8 @@ tracesys: call *sys_call_table(,%rax,8) movq %rax,RAX-ARGOFFSET(%rsp) /* Use IRET because user could have changed frame */ - -/* + +/* * Syscall return path ending with IRET. * Has correct top of stack, but partial stack frame. */ @@ -505,18 +669,18 @@ int_very_careful: TRACE_IRQS_ON ENABLE_INTERRUPTS(CLBR_NONE) SAVE_REST - /* Check for syscall exit trace */ + /* Check for syscall exit trace */ testl $_TIF_WORK_SYSCALL_EXIT,%edx jz int_signal pushq %rdi CFI_ADJUST_CFA_OFFSET 8 - leaq 8(%rsp),%rdi # &ptregs -> arg1 + leaq 8(%rsp),%rdi # &ptregs -> arg1 call syscall_trace_leave popq %rdi CFI_ADJUST_CFA_OFFSET -8 andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi jmp int_restore_rest - + int_signal: testl $_TIF_DO_NOTIFY_MASK,%edx jz 1f @@ -531,22 +695,24 @@ int_restore_rest: jmp int_with_check CFI_ENDPROC END(system_call) - -/* + +/* * Certain special system calls that need to save a complete full stack frame. - */ - + */ .macro PTREGSCALL label,func,arg - .globl \label -\label: - leaq \func(%rip),%rax - leaq -ARGOFFSET+8(%rsp),\arg /* 8 for return address */ - jmp ptregscall_common +ENTRY(\label) + PARTIAL_FRAME 1 8 /* offset 8: return address */ + subq $REST_SKIP, %rsp + CFI_ADJUST_CFA_OFFSET REST_SKIP + call save_rest + DEFAULT_FRAME 0 8 /* offset 8: return address */ + leaq 8(%rsp), \arg /* pt_regs pointer */ + call \func + jmp ptregscall_common + CFI_ENDPROC END(\label) .endm - CFI_STARTPROC - PTREGSCALL stub_clone, sys_clone, %r8 PTREGSCALL stub_fork, sys_fork, %rdi PTREGSCALL stub_vfork, sys_vfork, %rdi @@ -554,25 +720,18 @@ END(\label) PTREGSCALL stub_iopl, sys_iopl, %rsi ENTRY(ptregscall_common) - popq %r11 - CFI_ADJUST_CFA_OFFSET -8 - CFI_REGISTER rip, r11 - SAVE_REST - movq %r11, %r15 - CFI_REGISTER rip, r15 - FIXUP_TOP_OF_STACK %r11 - call *%rax - RESTORE_TOP_OF_STACK %r11 - movq %r15, %r11 - CFI_REGISTER rip, r11 - RESTORE_REST - pushq %r11 - CFI_ADJUST_CFA_OFFSET 8 - CFI_REL_OFFSET rip, 0 - ret + DEFAULT_FRAME 1 8 /* offset 8: return address */ + RESTORE_TOP_OF_STACK %r11, 8 + movq_cfi_restore R15+8, r15 + movq_cfi_restore R14+8, r14 + movq_cfi_restore R13+8, r13 + movq_cfi_restore R12+8, r12 + movq_cfi_restore RBP+8, rbp + movq_cfi_restore RBX+8, rbx + ret $REST_SKIP /* pop extended registers */ CFI_ENDPROC END(ptregscall_common) - + ENTRY(stub_execve) CFI_STARTPROC popq %r11 @@ -588,11 +747,11 @@ ENTRY(stub_execve) jmp int_ret_from_sys_call CFI_ENDPROC END(stub_execve) - + /* * sigreturn is special because it needs to restore all registers on return. * This cannot be done with SYSRET, so use the IRET return path instead. - */ + */ ENTRY(stub_rt_sigreturn) CFI_STARTPROC addq $8, %rsp @@ -608,70 +767,70 @@ ENTRY(stub_rt_sigreturn) END(stub_rt_sigreturn) /* - * initial frame state for interrupts and exceptions + * Build the entry stubs and pointer table with some assembler magic. + * We pack 7 stubs into a single 32-byte chunk, which will fit in a + * single cache line on all modern x86 implementations. 
*/ - .macro _frame ref - CFI_STARTPROC simple - CFI_SIGNAL_FRAME - CFI_DEF_CFA rsp,SS+8-\ref - /*CFI_REL_OFFSET ss,SS-\ref*/ - CFI_REL_OFFSET rsp,RSP-\ref - /*CFI_REL_OFFSET rflags,EFLAGS-\ref*/ - /*CFI_REL_OFFSET cs,CS-\ref*/ - CFI_REL_OFFSET rip,RIP-\ref - .endm + .section .init.rodata,"a" +ENTRY(interrupt) + .text + .p2align 5 + .p2align CONFIG_X86_L1_CACHE_SHIFT +ENTRY(irq_entries_start) + INTR_FRAME +vector=FIRST_EXTERNAL_VECTOR +.rept (NR_VECTORS-FIRST_EXTERNAL_VECTOR+6)/7 + .balign 32 + .rept 7 + .if vector < NR_VECTORS + .if vector <> FIRST_EXTERNAL_VECTOR + CFI_ADJUST_CFA_OFFSET -8 + .endif +1: pushq $(~vector+0x80) /* Note: always in signed byte range */ + CFI_ADJUST_CFA_OFFSET 8 + .if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6 + jmp 2f + .endif + .previous + .quad 1b + .text +vector=vector+1 + .endif + .endr +2: jmp common_interrupt +.endr + CFI_ENDPROC +END(irq_entries_start) -/* initial frame state for interrupts (and exceptions without error code) */ -#define INTR_FRAME _frame RIP -/* initial frame state for exceptions with error code (and interrupts with - vector already pushed) */ -#define XCPT_FRAME _frame ORIG_RAX +.previous +END(interrupt) +.previous -/* +/* * Interrupt entry/exit. * * Interrupt entry points save only callee clobbered registers in fast path. - * - * Entry runs with interrupts off. - */ + * + * Entry runs with interrupts off. + */ -/* 0(%rsp): interrupt number */ +/* 0(%rsp): ~(interrupt number) */ .macro interrupt func - cld - SAVE_ARGS - leaq -ARGOFFSET(%rsp),%rdi # arg1 for handler - pushq %rbp - /* - * Save rbp twice: One is for marking the stack frame, as usual, and the - * other, to fill pt_regs properly. This is because bx comes right - * before the last saved register in that structure, and not bp. If the - * base pointer were in the place bx is today, this would not be needed. - */ - movq %rbp, -8(%rsp) - CFI_ADJUST_CFA_OFFSET 8 - CFI_REL_OFFSET rbp, 0 - movq %rsp,%rbp - CFI_DEF_CFA_REGISTER rbp - testl $3,CS(%rdi) - je 1f - SWAPGS - /* irqcount is used to check if a CPU is already on an interrupt - stack or not. While this is essentially redundant with preempt_count - it is a little cheaper to use a separate counter in the PDA - (short of moving irq_enter into assembly, which would be too - much work) */ -1: incl %gs:pda_irqcount - cmoveq %gs:pda_irqstackptr,%rsp - push %rbp # backlink for old unwinder - /* - * We entered an interrupt context - irqs are off: - */ - TRACE_IRQS_OFF + subq $10*8, %rsp + CFI_ADJUST_CFA_OFFSET 10*8 + call save_args + PARTIAL_FRAME 0 call \func .endm -ENTRY(common_interrupt) + /* + * The interrupt stubs push (~vector+0x80) onto the stack and + * then jump to common_interrupt. + */ + .p2align CONFIG_X86_L1_CACHE_SHIFT +common_interrupt: XCPT_FRAME + addq $-0x80,(%rsp) /* Adjust vector to [-256,-1] range */ interrupt do_IRQ /* 0(%rsp): oldrsp-ARGOFFSET */ ret_from_intr: @@ -685,12 +844,12 @@ exit_intr: GET_THREAD_INFO(%rcx) testl $3,CS-ARGOFFSET(%rsp) je retint_kernel - + /* Interrupt came from user space */ /* * Has a correct top of stack, but a partial stack frame * %rcx: thread info. Interrupts off. 
- */ + */ retint_with_reschedule: movl $_TIF_WORK_MASK,%edi retint_check: @@ -763,20 +922,20 @@ retint_careful: pushq %rdi CFI_ADJUST_CFA_OFFSET 8 call schedule - popq %rdi + popq %rdi CFI_ADJUST_CFA_OFFSET -8 GET_THREAD_INFO(%rcx) DISABLE_INTERRUPTS(CLBR_NONE) TRACE_IRQS_OFF jmp retint_check - + retint_signal: testl $_TIF_DO_NOTIFY_MASK,%edx jz retint_swapgs TRACE_IRQS_ON ENABLE_INTERRUPTS(CLBR_NONE) SAVE_REST - movq $-1,ORIG_RAX(%rsp) + movq $-1,ORIG_RAX(%rsp) xorl %esi,%esi # oldset movq %rsp,%rdi # &pt_regs call do_notify_resume @@ -798,324 +957,211 @@ ENTRY(retint_kernel) jnc retint_restore_args call preempt_schedule_irq jmp exit_intr -#endif +#endif CFI_ENDPROC END(common_interrupt) - + /* * APIC interrupts. - */ - .macro apicinterrupt num,func + */ +.macro apicinterrupt num sym do_sym +ENTRY(\sym) INTR_FRAME pushq $~(\num) CFI_ADJUST_CFA_OFFSET 8 - interrupt \func + interrupt \do_sym jmp ret_from_intr CFI_ENDPROC - .endm - -ENTRY(thermal_interrupt) - apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt -END(thermal_interrupt) - -ENTRY(threshold_interrupt) - apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt -END(threshold_interrupt) - -#ifdef CONFIG_SMP -ENTRY(reschedule_interrupt) - apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt -END(reschedule_interrupt) - - .macro INVALIDATE_ENTRY num -ENTRY(invalidate_interrupt\num) - apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt -END(invalidate_interrupt\num) - .endm +END(\sym) +.endm - INVALIDATE_ENTRY 0 - INVALIDATE_ENTRY 1 - INVALIDATE_ENTRY 2 - INVALIDATE_ENTRY 3 - INVALIDATE_ENTRY 4 - INVALIDATE_ENTRY 5 - INVALIDATE_ENTRY 6 - INVALIDATE_ENTRY 7 - -ENTRY(call_function_interrupt) - apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt -END(call_function_interrupt) -ENTRY(call_function_single_interrupt) - apicinterrupt CALL_FUNCTION_SINGLE_VECTOR,smp_call_function_single_interrupt -END(call_function_single_interrupt) -ENTRY(irq_move_cleanup_interrupt) - apicinterrupt IRQ_MOVE_CLEANUP_VECTOR,smp_irq_move_cleanup_interrupt -END(irq_move_cleanup_interrupt) +#ifdef CONFIG_SMP +apicinterrupt IRQ_MOVE_CLEANUP_VECTOR \ + irq_move_cleanup_interrupt smp_irq_move_cleanup_interrupt #endif -ENTRY(apic_timer_interrupt) - apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt -END(apic_timer_interrupt) +apicinterrupt UV_BAU_MESSAGE \ + uv_bau_message_intr1 uv_bau_message_interrupt +apicinterrupt LOCAL_TIMER_VECTOR \ + apic_timer_interrupt smp_apic_timer_interrupt + +#ifdef CONFIG_SMP +apicinterrupt INVALIDATE_TLB_VECTOR_START+0 \ + invalidate_interrupt0 smp_invalidate_interrupt +apicinterrupt INVALIDATE_TLB_VECTOR_START+1 \ + invalidate_interrupt1 smp_invalidate_interrupt +apicinterrupt INVALIDATE_TLB_VECTOR_START+2 \ + invalidate_interrupt2 smp_invalidate_interrupt +apicinterrupt INVALIDATE_TLB_VECTOR_START+3 \ + invalidate_interrupt3 smp_invalidate_interrupt +apicinterrupt INVALIDATE_TLB_VECTOR_START+4 \ + invalidate_interrupt4 smp_invalidate_interrupt +apicinterrupt INVALIDATE_TLB_VECTOR_START+5 \ + invalidate_interrupt5 smp_invalidate_interrupt +apicinterrupt INVALIDATE_TLB_VECTOR_START+6 \ + invalidate_interrupt6 smp_invalidate_interrupt +apicinterrupt INVALIDATE_TLB_VECTOR_START+7 \ + invalidate_interrupt7 smp_invalidate_interrupt +#endif -ENTRY(uv_bau_message_intr1) - apicinterrupt 220,uv_bau_message_interrupt -END(uv_bau_message_intr1) +apicinterrupt THRESHOLD_APIC_VECTOR \ + threshold_interrupt mce_threshold_interrupt +apicinterrupt THERMAL_APIC_VECTOR \ + thermal_interrupt 
smp_thermal_interrupt + +#ifdef CONFIG_SMP +apicinterrupt CALL_FUNCTION_SINGLE_VECTOR \ + call_function_single_interrupt smp_call_function_single_interrupt +apicinterrupt CALL_FUNCTION_VECTOR \ + call_function_interrupt smp_call_function_interrupt +apicinterrupt RESCHEDULE_VECTOR \ + reschedule_interrupt smp_reschedule_interrupt +#endif -ENTRY(error_interrupt) - apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt -END(error_interrupt) +apicinterrupt ERROR_APIC_VECTOR \ + error_interrupt smp_error_interrupt +apicinterrupt SPURIOUS_APIC_VECTOR \ + spurious_interrupt smp_spurious_interrupt -ENTRY(spurious_interrupt) - apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt -END(spurious_interrupt) - /* * Exception entry points. - */ - .macro zeroentry sym + */ +.macro zeroentry sym do_sym +ENTRY(\sym) INTR_FRAME PARAVIRT_ADJUST_EXCEPTION_FRAME - pushq $0 /* push error code/oldrax */ - CFI_ADJUST_CFA_OFFSET 8 - pushq %rax /* push real oldrax to the rdi slot */ - CFI_ADJUST_CFA_OFFSET 8 - CFI_REL_OFFSET rax,0 - leaq \sym(%rip),%rax - jmp error_entry + pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */ + subq $15*8,%rsp + CFI_ADJUST_CFA_OFFSET 15*8 + call error_entry + DEFAULT_FRAME 0 + movq %rsp,%rdi /* pt_regs pointer */ + xorl %esi,%esi /* no error code */ + call \do_sym + jmp error_exit /* %ebx: no swapgs flag */ CFI_ENDPROC - .endm +END(\sym) +.endm - .macro errorentry sym - XCPT_FRAME +.macro paranoidzeroentry sym do_sym +ENTRY(\sym) + INTR_FRAME PARAVIRT_ADJUST_EXCEPTION_FRAME - pushq %rax + pushq $-1 /* ORIG_RAX: no syscall to restart */ CFI_ADJUST_CFA_OFFSET 8 - CFI_REL_OFFSET rax,0 - leaq \sym(%rip),%rax - jmp error_entry + subq $15*8, %rsp + call save_paranoid + TRACE_IRQS_OFF + movq %rsp,%rdi /* pt_regs pointer */ + xorl %esi,%esi /* no error code */ + call \do_sym + jmp paranoid_exit /* %ebx: no swapgs flag */ CFI_ENDPROC - .endm +END(\sym) +.endm - /* error code is on the stack already */ - /* handle NMI like exceptions that can happen everywhere */ - .macro paranoidentry sym, ist=0, irqtrace=1 - SAVE_ALL - cld - movl $1,%ebx - movl $MSR_GS_BASE,%ecx - rdmsr - testl %edx,%edx - js 1f - SWAPGS - xorl %ebx,%ebx -1: - .if \ist - movq %gs:pda_data_offset, %rbp - .endif - .if \irqtrace - TRACE_IRQS_OFF - .endif - movq %rsp,%rdi - movq ORIG_RAX(%rsp),%rsi - movq $-1,ORIG_RAX(%rsp) - .if \ist - subq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp) - .endif - call \sym - .if \ist - addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp) - .endif - DISABLE_INTERRUPTS(CLBR_NONE) - .if \irqtrace +.macro paranoidzeroentry_ist sym do_sym ist +ENTRY(\sym) + INTR_FRAME + PARAVIRT_ADJUST_EXCEPTION_FRAME + pushq $-1 /* ORIG_RAX: no syscall to restart */ + CFI_ADJUST_CFA_OFFSET 8 + subq $15*8, %rsp + call save_paranoid TRACE_IRQS_OFF - .endif - .endm + movq %rsp,%rdi /* pt_regs pointer */ + xorl %esi,%esi /* no error code */ + movq %gs:pda_data_offset, %rbp + subq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp) + call \do_sym + addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp) + jmp paranoid_exit /* %ebx: no swapgs flag */ + CFI_ENDPROC +END(\sym) +.endm - /* - * "Paranoid" exit path from exception stack. - * Paranoid because this is used by NMIs and cannot take - * any kernel state for granted. - * We don't do kernel preemption checks here, because only - * NMI should be common and it does not enable IRQs and - * cannot get reschedule ticks. 
- * - * "trace" is 0 for the NMI handler only, because irq-tracing - * is fundamentally NMI-unsafe. (we cannot change the soft and - * hard flags at once, atomically) - */ - .macro paranoidexit trace=1 - /* ebx: no swapgs flag */ -paranoid_exit\trace: - testl %ebx,%ebx /* swapgs needed? */ - jnz paranoid_restore\trace - testl $3,CS(%rsp) - jnz paranoid_userspace\trace -paranoid_swapgs\trace: - .if \trace - TRACE_IRQS_IRETQ 0 - .endif - SWAPGS_UNSAFE_STACK -paranoid_restore\trace: - RESTORE_ALL 8 - jmp irq_return -paranoid_userspace\trace: - GET_THREAD_INFO(%rcx) - movl TI_flags(%rcx),%ebx - andl $_TIF_WORK_MASK,%ebx - jz paranoid_swapgs\trace - movq %rsp,%rdi /* &pt_regs */ - call sync_regs - movq %rax,%rsp /* switch stack for scheduling */ - testl $_TIF_NEED_RESCHED,%ebx - jnz paranoid_schedule\trace - movl %ebx,%edx /* arg3: thread flags */ - .if \trace - TRACE_IRQS_ON - .endif - ENABLE_INTERRUPTS(CLBR_NONE) - xorl %esi,%esi /* arg2: oldset */ - movq %rsp,%rdi /* arg1: &pt_regs */ - call do_notify_resume - DISABLE_INTERRUPTS(CLBR_NONE) - .if \trace - TRACE_IRQS_OFF - .endif - jmp paranoid_userspace\trace -paranoid_schedule\trace: - .if \trace - TRACE_IRQS_ON - .endif - ENABLE_INTERRUPTS(CLBR_ANY) - call schedule - DISABLE_INTERRUPTS(CLBR_ANY) - .if \trace - TRACE_IRQS_OFF - .endif - jmp paranoid_userspace\trace +.macro errorentry sym do_sym +ENTRY(\sym) + XCPT_FRAME + PARAVIRT_ADJUST_EXCEPTION_FRAME + subq $15*8,%rsp + CFI_ADJUST_CFA_OFFSET 15*8 + call error_entry + DEFAULT_FRAME 0 + movq %rsp,%rdi /* pt_regs pointer */ + movq ORIG_RAX(%rsp),%rsi /* get error code */ + movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */ + call \do_sym + jmp error_exit /* %ebx: no swapgs flag */ CFI_ENDPROC - .endm +END(\sym) +.endm -/* - * Exception entry point. This expects an error code/orig_rax on the stack - * and the exception handler in %rax. 
- */ -KPROBE_ENTRY(error_entry) - _frame RDI - CFI_REL_OFFSET rax,0 - /* rdi slot contains rax, oldrax contains error code */ - cld - subq $14*8,%rsp - CFI_ADJUST_CFA_OFFSET (14*8) - movq %rsi,13*8(%rsp) - CFI_REL_OFFSET rsi,RSI - movq 14*8(%rsp),%rsi /* load rax from rdi slot */ - CFI_REGISTER rax,rsi - movq %rdx,12*8(%rsp) - CFI_REL_OFFSET rdx,RDX - movq %rcx,11*8(%rsp) - CFI_REL_OFFSET rcx,RCX - movq %rsi,10*8(%rsp) /* store rax */ - CFI_REL_OFFSET rax,RAX - movq %r8, 9*8(%rsp) - CFI_REL_OFFSET r8,R8 - movq %r9, 8*8(%rsp) - CFI_REL_OFFSET r9,R9 - movq %r10,7*8(%rsp) - CFI_REL_OFFSET r10,R10 - movq %r11,6*8(%rsp) - CFI_REL_OFFSET r11,R11 - movq %rbx,5*8(%rsp) - CFI_REL_OFFSET rbx,RBX - movq %rbp,4*8(%rsp) - CFI_REL_OFFSET rbp,RBP - movq %r12,3*8(%rsp) - CFI_REL_OFFSET r12,R12 - movq %r13,2*8(%rsp) - CFI_REL_OFFSET r13,R13 - movq %r14,1*8(%rsp) - CFI_REL_OFFSET r14,R14 - movq %r15,(%rsp) - CFI_REL_OFFSET r15,R15 - xorl %ebx,%ebx - testl $3,CS(%rsp) - je error_kernelspace -error_swapgs: - SWAPGS -error_sti: - TRACE_IRQS_OFF - movq %rdi,RDI(%rsp) - CFI_REL_OFFSET rdi,RDI - movq %rsp,%rdi - movq ORIG_RAX(%rsp),%rsi /* get error code */ - movq $-1,ORIG_RAX(%rsp) - call *%rax - /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */ -error_exit: - movl %ebx,%eax - RESTORE_REST - DISABLE_INTERRUPTS(CLBR_NONE) + /* error code is on the stack already */ +.macro paranoiderrorentry sym do_sym +ENTRY(\sym) + XCPT_FRAME + PARAVIRT_ADJUST_EXCEPTION_FRAME + subq $15*8,%rsp + CFI_ADJUST_CFA_OFFSET 15*8 + call save_paranoid + DEFAULT_FRAME 0 TRACE_IRQS_OFF - GET_THREAD_INFO(%rcx) - testl %eax,%eax - jne retint_kernel - LOCKDEP_SYS_EXIT_IRQ - movl TI_flags(%rcx),%edx - movl $_TIF_WORK_MASK,%edi - andl %edi,%edx - jnz retint_careful - jmp retint_swapgs + movq %rsp,%rdi /* pt_regs pointer */ + movq ORIG_RAX(%rsp),%rsi /* get error code */ + movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */ + call \do_sym + jmp paranoid_exit /* %ebx: no swapgs flag */ CFI_ENDPROC +END(\sym) +.endm -error_kernelspace: - incl %ebx - /* There are two places in the kernel that can potentially fault with - usergs. Handle them here. The exception handlers after - iret run with kernel gs again, so don't set the user space flag. - B stepping K8s sometimes report an truncated RIP for IRET - exceptions returning to compat mode. Check for these here too. 
*/ - leaq irq_return(%rip),%rcx - cmpq %rcx,RIP(%rsp) - je error_swapgs - movl %ecx,%ecx /* zero extend */ - cmpq %rcx,RIP(%rsp) - je error_swapgs - cmpq $gs_change,RIP(%rsp) - je error_swapgs - jmp error_sti -KPROBE_END(error_entry) - - /* Reload gs selector with exception handling */ - /* edi: new selector */ +zeroentry divide_error do_divide_error +zeroentry overflow do_overflow +zeroentry bounds do_bounds +zeroentry invalid_op do_invalid_op +zeroentry device_not_available do_device_not_available +paranoiderrorentry double_fault do_double_fault +zeroentry coprocessor_segment_overrun do_coprocessor_segment_overrun +errorentry invalid_TSS do_invalid_TSS +errorentry segment_not_present do_segment_not_present +zeroentry spurious_interrupt_bug do_spurious_interrupt_bug +zeroentry coprocessor_error do_coprocessor_error +errorentry alignment_check do_alignment_check +zeroentry simd_coprocessor_error do_simd_coprocessor_error + + /* Reload gs selector with exception handling */ + /* edi: new selector */ ENTRY(native_load_gs_index) CFI_STARTPROC pushf CFI_ADJUST_CFA_OFFSET 8 DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI)) - SWAPGS -gs_change: - movl %edi,%gs + SWAPGS +gs_change: + movl %edi,%gs 2: mfence /* workaround */ SWAPGS - popf + popf CFI_ADJUST_CFA_OFFSET -8 - ret + ret CFI_ENDPROC -ENDPROC(native_load_gs_index) - - .section __ex_table,"a" - .align 8 - .quad gs_change,bad_gs - .previous - .section .fixup,"ax" +END(native_load_gs_index) + + .section __ex_table,"a" + .align 8 + .quad gs_change,bad_gs + .previous + .section .fixup,"ax" /* running with kernelgs */ -bad_gs: +bad_gs: SWAPGS /* switch back to user gs */ xorl %eax,%eax - movl %eax,%gs - jmp 2b - .previous - + movl %eax,%gs + jmp 2b + .previous + /* * Create a kernel thread. * @@ -1138,7 +1184,7 @@ ENTRY(kernel_thread) xorl %r8d,%r8d xorl %r9d,%r9d - + # clone now call do_fork movq %rax,RAX(%rsp) @@ -1149,15 +1195,15 @@ ENTRY(kernel_thread) * so internally to the x86_64 port you can rely on kernel_thread() * not to reschedule the child before returning, this avoids the need * of hacks for example to fork off the per-CPU idle tasks. - * [Hopefully no generic code relies on the reschedule -AK] + * [Hopefully no generic code relies on the reschedule -AK] */ RESTORE_ALL UNFAKE_STACK_FRAME ret CFI_ENDPROC -ENDPROC(kernel_thread) - -child_rip: +END(kernel_thread) + +ENTRY(child_rip) pushq $0 # fake return address CFI_STARTPROC /* @@ -1170,8 +1216,9 @@ child_rip: # exit mov %eax, %edi call do_exit + ud2 # padding for call trace CFI_ENDPROC -ENDPROC(child_rip) +END(child_rip) /* * execve(). This function needs to use IRET, not SYSRET, to set up all state properly. 
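A side note on the interrupt-stub rework earlier in this entry_64.S diff: each generated stub pushes (~vector + 0x80), which always fits a signed 8-bit immediate, so the pushq encodes in two bytes and seven stubs pack into each 32-byte chunk of irq_entries_start; common_interrupt then adds -0x80, leaving ~vector (a value in the [-256,-1] range) in orig_ax, from which the C side can recover the vector by complementing it again. A minimal, standalone C sketch of that arithmetic follows; the constants mirror the usual x86 values and the helper names are illustrative only, not kernel API:

/*
 * Sketch of the vector encoding used by the packed interrupt stubs.
 * FIRST_EXTERNAL_VECTOR/NR_VECTORS mirror the x86 definitions; the
 * helpers are hypothetical names for the two encoding steps.
 */
#include <assert.h>

enum { FIRST_EXTERNAL_VECTOR = 0x20, NR_VECTORS = 256 };

/* value a stub pushes: always a signed 8-bit immediate (2-byte pushq) */
static long stub_push_value(int vector)
{
        return ~vector + 0x80;          /* == 127 - vector, in [-128, 95] */
}

/* value left in orig_ax after common_interrupt's addq $-0x80,(%rsp) */
static long orig_ax_value(int vector)
{
        return stub_push_value(vector) - 0x80;  /* == ~vector, in [-256, -33] */
}

int main(void)
{
        int v;

        for (v = FIRST_EXTERNAL_VECTOR; v < NR_VECTORS; v++) {
                assert(stub_push_value(v) >= -128 && stub_push_value(v) <= 127);
                assert(~orig_ax_value(v) == v);  /* handlers can recover v */
        }
        return 0;
}

Keeping the pushed immediate in signed-byte range is what allows the dense .rept packing in irq_entries_start to hold the whole stub table within a handful of cache lines.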
@@ -1191,10 +1238,10 @@ ENDPROC(child_rip) ENTRY(kernel_execve) CFI_STARTPROC FAKE_STACK_FRAME $0 - SAVE_ALL + SAVE_ALL movq %rsp,%rcx call sys_execve - movq %rax, RAX(%rsp) + movq %rax, RAX(%rsp) RESTORE_REST testq %rax,%rax je int_ret_from_sys_call @@ -1202,129 +1249,7 @@ ENTRY(kernel_execve) UNFAKE_STACK_FRAME ret CFI_ENDPROC -ENDPROC(kernel_execve) - -KPROBE_ENTRY(page_fault) - errorentry do_page_fault -KPROBE_END(page_fault) - -ENTRY(coprocessor_error) - zeroentry do_coprocessor_error -END(coprocessor_error) - -ENTRY(simd_coprocessor_error) - zeroentry do_simd_coprocessor_error -END(simd_coprocessor_error) - -ENTRY(device_not_available) - zeroentry do_device_not_available -END(device_not_available) - - /* runs on exception stack */ -KPROBE_ENTRY(debug) - INTR_FRAME - PARAVIRT_ADJUST_EXCEPTION_FRAME - pushq $0 - CFI_ADJUST_CFA_OFFSET 8 - paranoidentry do_debug, DEBUG_STACK - paranoidexit -KPROBE_END(debug) - - /* runs on exception stack */ -KPROBE_ENTRY(nmi) - INTR_FRAME - PARAVIRT_ADJUST_EXCEPTION_FRAME - pushq $-1 - CFI_ADJUST_CFA_OFFSET 8 - paranoidentry do_nmi, 0, 0 -#ifdef CONFIG_TRACE_IRQFLAGS - paranoidexit 0 -#else - jmp paranoid_exit1 - CFI_ENDPROC -#endif -KPROBE_END(nmi) - -KPROBE_ENTRY(int3) - INTR_FRAME - PARAVIRT_ADJUST_EXCEPTION_FRAME - pushq $0 - CFI_ADJUST_CFA_OFFSET 8 - paranoidentry do_int3, DEBUG_STACK - jmp paranoid_exit1 - CFI_ENDPROC -KPROBE_END(int3) - -ENTRY(overflow) - zeroentry do_overflow -END(overflow) - -ENTRY(bounds) - zeroentry do_bounds -END(bounds) - -ENTRY(invalid_op) - zeroentry do_invalid_op -END(invalid_op) - -ENTRY(coprocessor_segment_overrun) - zeroentry do_coprocessor_segment_overrun -END(coprocessor_segment_overrun) - - /* runs on exception stack */ -ENTRY(double_fault) - XCPT_FRAME - PARAVIRT_ADJUST_EXCEPTION_FRAME - paranoidentry do_double_fault - jmp paranoid_exit1 - CFI_ENDPROC -END(double_fault) - -ENTRY(invalid_TSS) - errorentry do_invalid_TSS -END(invalid_TSS) - -ENTRY(segment_not_present) - errorentry do_segment_not_present -END(segment_not_present) - - /* runs on exception stack */ -ENTRY(stack_segment) - XCPT_FRAME - PARAVIRT_ADJUST_EXCEPTION_FRAME - paranoidentry do_stack_segment - jmp paranoid_exit1 - CFI_ENDPROC -END(stack_segment) - -KPROBE_ENTRY(general_protection) - errorentry do_general_protection -KPROBE_END(general_protection) - -ENTRY(alignment_check) - errorentry do_alignment_check -END(alignment_check) - -ENTRY(divide_error) - zeroentry do_divide_error -END(divide_error) - -ENTRY(spurious_interrupt_bug) - zeroentry do_spurious_interrupt_bug -END(spurious_interrupt_bug) - -#ifdef CONFIG_X86_MCE - /* runs on exception stack */ -ENTRY(machine_check) - INTR_FRAME - PARAVIRT_ADJUST_EXCEPTION_FRAME - pushq $0 - CFI_ADJUST_CFA_OFFSET 8 - paranoidentry do_machine_check - jmp paranoid_exit1 - CFI_ENDPROC -END(machine_check) -#endif +END(kernel_execve) /* Call softirq on interrupt stack. Interrupts are off. */ ENTRY(call_softirq) @@ -1344,40 +1269,33 @@ ENTRY(call_softirq) decl %gs:pda_irqcount ret CFI_ENDPROC -ENDPROC(call_softirq) - -KPROBE_ENTRY(ignore_sysret) - CFI_STARTPROC - mov $-ENOSYS,%eax - sysret - CFI_ENDPROC -ENDPROC(ignore_sysret) +END(call_softirq) #ifdef CONFIG_XEN -ENTRY(xen_hypervisor_callback) - zeroentry xen_do_hypervisor_callback -END(xen_hypervisor_callback) +zeroentry xen_hypervisor_callback xen_do_hypervisor_callback /* -# A note on the "critical region" in our callback handler. -# We want to avoid stacking callback handlers due to events occurring -# during handling of the last event. 
To do this, we keep events disabled -# until we've done all processing. HOWEVER, we must enable events before -# popping the stack frame (can't be done atomically) and so it would still -# be possible to get enough handler activations to overflow the stack. -# Although unlikely, bugs of that kind are hard to track down, so we'd -# like to avoid the possibility. -# So, on entry to the handler we detect whether we interrupted an -# existing activation in its critical region -- if so, we pop the current -# activation and restart the handler using the previous one. -*/ + * A note on the "critical region" in our callback handler. + * We want to avoid stacking callback handlers due to events occurring + * during handling of the last event. To do this, we keep events disabled + * until we've done all processing. HOWEVER, we must enable events before + * popping the stack frame (can't be done atomically) and so it would still + * be possible to get enough handler activations to overflow the stack. + * Although unlikely, bugs of that kind are hard to track down, so we'd + * like to avoid the possibility. + * So, on entry to the handler we detect whether we interrupted an + * existing activation in its critical region -- if so, we pop the current + * activation and restart the handler using the previous one. + */ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs) CFI_STARTPROC -/* Since we don't modify %rdi, evtchn_do_upall(struct *pt_regs) will - see the correct pointer to the pt_regs */ +/* + * Since we don't modify %rdi, evtchn_do_upall(struct *pt_regs) will + * see the correct pointer to the pt_regs + */ movq %rdi, %rsp # we don't return, adjust the stack frame CFI_ENDPROC - CFI_DEFAULT_STACK + DEFAULT_FRAME 11: incl %gs:pda_irqcount movq %rsp,%rbp CFI_DEF_CFA_REGISTER rbp @@ -1392,23 +1310,26 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs) END(do_hypervisor_callback) /* -# Hypervisor uses this for application faults while it executes. -# We get here for two reasons: -# 1. Fault while reloading DS, ES, FS or GS -# 2. Fault while executing IRET -# Category 1 we do not need to fix up as Xen has already reloaded all segment -# registers that could be reloaded and zeroed the others. -# Category 2 we fix up by killing the current process. We cannot use the -# normal Linux return path in this case because if we use the IRET hypercall -# to pop the stack frame we end up in an infinite loop of failsafe callbacks. -# We distinguish between categories by comparing each saved segment register -# with its current contents: any discrepancy means we in category 1. -*/ + * Hypervisor uses this for application faults while it executes. + * We get here for two reasons: + * 1. Fault while reloading DS, ES, FS or GS + * 2. Fault while executing IRET + * Category 1 we do not need to fix up as Xen has already reloaded all segment + * registers that could be reloaded and zeroed the others. + * Category 2 we fix up by killing the current process. We cannot use the + * normal Linux return path in this case because if we use the IRET hypercall + * to pop the stack frame we end up in an infinite loop of failsafe callbacks. + * We distinguish between categories by comparing each saved segment register + * with its current contents: any discrepancy means we in category 1. 
+ */ ENTRY(xen_failsafe_callback) - framesz = (RIP-0x30) /* workaround buggy gas */ - _frame framesz - CFI_REL_OFFSET rcx, 0 - CFI_REL_OFFSET r11, 8 + INTR_FRAME 1 (6*8) + /*CFI_REL_OFFSET gs,GS*/ + /*CFI_REL_OFFSET fs,FS*/ + /*CFI_REL_OFFSET es,ES*/ + /*CFI_REL_OFFSET ds,DS*/ + CFI_REL_OFFSET r11,8 + CFI_REL_OFFSET rcx,0 movw %ds,%cx cmpw %cx,0x10(%rsp) CFI_REMEMBER_STATE @@ -1429,12 +1350,9 @@ ENTRY(xen_failsafe_callback) CFI_RESTORE r11 addq $0x30,%rsp CFI_ADJUST_CFA_OFFSET -0x30 - pushq $0 - CFI_ADJUST_CFA_OFFSET 8 - pushq %r11 - CFI_ADJUST_CFA_OFFSET 8 - pushq %rcx - CFI_ADJUST_CFA_OFFSET 8 + pushq_cfi $0 /* RIP */ + pushq_cfi %r11 + pushq_cfi %rcx jmp general_protection CFI_RESTORE_STATE 1: /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */ @@ -1444,11 +1362,223 @@ ENTRY(xen_failsafe_callback) CFI_RESTORE r11 addq $0x30,%rsp CFI_ADJUST_CFA_OFFSET -0x30 - pushq $0 - CFI_ADJUST_CFA_OFFSET 8 + pushq_cfi $0 SAVE_ALL jmp error_exit CFI_ENDPROC END(xen_failsafe_callback) #endif /* CONFIG_XEN */ + +/* + * Some functions should be protected against kprobes + */ + .pushsection .kprobes.text, "ax" + +paranoidzeroentry_ist debug do_debug DEBUG_STACK +paranoidzeroentry_ist int3 do_int3 DEBUG_STACK +paranoiderrorentry stack_segment do_stack_segment +errorentry general_protection do_general_protection +errorentry page_fault do_page_fault +#ifdef CONFIG_X86_MCE +paranoidzeroentry machine_check do_machine_check +#endif + + /* + * "Paranoid" exit path from exception stack. + * Paranoid because this is used by NMIs and cannot take + * any kernel state for granted. + * We don't do kernel preemption checks here, because only + * NMI should be common and it does not enable IRQs and + * cannot get reschedule ticks. + * + * "trace" is 0 for the NMI handler only, because irq-tracing + * is fundamentally NMI-unsafe. (we cannot change the soft and + * hard flags at once, atomically) + */ + + /* ebx: no swapgs flag */ +ENTRY(paranoid_exit) + INTR_FRAME + DISABLE_INTERRUPTS(CLBR_NONE) + TRACE_IRQS_OFF + testl %ebx,%ebx /* swapgs needed? */ + jnz paranoid_restore + testl $3,CS(%rsp) + jnz paranoid_userspace +paranoid_swapgs: + TRACE_IRQS_IRETQ 0 + SWAPGS_UNSAFE_STACK +paranoid_restore: + RESTORE_ALL 8 + jmp irq_return +paranoid_userspace: + GET_THREAD_INFO(%rcx) + movl TI_flags(%rcx),%ebx + andl $_TIF_WORK_MASK,%ebx + jz paranoid_swapgs + movq %rsp,%rdi /* &pt_regs */ + call sync_regs + movq %rax,%rsp /* switch stack for scheduling */ + testl $_TIF_NEED_RESCHED,%ebx + jnz paranoid_schedule + movl %ebx,%edx /* arg3: thread flags */ + TRACE_IRQS_ON + ENABLE_INTERRUPTS(CLBR_NONE) + xorl %esi,%esi /* arg2: oldset */ + movq %rsp,%rdi /* arg1: &pt_regs */ + call do_notify_resume + DISABLE_INTERRUPTS(CLBR_NONE) + TRACE_IRQS_OFF + jmp paranoid_userspace +paranoid_schedule: + TRACE_IRQS_ON + ENABLE_INTERRUPTS(CLBR_ANY) + call schedule + DISABLE_INTERRUPTS(CLBR_ANY) + TRACE_IRQS_OFF + jmp paranoid_userspace + CFI_ENDPROC +END(paranoid_exit) + +/* + * Exception entry point. This expects an error code/orig_rax on the stack. + * returns in "no swapgs flag" in %ebx. 
+ */ +ENTRY(error_entry) + XCPT_FRAME + CFI_ADJUST_CFA_OFFSET 15*8 + /* oldrax contains error code */ + cld + movq_cfi rdi, RDI+8 + movq_cfi rsi, RSI+8 + movq_cfi rdx, RDX+8 + movq_cfi rcx, RCX+8 + movq_cfi rax, RAX+8 + movq_cfi r8, R8+8 + movq_cfi r9, R9+8 + movq_cfi r10, R10+8 + movq_cfi r11, R11+8 + movq_cfi rbx, RBX+8 + movq_cfi rbp, RBP+8 + movq_cfi r12, R12+8 + movq_cfi r13, R13+8 + movq_cfi r14, R14+8 + movq_cfi r15, R15+8 + xorl %ebx,%ebx + testl $3,CS+8(%rsp) + je error_kernelspace +error_swapgs: + SWAPGS +error_sti: + TRACE_IRQS_OFF + ret + CFI_ENDPROC + +/* + * There are two places in the kernel that can potentially fault with + * usergs. Handle them here. The exception handlers after iret run with + * kernel gs again, so don't set the user space flag. B stepping K8s + * sometimes report an truncated RIP for IRET exceptions returning to + * compat mode. Check for these here too. + */ +error_kernelspace: + incl %ebx + leaq irq_return(%rip),%rcx + cmpq %rcx,RIP+8(%rsp) + je error_swapgs + movl %ecx,%ecx /* zero extend */ + cmpq %rcx,RIP+8(%rsp) + je error_swapgs + cmpq $gs_change,RIP+8(%rsp) + je error_swapgs + jmp error_sti +END(error_entry) + + +/* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */ +ENTRY(error_exit) + DEFAULT_FRAME + movl %ebx,%eax + RESTORE_REST + DISABLE_INTERRUPTS(CLBR_NONE) + TRACE_IRQS_OFF + GET_THREAD_INFO(%rcx) + testl %eax,%eax + jne retint_kernel + LOCKDEP_SYS_EXIT_IRQ + movl TI_flags(%rcx),%edx + movl $_TIF_WORK_MASK,%edi + andl %edi,%edx + jnz retint_careful + jmp retint_swapgs + CFI_ENDPROC +END(error_exit) + + + /* runs on exception stack */ +ENTRY(nmi) + INTR_FRAME + PARAVIRT_ADJUST_EXCEPTION_FRAME + pushq_cfi $-1 + subq $15*8, %rsp + CFI_ADJUST_CFA_OFFSET 15*8 + call save_paranoid + DEFAULT_FRAME 0 + /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */ + movq %rsp,%rdi + movq $-1,%rsi + call do_nmi +#ifdef CONFIG_TRACE_IRQFLAGS + /* paranoidexit; without TRACE_IRQS_OFF */ + /* ebx: no swapgs flag */ + DISABLE_INTERRUPTS(CLBR_NONE) + testl %ebx,%ebx /* swapgs needed? 
*/ + jnz nmi_restore + testl $3,CS(%rsp) + jnz nmi_userspace +nmi_swapgs: + SWAPGS_UNSAFE_STACK +nmi_restore: + RESTORE_ALL 8 + jmp irq_return +nmi_userspace: + GET_THREAD_INFO(%rcx) + movl TI_flags(%rcx),%ebx + andl $_TIF_WORK_MASK,%ebx + jz nmi_swapgs + movq %rsp,%rdi /* &pt_regs */ + call sync_regs + movq %rax,%rsp /* switch stack for scheduling */ + testl $_TIF_NEED_RESCHED,%ebx + jnz nmi_schedule + movl %ebx,%edx /* arg3: thread flags */ + ENABLE_INTERRUPTS(CLBR_NONE) + xorl %esi,%esi /* arg2: oldset */ + movq %rsp,%rdi /* arg1: &pt_regs */ + call do_notify_resume + DISABLE_INTERRUPTS(CLBR_NONE) + jmp nmi_userspace +nmi_schedule: + ENABLE_INTERRUPTS(CLBR_ANY) + call schedule + DISABLE_INTERRUPTS(CLBR_ANY) + jmp nmi_userspace + CFI_ENDPROC +#else + jmp paranoid_exit + CFI_ENDPROC +#endif +END(nmi) + +ENTRY(ignore_sysret) + CFI_STARTPROC + mov $-ENOSYS,%eax + sysret + CFI_ENDPROC +END(ignore_sysret) + +/* + * End of kprobes section + */ + .popsection diff --git a/arch/x86/kernel/es7000_32.c b/arch/x86/kernel/es7000_32.c index 0aa2c443d600..53699c931ad4 100644 --- a/arch/x86/kernel/es7000_32.c +++ b/arch/x86/kernel/es7000_32.c @@ -38,8 +38,11 @@ #include <asm/io.h> #include <asm/nmi.h> #include <asm/smp.h> +#include <asm/atomic.h> #include <asm/apicdef.h> #include <mach_mpparse.h> +#include <asm/genapic.h> +#include <asm/setup.h> /* * ES7000 chipsets @@ -161,6 +164,43 @@ es7000_rename_gsi(int ioapic, int gsi) return gsi; } +static int wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip) +{ + unsigned long vect = 0, psaival = 0; + + if (psai == NULL) + return -1; + + vect = ((unsigned long)__pa(eip)/0x1000) << 16; + psaival = (0x1000000 | vect | cpu); + + while (*psai & 0x1000000) + ; + + *psai = psaival; + + return 0; +} + +static void noop_wait_for_deassert(atomic_t *deassert_not_used) +{ +} + +static int __init es7000_update_genapic(void) +{ + genapic->wakeup_cpu = wakeup_secondary_cpu_via_mip; + + /* MPENTIUMIII */ + if (boot_cpu_data.x86 == 6 && + (boot_cpu_data.x86_model >= 7 || boot_cpu_data.x86_model <= 11)) { + es7000_update_genapic_to_cluster(); + genapic->wait_for_init_deassert = noop_wait_for_deassert; + genapic->wakeup_cpu = wakeup_secondary_cpu_via_mip; + } + + return 0; +} + void __init setup_unisys(void) { @@ -176,6 +216,8 @@ setup_unisys(void) else es7000_plat = ES7000_CLASSIC; ioapic_renumber_irq = es7000_rename_gsi; + + x86_quirks->update_genapic = es7000_update_genapic; } /* @@ -317,26 +359,6 @@ es7000_mip_write(struct mip_reg *mip_reg) return status; } -int -es7000_start_cpu(int cpu, unsigned long eip) -{ - unsigned long vect = 0, psaival = 0; - - if (psai == NULL) - return -1; - - vect = ((unsigned long)__pa(eip)/0x1000) << 16; - psaival = (0x1000000 | vect | cpu); - - while (*psai & 0x1000000) - ; - - *psai = psaival; - - return 0; - -} - void __init es7000_sw_apic(void) { diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index 50ea0ac8c9bf..1b43086b097a 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c @@ -14,14 +14,17 @@ #include <linux/uaccess.h> #include <linux/ftrace.h> #include <linux/percpu.h> +#include <linux/sched.h> #include <linux/init.h> #include <linux/list.h> #include <asm/ftrace.h> +#include <linux/ftrace.h> #include <asm/nops.h> +#include <asm/nmi.h> -static unsigned char ftrace_nop[MCOUNT_INSN_SIZE]; +#ifdef CONFIG_DYNAMIC_FTRACE union ftrace_code_union { char code[MCOUNT_INSN_SIZE]; @@ -31,18 +34,12 @@ union ftrace_code_union { } __attribute__((packed)); }; - static int ftrace_calc_offset(long ip, long addr) { 
return (int)(addr - ip); } -unsigned char *ftrace_nop_replace(void) -{ - return ftrace_nop; -} - -unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr) +static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr) { static union ftrace_code_union calc; @@ -56,7 +53,142 @@ unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr) return calc.code; } -int +/* + * Modifying code must take extra care. On an SMP machine, if + * the code being modified is also being executed on another CPU + * that CPU will have undefined results and possibly take a GPF. + * We use kstop_machine to stop other CPUS from exectuing code. + * But this does not stop NMIs from happening. We still need + * to protect against that. We separate out the modification of + * the code to take care of this. + * + * Two buffers are added: An IP buffer and a "code" buffer. + * + * 1) Put the instruction pointer into the IP buffer + * and the new code into the "code" buffer. + * 2) Set a flag that says we are modifying code + * 3) Wait for any running NMIs to finish. + * 4) Write the code + * 5) clear the flag. + * 6) Wait for any running NMIs to finish. + * + * If an NMI is executed, the first thing it does is to call + * "ftrace_nmi_enter". This will check if the flag is set to write + * and if it is, it will write what is in the IP and "code" buffers. + * + * The trick is, it does not matter if everyone is writing the same + * content to the code location. Also, if a CPU is executing code + * it is OK to write to that code location if the contents being written + * are the same as what exists. + */ + +static atomic_t in_nmi = ATOMIC_INIT(0); +static int mod_code_status; /* holds return value of text write */ +static int mod_code_write; /* set when NMI should do the write */ +static void *mod_code_ip; /* holds the IP to write to */ +static void *mod_code_newcode; /* holds the text to write to the IP */ + +static unsigned nmi_wait_count; +static atomic_t nmi_update_count = ATOMIC_INIT(0); + +int ftrace_arch_read_dyn_info(char *buf, int size) +{ + int r; + + r = snprintf(buf, size, "%u %u", + nmi_wait_count, + atomic_read(&nmi_update_count)); + return r; +} + +static void ftrace_mod_code(void) +{ + /* + * Yes, more than one CPU process can be writing to mod_code_status. + * (and the code itself) + * But if one were to fail, then they all should, and if one were + * to succeed, then they all should. 
+ */ + mod_code_status = probe_kernel_write(mod_code_ip, mod_code_newcode, + MCOUNT_INSN_SIZE); +} + +void ftrace_nmi_enter(void) +{ + atomic_inc(&in_nmi); + /* Must have in_nmi seen before reading write flag */ + smp_mb(); + if (mod_code_write) { + ftrace_mod_code(); + atomic_inc(&nmi_update_count); + } +} + +void ftrace_nmi_exit(void) +{ + /* Finish all executions before clearing in_nmi */ + smp_wmb(); + atomic_dec(&in_nmi); +} + +static void wait_for_nmi(void) +{ + int waited = 0; + + while (atomic_read(&in_nmi)) { + waited = 1; + cpu_relax(); + } + + if (waited) + nmi_wait_count++; +} + +static int +do_ftrace_mod_code(unsigned long ip, void *new_code) +{ + mod_code_ip = (void *)ip; + mod_code_newcode = new_code; + + /* The buffers need to be visible before we let NMIs write them */ + smp_wmb(); + + mod_code_write = 1; + + /* Make sure write bit is visible before we wait on NMIs */ + smp_mb(); + + wait_for_nmi(); + + /* Make sure all running NMIs have finished before we write the code */ + smp_mb(); + + ftrace_mod_code(); + + /* Make sure the write happens before clearing the bit */ + smp_wmb(); + + mod_code_write = 0; + + /* make sure NMIs see the cleared bit */ + smp_mb(); + + wait_for_nmi(); + + return mod_code_status; +} + + + + +static unsigned char ftrace_nop[MCOUNT_INSN_SIZE]; + +static unsigned char *ftrace_nop_replace(void) +{ + return ftrace_nop; +} + +static int ftrace_modify_code(unsigned long ip, unsigned char *old_code, unsigned char *new_code) { @@ -81,7 +213,7 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code, return -EINVAL; /* replace the text with the new text */ - if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE)) + if (do_ftrace_mod_code(ip, new_code)) return -EPERM; sync_core(); @@ -89,6 +221,29 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code, return 0; } +int ftrace_make_nop(struct module *mod, + struct dyn_ftrace *rec, unsigned long addr) +{ + unsigned char *new, *old; + unsigned long ip = rec->ip; + + old = ftrace_call_replace(ip, addr); + new = ftrace_nop_replace(); + + return ftrace_modify_code(rec->ip, old, new); +} + +int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) +{ + unsigned char *new, *old; + unsigned long ip = rec->ip; + + old = ftrace_nop_replace(); + new = ftrace_call_replace(ip, addr); + + return ftrace_modify_code(rec->ip, old, new); +} + int ftrace_update_ftrace_func(ftrace_func_t func) { unsigned long ip = (unsigned long)(&ftrace_call); @@ -165,3 +320,218 @@ int __init ftrace_dyn_arch_init(void *data) return 0; } +#endif + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + +#ifdef CONFIG_DYNAMIC_FTRACE +extern void ftrace_graph_call(void); + +static int ftrace_mod_jmp(unsigned long ip, + int old_offset, int new_offset) +{ + unsigned char code[MCOUNT_INSN_SIZE]; + + if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE)) + return -EFAULT; + + if (code[0] != 0xe9 || old_offset != *(int *)(&code[1])) + return -EINVAL; + + *(int *)(&code[1]) = new_offset; + + if (do_ftrace_mod_code(ip, &code)) + return -EPERM; + + return 0; +} + +int ftrace_enable_ftrace_graph_caller(void) +{ + unsigned long ip = (unsigned long)(&ftrace_graph_call); + int old_offset, new_offset; + + old_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE); + new_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE); + + return ftrace_mod_jmp(ip, old_offset, new_offset); +} + +int ftrace_disable_ftrace_graph_caller(void) +{ + unsigned long ip = (unsigned long)(&ftrace_graph_call); + int old_offset, 
new_offset; + + old_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE); + new_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE); + + return ftrace_mod_jmp(ip, old_offset, new_offset); +} + +#else /* CONFIG_DYNAMIC_FTRACE */ + +/* + * These functions are picked from those used on + * this page for dynamic ftrace. They have been + * simplified to ignore all traces in NMI context. + */ +static atomic_t in_nmi; + +void ftrace_nmi_enter(void) +{ + atomic_inc(&in_nmi); +} + +void ftrace_nmi_exit(void) +{ + atomic_dec(&in_nmi); +} + +#endif /* !CONFIG_DYNAMIC_FTRACE */ + +/* Add a function return address to the trace stack on thread info.*/ +static int push_return_trace(unsigned long ret, unsigned long long time, + unsigned long func, int *depth) +{ + int index; + + if (!current->ret_stack) + return -EBUSY; + + /* The return trace stack is full */ + if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) { + atomic_inc(¤t->trace_overrun); + return -EBUSY; + } + + index = ++current->curr_ret_stack; + barrier(); + current->ret_stack[index].ret = ret; + current->ret_stack[index].func = func; + current->ret_stack[index].calltime = time; + *depth = index; + + return 0; +} + +/* Retrieve a function return address to the trace stack on thread info.*/ +static void pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret) +{ + int index; + + index = current->curr_ret_stack; + + if (unlikely(index < 0)) { + ftrace_graph_stop(); + WARN_ON(1); + /* Might as well panic, otherwise we have no where to go */ + *ret = (unsigned long)panic; + return; + } + + *ret = current->ret_stack[index].ret; + trace->func = current->ret_stack[index].func; + trace->calltime = current->ret_stack[index].calltime; + trace->overrun = atomic_read(¤t->trace_overrun); + trace->depth = index; + barrier(); + current->curr_ret_stack--; + +} + +/* + * Send the trace to the ring-buffer. + * @return the original return address. + */ +unsigned long ftrace_return_to_handler(void) +{ + struct ftrace_graph_ret trace; + unsigned long ret; + + pop_return_trace(&trace, &ret); + trace.rettime = cpu_clock(raw_smp_processor_id()); + ftrace_graph_return(&trace); + + if (unlikely(!ret)) { + ftrace_graph_stop(); + WARN_ON(1); + /* Might as well panic. What else to do? */ + ret = (unsigned long)panic; + } + + return ret; +} + +/* + * Hook the return address and push it in the stack of return addrs + * in current thread info. + */ +void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) +{ + unsigned long old; + unsigned long long calltime; + int faulted; + struct ftrace_graph_ent trace; + unsigned long return_hooker = (unsigned long) + &return_to_handler; + + /* Nmi's are currently unsupported */ + if (unlikely(atomic_read(&in_nmi))) + return; + + if (unlikely(atomic_read(¤t->tracing_graph_pause))) + return; + + /* + * Protect against fault, even if it shouldn't + * happen. This tool is too much intrusive to + * ignore such a protection. 
+ */ + asm volatile( + "1: " _ASM_MOV " (%[parent_old]), %[old]\n" + "2: " _ASM_MOV " %[return_hooker], (%[parent_replaced])\n" + " movl $0, %[faulted]\n" + + ".section .fixup, \"ax\"\n" + "3: movl $1, %[faulted]\n" + ".previous\n" + + _ASM_EXTABLE(1b, 3b) + _ASM_EXTABLE(2b, 3b) + + : [parent_replaced] "=r" (parent), [old] "=r" (old), + [faulted] "=r" (faulted) + : [parent_old] "0" (parent), [return_hooker] "r" (return_hooker) + : "memory" + ); + + if (unlikely(faulted)) { + ftrace_graph_stop(); + WARN_ON(1); + return; + } + + if (unlikely(!__kernel_text_address(old))) { + ftrace_graph_stop(); + *parent = old; + WARN_ON(1); + return; + } + + calltime = cpu_clock(raw_smp_processor_id()); + + if (push_return_trace(old, calltime, + self_addr, &trace.depth) == -EBUSY) { + *parent = old; + return; + } + + trace.func = self_addr; + + /* Only trace if the calling function expects to */ + if (!ftrace_graph_entry(&trace)) { + current->curr_ret_stack--; + *parent = old; + } +} +#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ diff --git a/arch/x86/kernel/genapic_64.c b/arch/x86/kernel/genapic_64.c index 6c9bfc9e1e95..2bced78b0b8e 100644 --- a/arch/x86/kernel/genapic_64.c +++ b/arch/x86/kernel/genapic_64.c @@ -21,6 +21,7 @@ #include <asm/smp.h> #include <asm/ipi.h> #include <asm/genapic.h> +#include <asm/setup.h> extern struct genapic apic_flat; extern struct genapic apic_physflat; @@ -53,6 +54,9 @@ void __init setup_apic_routing(void) genapic = &apic_physflat; printk(KERN_INFO "Setting APIC routing to %s\n", genapic->name); } + + if (x86_quirks->update_genapic) + x86_quirks->update_genapic(); } /* Same for both flat and physical. */ diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c index 2c7dbdb98278..dece17289731 100644 --- a/arch/x86/kernel/genx2apic_uv_x.c +++ b/arch/x86/kernel/genx2apic_uv_x.c @@ -10,6 +10,7 @@ #include <linux/kernel.h> #include <linux/threads.h> +#include <linux/cpu.h> #include <linux/cpumask.h> #include <linux/string.h> #include <linux/ctype.h> @@ -17,6 +18,9 @@ #include <linux/sched.h> #include <linux/module.h> #include <linux/hardirq.h> +#include <linux/timer.h> +#include <linux/proc_fs.h> +#include <asm/current.h> #include <asm/smp.h> #include <asm/ipi.h> #include <asm/genapic.h> @@ -356,6 +360,103 @@ static __init void uv_rtc_init(void) } /* + * percpu heartbeat timer + */ +static void uv_heartbeat(unsigned long ignored) +{ + struct timer_list *timer = &uv_hub_info->scir.timer; + unsigned char bits = uv_hub_info->scir.state; + + /* flip heartbeat bit */ + bits ^= SCIR_CPU_HEARTBEAT; + + /* is this cpu idle? 
*/ + if (idle_cpu(raw_smp_processor_id())) + bits &= ~SCIR_CPU_ACTIVITY; + else + bits |= SCIR_CPU_ACTIVITY; + + /* update system controller interface reg */ + uv_set_scir_bits(bits); + + /* enable next timer period */ + mod_timer(timer, jiffies + SCIR_CPU_HB_INTERVAL); +} + +static void __cpuinit uv_heartbeat_enable(int cpu) +{ + if (!uv_cpu_hub_info(cpu)->scir.enabled) { + struct timer_list *timer = &uv_cpu_hub_info(cpu)->scir.timer; + + uv_set_cpu_scir_bits(cpu, SCIR_CPU_HEARTBEAT|SCIR_CPU_ACTIVITY); + setup_timer(timer, uv_heartbeat, cpu); + timer->expires = jiffies + SCIR_CPU_HB_INTERVAL; + add_timer_on(timer, cpu); + uv_cpu_hub_info(cpu)->scir.enabled = 1; + } + + /* check boot cpu */ + if (!uv_cpu_hub_info(0)->scir.enabled) + uv_heartbeat_enable(0); +} + +#ifdef CONFIG_HOTPLUG_CPU +static void __cpuinit uv_heartbeat_disable(int cpu) +{ + if (uv_cpu_hub_info(cpu)->scir.enabled) { + uv_cpu_hub_info(cpu)->scir.enabled = 0; + del_timer(&uv_cpu_hub_info(cpu)->scir.timer); + } + uv_set_cpu_scir_bits(cpu, 0xff); +} + +/* + * cpu hotplug notifier + */ +static __cpuinit int uv_scir_cpu_notify(struct notifier_block *self, + unsigned long action, void *hcpu) +{ + long cpu = (long)hcpu; + + switch (action) { + case CPU_ONLINE: + uv_heartbeat_enable(cpu); + break; + case CPU_DOWN_PREPARE: + uv_heartbeat_disable(cpu); + break; + default: + break; + } + return NOTIFY_OK; +} + +static __init void uv_scir_register_cpu_notifier(void) +{ + hotcpu_notifier(uv_scir_cpu_notify, 0); +} + +#else /* !CONFIG_HOTPLUG_CPU */ + +static __init void uv_scir_register_cpu_notifier(void) +{ +} + +static __init int uv_init_heartbeat(void) +{ + int cpu; + + if (is_uv_system()) + for_each_online_cpu(cpu) + uv_heartbeat_enable(cpu); + return 0; +} + +late_initcall(uv_init_heartbeat); + +#endif /* !CONFIG_HOTPLUG_CPU */ + +/* * Called on each cpu to initialize the per_cpu UV data area. 
* ZZZ hotplug not supported yet */ @@ -428,7 +529,7 @@ void __init uv_system_init(void) uv_bios_init(); uv_bios_get_sn_info(0, &uv_type, &sn_partition_id, - &uv_coherency_id, &uv_region_size); + &sn_coherency_id, &sn_region_size); uv_rtc_init(); for_each_present_cpu(cpu) { @@ -439,8 +540,7 @@ void __init uv_system_init(void) uv_blade_info[blade].nr_possible_cpus++; uv_cpu_hub_info(cpu)->lowmem_remap_base = lowmem_redir_base; - uv_cpu_hub_info(cpu)->lowmem_remap_top = - lowmem_redir_base + lowmem_redir_size; + uv_cpu_hub_info(cpu)->lowmem_remap_top = lowmem_redir_size; uv_cpu_hub_info(cpu)->m_val = m_val; uv_cpu_hub_info(cpu)->n_val = m_val; uv_cpu_hub_info(cpu)->numa_blade_id = blade; @@ -450,7 +550,8 @@ void __init uv_system_init(void) uv_cpu_hub_info(cpu)->gpa_mask = (1 << (m_val + n_val)) - 1; uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper; uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base; - uv_cpu_hub_info(cpu)->coherency_domain_number = uv_coherency_id; + uv_cpu_hub_info(cpu)->coherency_domain_number = sn_coherency_id; + uv_cpu_hub_info(cpu)->scir.offset = SCIR_LOCAL_MMR_BASE + lcpu; uv_node_to_blade[nid] = blade; uv_cpu_to_blade[cpu] = blade; max_pnode = max(pnode, max_pnode); @@ -467,4 +568,6 @@ void __init uv_system_init(void) map_mmioh_high(max_pnode); uv_cpu_init(); + uv_scir_register_cpu_notifier(); + proc_mkdir("sgi_uv", NULL); } diff --git a/arch/x86/kernel/head.c b/arch/x86/kernel/head.c index 1dcb0f13897e..3e66bd364a9d 100644 --- a/arch/x86/kernel/head.c +++ b/arch/x86/kernel/head.c @@ -35,7 +35,6 @@ void __init reserve_ebda_region(void) /* start of EBDA area */ ebda_addr = get_bios_ebda(); - printk(KERN_INFO "BIOS EBDA/lowmem at: %08x/%08x\n", ebda_addr, lowmem); /* Fixup: bios puts an EBDA in the top 64K segment */ /* of conventional memory, but does not adjust lowmem. 
*/ diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c index fa1d25dd83e3..ac108d1fe182 100644 --- a/arch/x86/kernel/head32.c +++ b/arch/x86/kernel/head32.c @@ -12,9 +12,12 @@ #include <asm/sections.h> #include <asm/e820.h> #include <asm/bios_ebda.h> +#include <asm/trampoline.h> void __init i386_start_kernel(void) { + reserve_trampoline_memory(); + reserve_early(__pa_symbol(&_text), __pa_symbol(&_end), "TEXT DATA BSS"); #ifdef CONFIG_BLK_DEV_INITRD diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index d16084f90649..388e05a5fc17 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -24,6 +24,7 @@ #include <asm/kdebug.h> #include <asm/e820.h> #include <asm/bios_ebda.h> +#include <asm/trampoline.h> /* boot cpu pda */ static struct x8664_pda _boot_cpu_pda __read_mostly; @@ -120,6 +121,8 @@ void __init x86_64_start_reservations(char *real_mode_data) { copy_bootdata(__va(real_mode_data)); + reserve_trampoline_memory(); + reserve_early(__pa_symbol(&_text), __pa_symbol(&_end), "TEXT DATA BSS"); #ifdef CONFIG_BLK_DEV_INITRD diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index 067d8de913f6..3f0a3edf0a57 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c @@ -33,7 +33,9 @@ * HPET address is set in acpi/boot.c, when an ACPI entry exists */ unsigned long hpet_address; -unsigned long hpet_num_timers; +#ifdef CONFIG_PCI_MSI +static unsigned long hpet_num_timers; +#endif static void __iomem *hpet_virt_address; struct hpet_dev { diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c index a4f93b4120c1..d39918076bb4 100644 --- a/arch/x86/kernel/init_task.c +++ b/arch/x86/kernel/init_task.c @@ -14,7 +14,6 @@ static struct fs_struct init_fs = INIT_FS; static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); struct mm_struct init_mm = INIT_MM(init_mm); -EXPORT_UNUSED_SYMBOL(init_mm); /* will be removed in 2.6.26 */ /* * Initial thread structure. 
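Returning to the ftrace.c hunk above: its comment spells out a six-step handshake that lets dynamic ftrace patch call sites while NMIs may be executing the very same bytes (fill the IP and code buffers, set a write flag, wait for in-flight NMIs, write, clear the flag, wait again), relying on the fact that every racing writer stores identical bytes. Below is a rough standalone model of that handshake, with C11 sequentially-consistent atomics standing in for the kernel's explicit smp_mb()/smp_wmb() barriers and a plain memcpy standing in for probe_kernel_write(); all names are hypothetical and the code only illustrates the ordering, not the real implementation:

/*
 * Toy model of the NMI-safe text-patching handshake described in the
 * ftrace.c comment. Not kernel code: seq_cst atomics replace the
 * explicit memory barriers, memcpy replaces probe_kernel_write().
 */
#include <stdatomic.h>
#include <string.h>

static atomic_int in_nmi;               /* NMIs currently executing */
static atomic_int mod_code_write;       /* set while a patch is pending */
static void *mod_code_ip;               /* where to write */
static const void *mod_code_newcode;    /* what to write */
static size_t mod_code_len;

static void mod_code(void)
{
        /* every writer stores identical bytes, so racing writes are benign */
        memcpy(mod_code_ip, mod_code_newcode, mod_code_len);
}

/* called from the NMI entry/exit paths */
void nmi_enter_hook(void)
{
        atomic_fetch_add(&in_nmi, 1);
        if (atomic_load(&mod_code_write))
                mod_code();             /* the NMI performs the write itself */
}

void nmi_exit_hook(void)
{
        atomic_fetch_sub(&in_nmi, 1);
}

static void wait_for_nmi(void)
{
        while (atomic_load(&in_nmi))
                ;                       /* spin until no NMI is in flight */
}

/* called from stop_machine-like context to patch one site */
void patch_text(void *ip, const void *newcode, size_t len)
{
        mod_code_ip = ip;                   /* 1) fill the buffers */
        mod_code_newcode = newcode;
        mod_code_len = len;
        atomic_store(&mod_code_write, 1);   /* 2) announce the write */
        wait_for_nmi();                     /* 3) wait for running NMIs */
        mod_code();                         /* 4) do the write ourselves */
        atomic_store(&mod_code_write, 0);   /* 5) clear the flag */
        wait_for_nmi();                     /* 6) wait before buffer reuse */
}

int main(void)
{
        static unsigned char text[5] = { 0xe8, 0, 0, 0, 0 };        /* fake call */
        static const unsigned char nop5[5] = { 0x0f, 0x1f, 0x44, 0x00, 0x00 };

        patch_text(text, nop5, sizeof(nop5));
        return text[0] == 0x0f ? 0 : 1;
}

The second wait_for_nmi() matters because an NMI that observed the flag may still be mid-copy when the flag is cleared; waiting again guarantees the buffers are not reused underneath it.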
diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c index 9043251210fb..679e7bbbbcd6 100644 --- a/arch/x86/kernel/io_apic.c +++ b/arch/x86/kernel/io_apic.c @@ -2216,10 +2216,9 @@ static void set_ir_ioapic_affinity_irq(unsigned int irq, cpumask_t mask) asmlinkage void smp_irq_move_cleanup_interrupt(void) { unsigned vector, me; + ack_APIC_irq(); -#ifdef CONFIG_X86_64 exit_idle(); -#endif irq_enter(); me = smp_processor_id(); diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c index 60eb84eb77a0..1df869e5bd0b 100644 --- a/arch/x86/kernel/irq_64.c +++ b/arch/x86/kernel/irq_64.c @@ -13,12 +13,12 @@ #include <linux/seq_file.h> #include <linux/module.h> #include <linux/delay.h> +#include <linux/ftrace.h> #include <asm/uaccess.h> #include <asm/io_apic.h> #include <asm/idle.h> #include <asm/smp.h> -#ifdef CONFIG_DEBUG_STACKOVERFLOW /* * Probabilistic stack overflow check: * @@ -28,26 +28,25 @@ */ static inline void stack_overflow_check(struct pt_regs *regs) { +#ifdef CONFIG_DEBUG_STACKOVERFLOW u64 curbase = (u64)task_stack_page(current); - static unsigned long warned = -60*HZ; - - if (regs->sp >= curbase && regs->sp <= curbase + THREAD_SIZE && - regs->sp < curbase + sizeof(struct thread_info) + 128 && - time_after(jiffies, warned + 60*HZ)) { - printk("do_IRQ: %s near stack overflow (cur:%Lx,sp:%lx)\n", - current->comm, curbase, regs->sp); - show_stack(NULL,NULL); - warned = jiffies; - } -} + + WARN_ONCE(regs->sp >= curbase && + regs->sp <= curbase + THREAD_SIZE && + regs->sp < curbase + sizeof(struct thread_info) + + sizeof(struct pt_regs) + 128, + + "do_IRQ: %s near stack overflow (cur:%Lx,sp:%lx)\n", + current->comm, curbase, regs->sp); #endif +} /* * do_IRQ handles all normal device IRQ's (the special * SMP cross-CPU interrupts have their own specific * handlers). */ -asmlinkage unsigned int do_IRQ(struct pt_regs *regs) +asmlinkage unsigned int __irq_entry do_IRQ(struct pt_regs *regs) { struct pt_regs *old_regs = set_irq_regs(regs); struct irq_desc *desc; @@ -60,9 +59,7 @@ asmlinkage unsigned int do_IRQ(struct pt_regs *regs) irq_enter(); irq = __get_cpu_var(vector_irq)[vector]; -#ifdef CONFIG_DEBUG_STACKOVERFLOW stack_overflow_check(regs); -#endif desc = irq_to_desc(irq); if (likely(desc)) diff --git a/arch/x86/kernel/irqinit_32.c b/arch/x86/kernel/irqinit_32.c index 845aa9803e80..607db63044a5 100644 --- a/arch/x86/kernel/irqinit_32.c +++ b/arch/x86/kernel/irqinit_32.c @@ -129,7 +129,7 @@ void __init native_init_IRQ(void) for (i = FIRST_EXTERNAL_VECTOR; i < NR_VECTORS; i++) { /* SYSCALL_VECTOR was reserved in trap_init. */ if (i != SYSCALL_VECTOR) - set_intr_gate(i, interrupt[i]); + set_intr_gate(i, interrupt[i-FIRST_EXTERNAL_VECTOR]); } diff --git a/arch/x86/kernel/irqinit_64.c b/arch/x86/kernel/irqinit_64.c index ff0235391285..8670b3ce626e 100644 --- a/arch/x86/kernel/irqinit_64.c +++ b/arch/x86/kernel/irqinit_64.c @@ -24,41 +24,6 @@ #include <asm/i8259.h> /* - * Common place to define all x86 IRQ vectors - * - * This builds up the IRQ handler stubs using some ugly macros in irq.h - * - * These macros create the low-level assembly IRQ routines that save - * register context and call do_IRQ(). do_IRQ() then does all the - * operations that are needed to keep the AT (or SMP IOAPIC) - * interrupt-controller happy. 
- */ - -#define IRQ_NAME2(nr) nr##_interrupt(void) -#define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr) - -/* - * SMP has a few special interrupts for IPI messages - */ - -#define BUILD_IRQ(nr) \ - asmlinkage void IRQ_NAME(nr); \ - asm("\n.text\n.p2align\n" \ - "IRQ" #nr "_interrupt:\n\t" \ - "push $~(" #nr ") ; " \ - "jmp common_interrupt\n" \ - ".previous"); - -#define BI(x,y) \ - BUILD_IRQ(x##y) - -#define BUILD_16_IRQS(x) \ - BI(x,0) BI(x,1) BI(x,2) BI(x,3) \ - BI(x,4) BI(x,5) BI(x,6) BI(x,7) \ - BI(x,8) BI(x,9) BI(x,a) BI(x,b) \ - BI(x,c) BI(x,d) BI(x,e) BI(x,f) - -/* * ISA PIC or low IO-APIC triggered (INTA-cycle or APIC) interrupts: * (these are usually mapped to vectors 0x30-0x3f) */ @@ -73,37 +38,6 @@ * * (these are usually mapped into the 0x30-0xff vector range) */ - BUILD_16_IRQS(0x2) BUILD_16_IRQS(0x3) -BUILD_16_IRQS(0x4) BUILD_16_IRQS(0x5) BUILD_16_IRQS(0x6) BUILD_16_IRQS(0x7) -BUILD_16_IRQS(0x8) BUILD_16_IRQS(0x9) BUILD_16_IRQS(0xa) BUILD_16_IRQS(0xb) -BUILD_16_IRQS(0xc) BUILD_16_IRQS(0xd) BUILD_16_IRQS(0xe) BUILD_16_IRQS(0xf) - -#undef BUILD_16_IRQS -#undef BI - - -#define IRQ(x,y) \ - IRQ##x##y##_interrupt - -#define IRQLIST_16(x) \ - IRQ(x,0), IRQ(x,1), IRQ(x,2), IRQ(x,3), \ - IRQ(x,4), IRQ(x,5), IRQ(x,6), IRQ(x,7), \ - IRQ(x,8), IRQ(x,9), IRQ(x,a), IRQ(x,b), \ - IRQ(x,c), IRQ(x,d), IRQ(x,e), IRQ(x,f) - -/* for the irq vectors */ -static void (*__initdata interrupt[NR_VECTORS - FIRST_EXTERNAL_VECTOR])(void) = { - IRQLIST_16(0x2), IRQLIST_16(0x3), - IRQLIST_16(0x4), IRQLIST_16(0x5), IRQLIST_16(0x6), IRQLIST_16(0x7), - IRQLIST_16(0x8), IRQLIST_16(0x9), IRQLIST_16(0xa), IRQLIST_16(0xb), - IRQLIST_16(0xc), IRQLIST_16(0xd), IRQLIST_16(0xe), IRQLIST_16(0xf) -}; - -#undef IRQ -#undef IRQLIST_16 - - - /* * IRQ2 is cascade interrupt to second interrupt controller diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c index 7a385746509a..37f420018a41 100644 --- a/arch/x86/kernel/machine_kexec_32.c +++ b/arch/x86/kernel/machine_kexec_32.c @@ -13,6 +13,7 @@ #include <linux/numa.h> #include <linux/ftrace.h> #include <linux/suspend.h> +#include <linux/gfp.h> #include <asm/pgtable.h> #include <asm/pgalloc.h> @@ -25,15 +26,6 @@ #include <asm/system.h> #include <asm/cacheflush.h> -#define PAGE_ALIGNED __attribute__ ((__aligned__(PAGE_SIZE))) -static u32 kexec_pgd[1024] PAGE_ALIGNED; -#ifdef CONFIG_X86_PAE -static u32 kexec_pmd0[1024] PAGE_ALIGNED; -static u32 kexec_pmd1[1024] PAGE_ALIGNED; -#endif -static u32 kexec_pte0[1024] PAGE_ALIGNED; -static u32 kexec_pte1[1024] PAGE_ALIGNED; - static void set_idt(void *newidt, __u16 limit) { struct desc_ptr curidt; @@ -76,6 +68,76 @@ static void load_segments(void) #undef __STR } +static void machine_kexec_free_page_tables(struct kimage *image) +{ + free_page((unsigned long)image->arch.pgd); +#ifdef CONFIG_X86_PAE + free_page((unsigned long)image->arch.pmd0); + free_page((unsigned long)image->arch.pmd1); +#endif + free_page((unsigned long)image->arch.pte0); + free_page((unsigned long)image->arch.pte1); +} + +static int machine_kexec_alloc_page_tables(struct kimage *image) +{ + image->arch.pgd = (pgd_t *)get_zeroed_page(GFP_KERNEL); +#ifdef CONFIG_X86_PAE + image->arch.pmd0 = (pmd_t *)get_zeroed_page(GFP_KERNEL); + image->arch.pmd1 = (pmd_t *)get_zeroed_page(GFP_KERNEL); +#endif + image->arch.pte0 = (pte_t *)get_zeroed_page(GFP_KERNEL); + image->arch.pte1 = (pte_t *)get_zeroed_page(GFP_KERNEL); + if (!image->arch.pgd || +#ifdef CONFIG_X86_PAE + !image->arch.pmd0 || !image->arch.pmd1 || +#endif + !image->arch.pte0 || 
!image->arch.pte1) { + machine_kexec_free_page_tables(image); + return -ENOMEM; + } + return 0; +} + +static void machine_kexec_page_table_set_one( + pgd_t *pgd, pmd_t *pmd, pte_t *pte, + unsigned long vaddr, unsigned long paddr) +{ + pud_t *pud; + + pgd += pgd_index(vaddr); +#ifdef CONFIG_X86_PAE + if (!(pgd_val(*pgd) & _PAGE_PRESENT)) + set_pgd(pgd, __pgd(__pa(pmd) | _PAGE_PRESENT)); +#endif + pud = pud_offset(pgd, vaddr); + pmd = pmd_offset(pud, vaddr); + if (!(pmd_val(*pmd) & _PAGE_PRESENT)) + set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE)); + pte = pte_offset_kernel(pmd, vaddr); + set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL_EXEC)); +} + +static void machine_kexec_prepare_page_tables(struct kimage *image) +{ + void *control_page; + pmd_t *pmd = 0; + + control_page = page_address(image->control_code_page); +#ifdef CONFIG_X86_PAE + pmd = image->arch.pmd0; +#endif + machine_kexec_page_table_set_one( + image->arch.pgd, pmd, image->arch.pte0, + (unsigned long)control_page, __pa(control_page)); +#ifdef CONFIG_X86_PAE + pmd = image->arch.pmd1; +#endif + machine_kexec_page_table_set_one( + image->arch.pgd, pmd, image->arch.pte1, + __pa(control_page), __pa(control_page)); +} + /* * A architecture hook called to validate the * proposed image and prepare the control pages @@ -87,12 +149,20 @@ static void load_segments(void) * reboot code buffer to allow us to avoid allocations * later. * - * Make control page executable. + * - Make control page executable. + * - Allocate page tables + * - Setup page tables */ int machine_kexec_prepare(struct kimage *image) { + int error; + if (nx_enabled) set_pages_x(image->control_code_page, 1); + error = machine_kexec_alloc_page_tables(image); + if (error) + return error; + machine_kexec_prepare_page_tables(image); return 0; } @@ -104,6 +174,7 @@ void machine_kexec_cleanup(struct kimage *image) { if (nx_enabled) set_pages_nx(image->control_code_page, 1); + machine_kexec_free_page_tables(image); } /* @@ -150,18 +221,7 @@ void machine_kexec(struct kimage *image) relocate_kernel_ptr = control_page; page_list[PA_CONTROL_PAGE] = __pa(control_page); page_list[VA_CONTROL_PAGE] = (unsigned long)control_page; - page_list[PA_PGD] = __pa(kexec_pgd); - page_list[VA_PGD] = (unsigned long)kexec_pgd; -#ifdef CONFIG_X86_PAE - page_list[PA_PMD_0] = __pa(kexec_pmd0); - page_list[VA_PMD_0] = (unsigned long)kexec_pmd0; - page_list[PA_PMD_1] = __pa(kexec_pmd1); - page_list[VA_PMD_1] = (unsigned long)kexec_pmd1; -#endif - page_list[PA_PTE_0] = __pa(kexec_pte0); - page_list[VA_PTE_0] = (unsigned long)kexec_pte0; - page_list[PA_PTE_1] = __pa(kexec_pte1); - page_list[VA_PTE_1] = (unsigned long)kexec_pte1; + page_list[PA_PGD] = __pa(image->arch.pgd); if (image->type == KEXEC_TYPE_DEFAULT) page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page) diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c index 5f8e5d75a254..c25fdb382292 100644 --- a/arch/x86/kernel/microcode_amd.c +++ b/arch/x86/kernel/microcode_amd.c @@ -10,7 +10,7 @@ * This driver allows to upgrade microcode on AMD * family 0x10 and 0x11 processors. * - * Licensed unter the terms of the GNU General Public + * Licensed under the terms of the GNU General Public * License version 2. See file COPYING for details. 
*/ @@ -32,9 +32,9 @@ #include <linux/platform_device.h> #include <linux/pci.h> #include <linux/pci_ids.h> +#include <linux/uaccess.h> #include <asm/msr.h> -#include <asm/uaccess.h> #include <asm/processor.h> #include <asm/microcode.h> @@ -47,43 +47,38 @@ MODULE_LICENSE("GPL v2"); #define UCODE_UCODE_TYPE 0x00000001 struct equiv_cpu_entry { - unsigned int installed_cpu; - unsigned int fixed_errata_mask; - unsigned int fixed_errata_compare; - unsigned int equiv_cpu; -}; + u32 installed_cpu; + u32 fixed_errata_mask; + u32 fixed_errata_compare; + u16 equiv_cpu; + u16 res; +} __attribute__((packed)); struct microcode_header_amd { - unsigned int data_code; - unsigned int patch_id; - unsigned char mc_patch_data_id[2]; - unsigned char mc_patch_data_len; - unsigned char init_flag; - unsigned int mc_patch_data_checksum; - unsigned int nb_dev_id; - unsigned int sb_dev_id; - unsigned char processor_rev_id[2]; - unsigned char nb_rev_id; - unsigned char sb_rev_id; - unsigned char bios_api_rev; - unsigned char reserved1[3]; - unsigned int match_reg[8]; -}; + u32 data_code; + u32 patch_id; + u16 mc_patch_data_id; + u8 mc_patch_data_len; + u8 init_flag; + u32 mc_patch_data_checksum; + u32 nb_dev_id; + u32 sb_dev_id; + u16 processor_rev_id; + u8 nb_rev_id; + u8 sb_rev_id; + u8 bios_api_rev; + u8 reserved1[3]; + u32 match_reg[8]; +} __attribute__((packed)); struct microcode_amd { struct microcode_header_amd hdr; unsigned int mpb[0]; }; -#define UCODE_MAX_SIZE (2048) -#define DEFAULT_UCODE_DATASIZE (896) -#define MC_HEADER_SIZE (sizeof(struct microcode_header_amd)) -#define DEFAULT_UCODE_TOTALSIZE (DEFAULT_UCODE_DATASIZE + MC_HEADER_SIZE) -#define DWSIZE (sizeof(u32)) -/* For now we support a fixed ucode total size only */ -#define get_totalsize(mc) \ - ((((struct microcode_amd *)mc)->hdr.mc_patch_data_len * 28) \ - + MC_HEADER_SIZE) +#define UCODE_MAX_SIZE 2048 +#define UCODE_CONTAINER_SECTION_HDR 8 +#define UCODE_CONTAINER_HEADER_SIZE 12 /* serialize access to the physical write */ static DEFINE_SPINLOCK(microcode_update_lock); @@ -93,31 +88,24 @@ static struct equiv_cpu_entry *equiv_cpu_table; static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig) { struct cpuinfo_x86 *c = &cpu_data(cpu); + u32 dummy; memset(csig, 0, sizeof(*csig)); - if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) { - printk(KERN_ERR "microcode: CPU%d not a capable AMD processor\n", - cpu); + printk(KERN_WARNING "microcode: CPU%d: AMD CPU family 0x%x not " + "supported\n", cpu, c->x86); return -1; } - - asm volatile("movl %1, %%ecx; rdmsr" - : "=a" (csig->rev) - : "i" (0x0000008B) : "ecx"); - - printk(KERN_INFO "microcode: collect_cpu_info_amd : patch_id=0x%x\n", - csig->rev); - + rdmsr(MSR_AMD64_PATCH_LEVEL, csig->rev, dummy); + printk(KERN_INFO "microcode: CPU%d: patch_level=0x%x\n", cpu, csig->rev); return 0; } static int get_matching_microcode(int cpu, void *mc, int rev) { struct microcode_header_amd *mc_header = mc; - struct pci_dev *nb_pci_dev, *sb_pci_dev; unsigned int current_cpu_id; - unsigned int equiv_cpu_id = 0x00; + u16 equiv_cpu_id = 0; unsigned int i = 0; BUG_ON(equiv_cpu_table == NULL); @@ -132,57 +120,25 @@ static int get_matching_microcode(int cpu, void *mc, int rev) } if (!equiv_cpu_id) { - printk(KERN_ERR "microcode: CPU%d cpu_id " - "not found in equivalent cpu table \n", cpu); + printk(KERN_WARNING "microcode: CPU%d: cpu revision " + "not listed in equivalent cpu table\n", cpu); return 0; } - if ((mc_header->processor_rev_id[0]) != (equiv_cpu_id & 0xff)) { - printk(KERN_ERR - "microcode: CPU%d 
patch does not match " - "(patch is %x, cpu extended is %x) \n", - cpu, mc_header->processor_rev_id[0], - (equiv_cpu_id & 0xff)); + if (mc_header->processor_rev_id != equiv_cpu_id) { + printk(KERN_ERR "microcode: CPU%d: patch mismatch " + "(processor_rev_id: %x, equiv_cpu_id: %x)\n", + cpu, mc_header->processor_rev_id, equiv_cpu_id); return 0; } - if ((mc_header->processor_rev_id[1]) != ((equiv_cpu_id >> 16) & 0xff)) { - printk(KERN_ERR "microcode: CPU%d patch does not match " - "(patch is %x, cpu base id is %x) \n", - cpu, mc_header->processor_rev_id[1], - ((equiv_cpu_id >> 16) & 0xff)); - + /* ucode might be chipset specific -- currently we don't support this */ + if (mc_header->nb_dev_id || mc_header->sb_dev_id) { + printk(KERN_ERR "microcode: CPU%d: loading of chipset " + "specific code not yet supported\n", cpu); return 0; } - /* ucode may be northbridge specific */ - if (mc_header->nb_dev_id) { - nb_pci_dev = pci_get_device(PCI_VENDOR_ID_AMD, - (mc_header->nb_dev_id & 0xff), - NULL); - if ((!nb_pci_dev) || - (mc_header->nb_rev_id != nb_pci_dev->revision)) { - printk(KERN_ERR "microcode: CPU%d NB mismatch \n", cpu); - pci_dev_put(nb_pci_dev); - return 0; - } - pci_dev_put(nb_pci_dev); - } - - /* ucode may be southbridge specific */ - if (mc_header->sb_dev_id) { - sb_pci_dev = pci_get_device(PCI_VENDOR_ID_AMD, - (mc_header->sb_dev_id & 0xff), - NULL); - if ((!sb_pci_dev) || - (mc_header->sb_rev_id != sb_pci_dev->revision)) { - printk(KERN_ERR "microcode: CPU%d SB mismatch \n", cpu); - pci_dev_put(sb_pci_dev); - return 0; - } - pci_dev_put(sb_pci_dev); - } - if (mc_header->patch_id <= rev) return 0; @@ -192,12 +148,10 @@ static int get_matching_microcode(int cpu, void *mc, int rev) static void apply_microcode_amd(int cpu) { unsigned long flags; - unsigned int eax, edx; - unsigned int rev; + u32 rev, dummy; int cpu_num = raw_smp_processor_id(); struct ucode_cpu_info *uci = ucode_cpu_info + cpu_num; struct microcode_amd *mc_amd = uci->mc; - unsigned long addr; /* We should bind the task to the CPU */ BUG_ON(cpu_num != cpu); @@ -206,42 +160,34 @@ static void apply_microcode_amd(int cpu) return; spin_lock_irqsave(&microcode_update_lock, flags); - - addr = (unsigned long)&mc_amd->hdr.data_code; - edx = (unsigned int)(((unsigned long)upper_32_bits(addr))); - eax = (unsigned int)(((unsigned long)lower_32_bits(addr))); - - asm volatile("movl %0, %%ecx; wrmsr" : - : "i" (0xc0010020), "a" (eax), "d" (edx) : "ecx"); - + wrmsrl(MSR_AMD64_PATCH_LOADER, (u64)(long)&mc_amd->hdr.data_code); /* get patch id after patching */ - asm volatile("movl %1, %%ecx; rdmsr" - : "=a" (rev) - : "i" (0x0000008B) : "ecx"); - + rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy); spin_unlock_irqrestore(&microcode_update_lock, flags); /* check current patch id and patch's id for match */ if (rev != mc_amd->hdr.patch_id) { - printk(KERN_ERR "microcode: CPU%d update from revision " - "0x%x to 0x%x failed\n", cpu_num, - mc_amd->hdr.patch_id, rev); + printk(KERN_ERR "microcode: CPU%d: update failed " + "(for patch_level=0x%x)\n", cpu, mc_amd->hdr.patch_id); return; } - printk(KERN_INFO "microcode: CPU%d updated from revision " - "0x%x to 0x%x \n", - cpu_num, uci->cpu_sig.rev, mc_amd->hdr.patch_id); + printk(KERN_INFO "microcode: CPU%d: updated (new patch_level=0x%x)\n", + cpu, rev); uci->cpu_sig.rev = rev; } -static void * get_next_ucode(u8 *buf, unsigned int size, - int (*get_ucode_data)(void *, const void *, size_t), - unsigned int *mc_size) +static int get_ucode_data(void *to, const u8 *from, size_t n) +{ + memcpy(to, from, n); + return 0; 
+} + +static void *get_next_ucode(const u8 *buf, unsigned int size, + unsigned int *mc_size) { unsigned int total_size; -#define UCODE_CONTAINER_SECTION_HDR 8 u8 section_hdr[UCODE_CONTAINER_SECTION_HDR]; void *mc; @@ -249,39 +195,37 @@ static void * get_next_ucode(u8 *buf, unsigned int size, return NULL; if (section_hdr[0] != UCODE_UCODE_TYPE) { - printk(KERN_ERR "microcode: error! " - "Wrong microcode payload type field\n"); + printk(KERN_ERR "microcode: error: invalid type field in " + "container file section header\n"); return NULL; } total_size = (unsigned long) (section_hdr[4] + (section_hdr[5] << 8)); - printk(KERN_INFO "microcode: size %u, total_size %u\n", - size, total_size); + printk(KERN_DEBUG "microcode: size %u, total_size %u\n", + size, total_size); if (total_size > size || total_size > UCODE_MAX_SIZE) { - printk(KERN_ERR "microcode: error! Bad data in microcode data file\n"); + printk(KERN_ERR "microcode: error: size mismatch\n"); return NULL; } mc = vmalloc(UCODE_MAX_SIZE); if (mc) { memset(mc, 0, UCODE_MAX_SIZE); - if (get_ucode_data(mc, buf + UCODE_CONTAINER_SECTION_HDR, total_size)) { + if (get_ucode_data(mc, buf + UCODE_CONTAINER_SECTION_HDR, + total_size)) { vfree(mc); mc = NULL; } else *mc_size = total_size + UCODE_CONTAINER_SECTION_HDR; } -#undef UCODE_CONTAINER_SECTION_HDR return mc; } -static int install_equiv_cpu_table(u8 *buf, - int (*get_ucode_data)(void *, const void *, size_t)) +static int install_equiv_cpu_table(const u8 *buf) { -#define UCODE_CONTAINER_HEADER_SIZE 12 u8 *container_hdr[UCODE_CONTAINER_HEADER_SIZE]; unsigned int *buf_pos = (unsigned int *)container_hdr; unsigned long size; @@ -292,14 +236,15 @@ static int install_equiv_cpu_table(u8 *buf, size = buf_pos[2]; if (buf_pos[1] != UCODE_EQUIV_CPU_TABLE_TYPE || !size) { - printk(KERN_ERR "microcode: error! 
" - "Wrong microcode equivalnet cpu table\n"); + printk(KERN_ERR "microcode: error: invalid type field in " + "container file section header\n"); return 0; } equiv_cpu_table = (struct equiv_cpu_entry *) vmalloc(size); if (!equiv_cpu_table) { - printk(KERN_ERR "microcode: error, can't allocate memory for equiv CPU table\n"); + printk(KERN_ERR "microcode: failed to allocate " + "equivalent CPU table\n"); return 0; } @@ -310,7 +255,6 @@ static int install_equiv_cpu_table(u8 *buf, } return size + UCODE_CONTAINER_HEADER_SIZE; /* add header length */ -#undef UCODE_CONTAINER_HEADER_SIZE } static void free_equiv_cpu_table(void) @@ -321,18 +265,20 @@ static void free_equiv_cpu_table(void) } } -static int generic_load_microcode(int cpu, void *data, size_t size, - int (*get_ucode_data)(void *, const void *, size_t)) +static int generic_load_microcode(int cpu, const u8 *data, size_t size) { struct ucode_cpu_info *uci = ucode_cpu_info + cpu; - u8 *ucode_ptr = data, *new_mc = NULL, *mc; + const u8 *ucode_ptr = data; + void *new_mc = NULL; + void *mc; int new_rev = uci->cpu_sig.rev; unsigned int leftover; unsigned long offset; - offset = install_equiv_cpu_table(ucode_ptr, get_ucode_data); + offset = install_equiv_cpu_table(ucode_ptr); if (!offset) { - printk(KERN_ERR "microcode: installing equivalent cpu table failed\n"); + printk(KERN_ERR "microcode: failed to create " + "equivalent cpu table\n"); return -EINVAL; } @@ -343,7 +289,7 @@ static int generic_load_microcode(int cpu, void *data, size_t size, unsigned int uninitialized_var(mc_size); struct microcode_header_amd *mc_header; - mc = get_next_ucode(ucode_ptr, leftover, get_ucode_data, &mc_size); + mc = get_next_ucode(ucode_ptr, leftover, &mc_size); if (!mc) break; @@ -353,7 +299,7 @@ static int generic_load_microcode(int cpu, void *data, size_t size, vfree(new_mc); new_rev = mc_header->patch_id; new_mc = mc; - } else + } else vfree(mc); ucode_ptr += mc_size; @@ -365,9 +311,9 @@ static int generic_load_microcode(int cpu, void *data, size_t size, if (uci->mc) vfree(uci->mc); uci->mc = new_mc; - pr_debug("microcode: CPU%d found a matching microcode update with" - " version 0x%x (current=0x%x)\n", - cpu, new_rev, uci->cpu_sig.rev); + pr_debug("microcode: CPU%d found a matching microcode " + "update with version 0x%x (current=0x%x)\n", + cpu, new_rev, uci->cpu_sig.rev); } else vfree(new_mc); } @@ -377,12 +323,6 @@ static int generic_load_microcode(int cpu, void *data, size_t size, return (int)leftover; } -static int get_ucode_fw(void *to, const void *from, size_t n) -{ - memcpy(to, from, n); - return 0; -} - static int request_microcode_fw(int cpu, struct device *device) { const char *fw_name = "amd-ucode/microcode_amd.bin"; @@ -394,12 +334,11 @@ static int request_microcode_fw(int cpu, struct device *device) ret = request_firmware(&firmware, fw_name, device); if (ret) { - printk(KERN_ERR "microcode: ucode data file %s load failed\n", fw_name); + printk(KERN_ERR "microcode: failed to load file %s\n", fw_name); return ret; } - ret = generic_load_microcode(cpu, (void*)firmware->data, firmware->size, - &get_ucode_fw); + ret = generic_load_microcode(cpu, firmware->data, firmware->size); release_firmware(firmware); @@ -408,8 +347,8 @@ static int request_microcode_fw(int cpu, struct device *device) static int request_microcode_user(int cpu, const void __user *buf, size_t size) { - printk(KERN_WARNING "microcode: AMD microcode update via /dev/cpu/microcode" - "is not supported\n"); + printk(KERN_INFO "microcode: AMD microcode update via " + "/dev/cpu/microcode 
not supported\n"); return -1; } @@ -433,3 +372,4 @@ struct microcode_ops * __init init_amd_microcode(void) { return &microcode_amd_ops; } + diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c index c4b5b24e0217..c9b721ba968c 100644 --- a/arch/x86/kernel/microcode_core.c +++ b/arch/x86/kernel/microcode_core.c @@ -99,7 +99,7 @@ MODULE_LICENSE("GPL"); #define MICROCODE_VERSION "2.00" -struct microcode_ops *microcode_ops; +static struct microcode_ops *microcode_ops; /* no concurrent ->write()s are allowed on /dev/cpu/microcode */ static DEFINE_MUTEX(microcode_mutex); @@ -203,7 +203,7 @@ MODULE_ALIAS_MISCDEV(MICROCODE_MINOR); #endif /* fake device for request_firmware */ -struct platform_device *microcode_pdev; +static struct platform_device *microcode_pdev; static ssize_t reload_store(struct sys_device *dev, struct sysdev_attribute *attr, @@ -328,7 +328,7 @@ static int microcode_resume_cpu(int cpu) return 0; } -void microcode_update_cpu(int cpu) +static void microcode_update_cpu(int cpu) { struct ucode_cpu_info *uci = ucode_cpu_info + cpu; int err = 0; diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c index a8e62792d171..b7f4c929e615 100644 --- a/arch/x86/kernel/microcode_intel.c +++ b/arch/x86/kernel/microcode_intel.c @@ -471,7 +471,7 @@ static void microcode_fini_cpu(int cpu) uci->mc = NULL; } -struct microcode_ops microcode_intel_ops = { +static struct microcode_ops microcode_intel_ops = { .request_microcode_user = request_microcode_user, .request_microcode_fw = request_microcode_fw, .collect_cpu_info = collect_cpu_info, diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index 0f4c1fd5a1f4..45e3b69808ba 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c @@ -586,26 +586,23 @@ static void __init __get_smp_config(unsigned int early) { struct intel_mp_floating *mpf = mpf_found; - if (x86_quirks->mach_get_smp_config) { - if (x86_quirks->mach_get_smp_config(early)) - return; - } + if (!mpf) + return; + if (acpi_lapic && early) return; + /* - * ACPI supports both logical (e.g. Hyper-Threading) and physical - * processors, where MPS only supports physical. 
+ * MPS doesn't support hyperthreading, aka only have + * thread 0 apic id in MPS table */ - if (acpi_lapic && acpi_ioapic) { - printk(KERN_INFO "Using ACPI (MADT) for SMP configuration " - "information\n"); + if (acpi_lapic && acpi_ioapic) return; - } else if (acpi_lapic) - printk(KERN_INFO "Using ACPI for processor (LAPIC) " - "configuration information\n"); - if (!mpf) - return; + if (x86_quirks->mach_get_smp_config) { + if (x86_quirks->mach_get_smp_config(early)) + return; + } printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification); diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c index 2c97f07f1c2c..8bd1bf9622a7 100644 --- a/arch/x86/kernel/nmi.c +++ b/arch/x86/kernel/nmi.c @@ -131,6 +131,11 @@ static void report_broken_nmi(int cpu, int *prev_nmi_count) atomic_dec(&nmi_active); } +static void __acpi_nmi_disable(void *__unused) +{ + apic_write(APIC_LVT0, APIC_DM_NMI | APIC_LVT_MASKED); +} + int __init check_nmi_watchdog(void) { unsigned int *prev_nmi_count; @@ -179,8 +184,12 @@ int __init check_nmi_watchdog(void) kfree(prev_nmi_count); return 0; error: - if (nmi_watchdog == NMI_IO_APIC && !timer_through_8259) - disable_8259A_irq(0); + if (nmi_watchdog == NMI_IO_APIC) { + if (!timer_through_8259) + disable_8259A_irq(0); + on_each_cpu(__acpi_nmi_disable, NULL, 1); + } + #ifdef CONFIG_X86_32 timer_ack = 0; #endif @@ -199,12 +208,17 @@ static int __init setup_nmi_watchdog(char *str) ++str; } - get_option(&str, &nmi); - - if (nmi >= NMI_INVALID) - return 0; + if (!strncmp(str, "lapic", 5)) + nmi_watchdog = NMI_LOCAL_APIC; + else if (!strncmp(str, "ioapic", 6)) + nmi_watchdog = NMI_IO_APIC; + else { + get_option(&str, &nmi); + if (nmi >= NMI_INVALID) + return 0; + nmi_watchdog = nmi; + } - nmi_watchdog = nmi; return 1; } __setup("nmi_watchdog=", setup_nmi_watchdog); @@ -285,11 +299,6 @@ void acpi_nmi_enable(void) on_each_cpu(__acpi_nmi_enable, NULL, 1); } -static void __acpi_nmi_disable(void *__unused) -{ - apic_write(APIC_LVT0, APIC_DM_NMI | APIC_LVT_MASKED); -} - /* * Disable timer based NMIs on all CPUs: */ @@ -340,6 +349,8 @@ void stop_apic_nmi_watchdog(void *unused) return; if (nmi_watchdog == NMI_LOCAL_APIC) lapic_watchdog_stop(); + else + __acpi_nmi_disable(NULL); __get_cpu_var(wd_enabled) = 0; atomic_dec(&nmi_active); } @@ -465,6 +476,24 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason) #ifdef CONFIG_SYSCTL +static void enable_ioapic_nmi_watchdog_single(void *unused) +{ + __get_cpu_var(wd_enabled) = 1; + atomic_inc(&nmi_active); + __acpi_nmi_enable(NULL); +} + +static void enable_ioapic_nmi_watchdog(void) +{ + on_each_cpu(enable_ioapic_nmi_watchdog_single, NULL, 1); + touch_nmi_watchdog(); +} + +static void disable_ioapic_nmi_watchdog(void) +{ + on_each_cpu(stop_apic_nmi_watchdog, NULL, 1); +} + static int __init setup_unknown_nmi_panic(char *str) { unknown_nmi_panic = 1; @@ -507,6 +536,11 @@ int proc_nmi_enabled(struct ctl_table *table, int write, struct file *file, enable_lapic_nmi_watchdog(); else disable_lapic_nmi_watchdog(); + } else if (nmi_watchdog == NMI_IO_APIC) { + if (nmi_watchdog_enabled) + enable_ioapic_nmi_watchdog(); + else + disable_ioapic_nmi_watchdog(); } else { printk(KERN_WARNING "NMI watchdog doesn't know what hardware to touch\n"); diff --git a/arch/x86/kernel/numaq_32.c b/arch/x86/kernel/numaq_32.c index 4caff39078e0..0deea37a53cf 100644 --- a/arch/x86/kernel/numaq_32.c +++ b/arch/x86/kernel/numaq_32.c @@ -31,7 +31,7 @@ #include <asm/numaq.h> #include <asm/topology.h> #include <asm/processor.h> -#include 
<asm/mpspec.h> +#include <asm/genapic.h> #include <asm/e820.h> #include <asm/setup.h> @@ -235,6 +235,13 @@ static int __init numaq_setup_ioapic_ids(void) return 1; } +static int __init numaq_update_genapic(void) +{ + genapic->wakeup_cpu = wakeup_secondary_cpu_via_nmi; + + return 0; +} + static struct x86_quirks numaq_x86_quirks __initdata = { .arch_pre_time_init = numaq_pre_time_init, .arch_time_init = NULL, @@ -250,6 +257,7 @@ static struct x86_quirks numaq_x86_quirks __initdata = { .mpc_oem_pci_bus = mpc_oem_pci_bus, .smp_read_mpc_oem = smp_read_mpc_oem, .setup_ioapic_ids = numaq_setup_ioapic_ids, + .update_genapic = numaq_update_genapic, }; void numaq_mps_oem_check(struct mp_config_table *mpc, char *oem, diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index 00e07447a5bd..19a1044a0cd9 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c @@ -6,6 +6,7 @@ #include <asm/proto.h> #include <asm/dma.h> #include <asm/iommu.h> +#include <asm/gart.h> #include <asm/calgary.h> #include <asm/amd_iommu.h> @@ -30,11 +31,6 @@ int no_iommu __read_mostly; /* Set this to 1 if there is a HW IOMMU in the system */ int iommu_detected __read_mostly = 0; -/* This tells the BIO block layer to assume merging. Default to off - because we cannot guarantee merging later. */ -int iommu_bio_merge __read_mostly = 0; -EXPORT_SYMBOL(iommu_bio_merge); - dma_addr_t bad_dma_address __read_mostly = 0; EXPORT_SYMBOL(bad_dma_address); @@ -183,7 +179,6 @@ static __init int iommu_setup(char *p) } if (!strncmp(p, "biomerge", 8)) { - iommu_bio_merge = 4096; iommu_merge = 1; force_iommu = 1; } @@ -295,8 +290,8 @@ fs_initcall(pci_iommu_init); static __devinit void via_no_dac(struct pci_dev *dev) { if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) { - printk(KERN_INFO "PCI: VIA PCI bridge detected." - "Disabling DAC.\n"); + printk(KERN_INFO + "PCI: VIA PCI bridge detected. 
Disabling DAC.\n"); forbid_dac = 1; } } diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index c622772744d8..e68bb9e30864 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -1,13 +1,16 @@ #include <linux/errno.h> #include <linux/kernel.h> #include <linux/mm.h> +#include <asm/idle.h> #include <linux/smp.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/module.h> #include <linux/pm.h> #include <linux/clockchips.h> +#include <linux/ftrace.h> #include <asm/system.h> +#include <asm/apic.h> unsigned long idle_halt; EXPORT_SYMBOL(idle_halt); @@ -100,6 +103,9 @@ static inline int hlt_use_halt(void) void default_idle(void) { if (hlt_use_halt()) { + struct power_trace it; + + trace_power_start(&it, POWER_CSTATE, 1); current_thread_info()->status &= ~TS_POLLING; /* * TS_POLLING-cleared state must be visible before we @@ -112,6 +118,7 @@ void default_idle(void) else local_irq_enable(); current_thread_info()->status |= TS_POLLING; + trace_power_end(&it); } else { local_irq_enable(); /* loop is done by the caller */ @@ -122,6 +129,21 @@ void default_idle(void) EXPORT_SYMBOL(default_idle); #endif +void stop_this_cpu(void *dummy) +{ + local_irq_disable(); + /* + * Remove this CPU: + */ + cpu_clear(smp_processor_id(), cpu_online_map); + disable_local_APIC(); + + for (;;) { + if (hlt_works(smp_processor_id())) + halt(); + } +} + static void do_nothing(void *unused) { } @@ -154,24 +176,31 @@ EXPORT_SYMBOL_GPL(cpu_idle_wait); */ void mwait_idle_with_hints(unsigned long ax, unsigned long cx) { + struct power_trace it; + + trace_power_start(&it, POWER_CSTATE, (ax>>4)+1); if (!need_resched()) { __monitor((void *)&current_thread_info()->flags, 0, 0); smp_mb(); if (!need_resched()) __mwait(ax, cx); } + trace_power_end(&it); } /* Default MONITOR/MWAIT with no hints, used for default C1 state */ static void mwait_idle(void) { + struct power_trace it; if (!need_resched()) { + trace_power_start(&it, POWER_CSTATE, 1); __monitor((void *)&current_thread_info()->flags, 0, 0); smp_mb(); if (!need_resched()) __sti_mwait(0, 0); else local_irq_enable(); + trace_power_end(&it); } else local_irq_enable(); } @@ -183,9 +212,13 @@ static void mwait_idle(void) */ static void poll_idle(void) { + struct power_trace it; + + trace_power_start(&it, POWER_CSTATE, 0); local_irq_enable(); while (!need_resched()) cpu_relax(); + trace_power_end(&it); } /* @@ -270,7 +303,7 @@ static void c1e_idle(void) rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi); if (lo & K8_INTP_C1E_ACTIVE_MASK) { c1e_detected = 1; - if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) + if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) mark_tsc_unstable("TSC halt in AMD C1E"); printk(KERN_INFO "System has AMD C1E enabled\n"); set_cpu_cap(&boot_cpu_data, X86_FEATURE_AMDC1E); diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 0a1302fe6d45..3ba155d24884 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -38,6 +38,7 @@ #include <linux/percpu.h> #include <linux/prctl.h> #include <linux/dmi.h> +#include <linux/ftrace.h> #include <asm/uaccess.h> #include <asm/pgtable.h> @@ -59,6 +60,7 @@ #include <asm/idle.h> #include <asm/syscalls.h> #include <asm/smp.h> +#include <asm/ds.h> asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); @@ -250,14 +252,8 @@ void exit_thread(void) tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET; put_cpu(); } -#ifdef CONFIG_X86_DS - /* Free any DS contexts that have not been properly released. 
*/ - if (unlikely(current->thread.ds_ctx)) { - /* we clear debugctl to make sure DS is not used. */ - update_debugctlmsr(0); - ds_free(current->thread.ds_ctx); - } -#endif /* CONFIG_X86_DS */ + + ds_exit_thread(current); } void flush_thread(void) @@ -339,6 +335,12 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp, kfree(p->thread.io_bitmap_ptr); p->thread.io_bitmap_max = 0; } + + ds_copy_thread(p, current); + + clear_tsk_thread_flag(p, TIF_DEBUGCTLMSR); + p->thread.debugctlmsr = 0; + return err; } @@ -419,48 +421,19 @@ int set_tsc_mode(unsigned int val) return 0; } -#ifdef CONFIG_X86_DS -static int update_debugctl(struct thread_struct *prev, - struct thread_struct *next, unsigned long debugctl) -{ - unsigned long ds_prev = 0; - unsigned long ds_next = 0; - - if (prev->ds_ctx) - ds_prev = (unsigned long)prev->ds_ctx->ds; - if (next->ds_ctx) - ds_next = (unsigned long)next->ds_ctx->ds; - - if (ds_next != ds_prev) { - /* we clear debugctl to make sure DS - * is not in use when we change it */ - debugctl = 0; - update_debugctlmsr(0); - wrmsr(MSR_IA32_DS_AREA, ds_next, 0); - } - return debugctl; -} -#else -static int update_debugctl(struct thread_struct *prev, - struct thread_struct *next, unsigned long debugctl) -{ - return debugctl; -} -#endif /* CONFIG_X86_DS */ - static noinline void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, struct tss_struct *tss) { struct thread_struct *prev, *next; - unsigned long debugctl; prev = &prev_p->thread; next = &next_p->thread; - debugctl = update_debugctl(prev, next, prev->debugctlmsr); - - if (next->debugctlmsr != debugctl) + if (test_tsk_thread_flag(next_p, TIF_DS_AREA_MSR) || + test_tsk_thread_flag(prev_p, TIF_DS_AREA_MSR)) + ds_switch_to(prev_p, next_p); + else if (next->debugctlmsr != prev->debugctlmsr) update_debugctlmsr(next->debugctlmsr); if (test_tsk_thread_flag(next_p, TIF_DEBUG)) { @@ -482,15 +455,6 @@ __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, hard_enable_TSC(); } -#ifdef CONFIG_X86_PTRACE_BTS - if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS)) - ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS); - - if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS)) - ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES); -#endif /* CONFIG_X86_PTRACE_BTS */ - - if (!test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) { /* * Disable the bitmap via an invalid offset. We still cache @@ -548,7 +512,8 @@ __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, * the task-switch, and shows up in ret_from_fork in entry.S, * for example. */ -struct task_struct * __switch_to(struct task_struct *prev_p, struct task_struct *next_p) +__notrace_funcgraph struct task_struct * +__switch_to(struct task_struct *prev_p, struct task_struct *next_p) { struct thread_struct *prev = &prev_p->thread, *next = &next_p->thread; diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index c958120fb1b6..416fb9282f4f 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -39,6 +39,7 @@ #include <linux/prctl.h> #include <linux/uaccess.h> #include <linux/io.h> +#include <linux/ftrace.h> #include <asm/pgtable.h> #include <asm/system.h> @@ -52,6 +53,7 @@ #include <asm/ia32.h> #include <asm/idle.h> #include <asm/syscalls.h> +#include <asm/ds.h> asmlinkage extern void ret_from_fork(void); @@ -235,14 +237,8 @@ void exit_thread(void) t->io_bitmap_max = 0; put_cpu(); } -#ifdef CONFIG_X86_DS - /* Free any DS contexts that have not been properly released. 
*/ - if (unlikely(t->ds_ctx)) { - /* we clear debugctl to make sure DS is not used. */ - update_debugctlmsr(0); - ds_free(t->ds_ctx); - } -#endif /* CONFIG_X86_DS */ + + ds_exit_thread(current); } void flush_thread(void) @@ -372,6 +368,12 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp, if (err) goto out; } + + ds_copy_thread(p, me); + + clear_tsk_thread_flag(p, TIF_DEBUGCTLMSR); + p->thread.debugctlmsr = 0; + err = 0; out: if (err && p->thread.io_bitmap_ptr) { @@ -470,35 +472,14 @@ static inline void __switch_to_xtra(struct task_struct *prev_p, struct tss_struct *tss) { struct thread_struct *prev, *next; - unsigned long debugctl; prev = &prev_p->thread, next = &next_p->thread; - debugctl = prev->debugctlmsr; - -#ifdef CONFIG_X86_DS - { - unsigned long ds_prev = 0, ds_next = 0; - - if (prev->ds_ctx) - ds_prev = (unsigned long)prev->ds_ctx->ds; - if (next->ds_ctx) - ds_next = (unsigned long)next->ds_ctx->ds; - - if (ds_next != ds_prev) { - /* - * We clear debugctl to make sure DS - * is not in use when we change it: - */ - debugctl = 0; - update_debugctlmsr(0); - wrmsrl(MSR_IA32_DS_AREA, ds_next); - } - } -#endif /* CONFIG_X86_DS */ - - if (next->debugctlmsr != debugctl) + if (test_tsk_thread_flag(next_p, TIF_DS_AREA_MSR) || + test_tsk_thread_flag(prev_p, TIF_DS_AREA_MSR)) + ds_switch_to(prev_p, next_p); + else if (next->debugctlmsr != prev->debugctlmsr) update_debugctlmsr(next->debugctlmsr); if (test_tsk_thread_flag(next_p, TIF_DEBUG)) { @@ -533,14 +514,6 @@ static inline void __switch_to_xtra(struct task_struct *prev_p, */ memset(tss->io_bitmap, 0xff, prev->io_bitmap_max); } - -#ifdef CONFIG_X86_PTRACE_BTS - if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS)) - ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS); - - if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS)) - ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES); -#endif /* CONFIG_X86_PTRACE_BTS */ } /* @@ -551,8 +524,9 @@ static inline void __switch_to_xtra(struct task_struct *prev_p, * - could test fs/gs bitsliced * * Kprobes not supported here. Set the probe on schedule instead. + * Function graph tracer not supported too. */ -struct task_struct * +__notrace_funcgraph struct task_struct * __switch_to(struct task_struct *prev_p, struct task_struct *next_p) { struct thread_struct *prev = &prev_p->thread; diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index 0a6d8c12e10d..0a5df5f82fb9 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -581,158 +581,91 @@ static int ioperm_get(struct task_struct *target, } #ifdef CONFIG_X86_PTRACE_BTS -/* - * The configuration for a particular BTS hardware implementation. - */ -struct bts_configuration { - /* the size of a BTS record in bytes; at most BTS_MAX_RECORD_SIZE */ - unsigned char sizeof_bts; - /* the size of a field in the BTS record in bytes */ - unsigned char sizeof_field; - /* a bitmask to enable/disable BTS in DEBUGCTL MSR */ - unsigned long debugctl_mask; -}; -static struct bts_configuration bts_cfg; - -#define BTS_MAX_RECORD_SIZE (8 * 3) - - -/* - * Branch Trace Store (BTS) uses the following format. Different - * architectures vary in the size of those fields. - * - source linear address - * - destination linear address - * - flags - * - * Later architectures use 64bit pointers throughout, whereas earlier - * architectures use 32bit pointers in 32bit mode. 
- * - * We compute the base address for the first 8 fields based on: - * - the field size stored in the DS configuration - * - the relative field position - * - * In order to store additional information in the BTS buffer, we use - * a special source address to indicate that the record requires - * special interpretation. - * - * Netburst indicated via a bit in the flags field whether the branch - * was predicted; this is ignored. - */ - -enum bts_field { - bts_from = 0, - bts_to, - bts_flags, - - bts_escape = (unsigned long)-1, - bts_qual = bts_to, - bts_jiffies = bts_flags -}; - -static inline unsigned long bts_get(const char *base, enum bts_field field) -{ - base += (bts_cfg.sizeof_field * field); - return *(unsigned long *)base; -} - -static inline void bts_set(char *base, enum bts_field field, unsigned long val) -{ - base += (bts_cfg.sizeof_field * field);; - (*(unsigned long *)base) = val; -} - -/* - * Translate a BTS record from the raw format into the bts_struct format - * - * out (out): bts_struct interpretation - * raw: raw BTS record - */ -static void ptrace_bts_translate_record(struct bts_struct *out, const void *raw) -{ - memset(out, 0, sizeof(*out)); - if (bts_get(raw, bts_from) == bts_escape) { - out->qualifier = bts_get(raw, bts_qual); - out->variant.jiffies = bts_get(raw, bts_jiffies); - } else { - out->qualifier = BTS_BRANCH; - out->variant.lbr.from_ip = bts_get(raw, bts_from); - out->variant.lbr.to_ip = bts_get(raw, bts_to); - } -} - static int ptrace_bts_read_record(struct task_struct *child, size_t index, struct bts_struct __user *out) { - struct bts_struct ret; - const void *bts_record; - size_t bts_index, bts_end; + const struct bts_trace *trace; + struct bts_struct bts; + const unsigned char *at; int error; - error = ds_get_bts_end(child, &bts_end); - if (error < 0) - return error; - - if (bts_end <= index) - return -EINVAL; + trace = ds_read_bts(child->bts); + if (!trace) + return -EPERM; - error = ds_get_bts_index(child, &bts_index); - if (error < 0) - return error; + at = trace->ds.top - ((index + 1) * trace->ds.size); + if ((void *)at < trace->ds.begin) + at += (trace->ds.n * trace->ds.size); - /* translate the ptrace bts index into the ds bts index */ - bts_index += bts_end - (index + 1); - if (bts_end <= bts_index) - bts_index -= bts_end; + if (!trace->read) + return -EOPNOTSUPP; - error = ds_access_bts(child, bts_index, &bts_record); + error = trace->read(child->bts, at, &bts); if (error < 0) return error; - ptrace_bts_translate_record(&ret, bts_record); - - if (copy_to_user(out, &ret, sizeof(ret))) + if (copy_to_user(out, &bts, sizeof(bts))) return -EFAULT; - return sizeof(ret); + return sizeof(bts); } static int ptrace_bts_drain(struct task_struct *child, long size, struct bts_struct __user *out) { - struct bts_struct ret; - const unsigned char *raw; - size_t end, i; - int error; + const struct bts_trace *trace; + const unsigned char *at; + int error, drained = 0; - error = ds_get_bts_index(child, &end); - if (error < 0) - return error; + trace = ds_read_bts(child->bts); + if (!trace) + return -EPERM; - if (size < (end * sizeof(struct bts_struct))) + if (!trace->read) + return -EOPNOTSUPP; + + if (size < (trace->ds.top - trace->ds.begin)) return -EIO; - error = ds_access_bts(child, 0, (const void **)&raw); - if (error < 0) - return error; + for (at = trace->ds.begin; (void *)at < trace->ds.top; + out++, drained++, at += trace->ds.size) { + struct bts_struct bts; + int error; - for (i = 0; i < end; i++, out++, raw += bts_cfg.sizeof_bts) { - 
ptrace_bts_translate_record(&ret, raw); + error = trace->read(child->bts, at, &bts); + if (error < 0) + return error; - if (copy_to_user(out, &ret, sizeof(ret))) + if (copy_to_user(out, &bts, sizeof(bts))) return -EFAULT; } - error = ds_clear_bts(child); + memset(trace->ds.begin, 0, trace->ds.n * trace->ds.size); + + error = ds_reset_bts(child->bts); if (error < 0) return error; - return end; + return drained; } -static void ptrace_bts_ovfl(struct task_struct *child) +static int ptrace_bts_allocate_buffer(struct task_struct *child, size_t size) { - send_sig(child->thread.bts_ovfl_signal, child, 0); + child->bts_buffer = alloc_locked_buffer(size); + if (!child->bts_buffer) + return -ENOMEM; + + child->bts_size = size; + + return 0; +} + +static void ptrace_bts_free_buffer(struct task_struct *child) +{ + free_locked_buffer(child->bts_buffer, child->bts_size); + child->bts_buffer = NULL; + child->bts_size = 0; } static int ptrace_bts_config(struct task_struct *child, @@ -740,114 +673,86 @@ static int ptrace_bts_config(struct task_struct *child, const struct ptrace_bts_config __user *ucfg) { struct ptrace_bts_config cfg; - int error = 0; - - error = -EOPNOTSUPP; - if (!bts_cfg.sizeof_bts) - goto errout; + unsigned int flags = 0; - error = -EIO; if (cfg_size < sizeof(cfg)) - goto errout; + return -EIO; - error = -EFAULT; if (copy_from_user(&cfg, ucfg, sizeof(cfg))) - goto errout; + return -EFAULT; - error = -EINVAL; - if ((cfg.flags & PTRACE_BTS_O_SIGNAL) && - !(cfg.flags & PTRACE_BTS_O_ALLOC)) - goto errout; + if (child->bts) { + ds_release_bts(child->bts); + child->bts = NULL; + } - if (cfg.flags & PTRACE_BTS_O_ALLOC) { - ds_ovfl_callback_t ovfl = NULL; - unsigned int sig = 0; + if (cfg.flags & PTRACE_BTS_O_SIGNAL) { + if (!cfg.signal) + return -EINVAL; - /* we ignore the error in case we were not tracing child */ - (void)ds_release_bts(child); + return -EOPNOTSUPP; - if (cfg.flags & PTRACE_BTS_O_SIGNAL) { - if (!cfg.signal) - goto errout; + child->thread.bts_ovfl_signal = cfg.signal; + } - sig = cfg.signal; - ovfl = ptrace_bts_ovfl; - } + if ((cfg.flags & PTRACE_BTS_O_ALLOC) && + (cfg.size != child->bts_size)) { + int error; - error = ds_request_bts(child, /* base = */ NULL, cfg.size, ovfl); - if (error < 0) - goto errout; + ptrace_bts_free_buffer(child); - child->thread.bts_ovfl_signal = sig; + error = ptrace_bts_allocate_buffer(child, cfg.size); + if (error < 0) + return error; } - error = -EINVAL; - if (!child->thread.ds_ctx && cfg.flags) - goto errout; - if (cfg.flags & PTRACE_BTS_O_TRACE) - child->thread.debugctlmsr |= bts_cfg.debugctl_mask; - else - child->thread.debugctlmsr &= ~bts_cfg.debugctl_mask; + flags |= BTS_USER; if (cfg.flags & PTRACE_BTS_O_SCHED) - set_tsk_thread_flag(child, TIF_BTS_TRACE_TS); - else - clear_tsk_thread_flag(child, TIF_BTS_TRACE_TS); + flags |= BTS_TIMESTAMPS; - error = sizeof(cfg); + child->bts = ds_request_bts(child, child->bts_buffer, child->bts_size, + /* ovfl = */ NULL, /* th = */ (size_t)-1, + flags); + if (IS_ERR(child->bts)) { + int error = PTR_ERR(child->bts); -out: - if (child->thread.debugctlmsr) - set_tsk_thread_flag(child, TIF_DEBUGCTLMSR); - else - clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR); + ptrace_bts_free_buffer(child); + child->bts = NULL; - return error; + return error; + } -errout: - child->thread.debugctlmsr &= ~bts_cfg.debugctl_mask; - clear_tsk_thread_flag(child, TIF_BTS_TRACE_TS); - goto out; + return sizeof(cfg); } static int ptrace_bts_status(struct task_struct *child, long cfg_size, struct ptrace_bts_config __user *ucfg) { + 
const struct bts_trace *trace; struct ptrace_bts_config cfg; - size_t end; - const void *base, *max; - int error; if (cfg_size < sizeof(cfg)) return -EIO; - error = ds_get_bts_end(child, &end); - if (error < 0) - return error; - - error = ds_access_bts(child, /* index = */ 0, &base); - if (error < 0) - return error; - - error = ds_access_bts(child, /* index = */ end, &max); - if (error < 0) - return error; + trace = ds_read_bts(child->bts); + if (!trace) + return -EPERM; memset(&cfg, 0, sizeof(cfg)); - cfg.size = (max - base); + cfg.size = trace->ds.end - trace->ds.begin; cfg.signal = child->thread.bts_ovfl_signal; cfg.bts_size = sizeof(struct bts_struct); if (cfg.signal) cfg.flags |= PTRACE_BTS_O_SIGNAL; - if (test_tsk_thread_flag(child, TIF_DEBUGCTLMSR) && - child->thread.debugctlmsr & bts_cfg.debugctl_mask) + if (trace->ds.flags & BTS_USER) cfg.flags |= PTRACE_BTS_O_TRACE; - if (test_tsk_thread_flag(child, TIF_BTS_TRACE_TS)) + if (trace->ds.flags & BTS_TIMESTAMPS) cfg.flags |= PTRACE_BTS_O_SCHED; if (copy_to_user(ucfg, &cfg, sizeof(cfg))) @@ -856,110 +761,77 @@ static int ptrace_bts_status(struct task_struct *child, return sizeof(cfg); } -static int ptrace_bts_write_record(struct task_struct *child, - const struct bts_struct *in) +static int ptrace_bts_clear(struct task_struct *child) { - unsigned char bts_record[BTS_MAX_RECORD_SIZE]; + const struct bts_trace *trace; - BUG_ON(BTS_MAX_RECORD_SIZE < bts_cfg.sizeof_bts); + trace = ds_read_bts(child->bts); + if (!trace) + return -EPERM; - memset(bts_record, 0, bts_cfg.sizeof_bts); - switch (in->qualifier) { - case BTS_INVALID: - break; + memset(trace->ds.begin, 0, trace->ds.n * trace->ds.size); - case BTS_BRANCH: - bts_set(bts_record, bts_from, in->variant.lbr.from_ip); - bts_set(bts_record, bts_to, in->variant.lbr.to_ip); - break; + return ds_reset_bts(child->bts); +} - case BTS_TASK_ARRIVES: - case BTS_TASK_DEPARTS: - bts_set(bts_record, bts_from, bts_escape); - bts_set(bts_record, bts_qual, in->qualifier); - bts_set(bts_record, bts_jiffies, in->variant.jiffies); - break; +static int ptrace_bts_size(struct task_struct *child) +{ + const struct bts_trace *trace; - default: - return -EINVAL; - } + trace = ds_read_bts(child->bts); + if (!trace) + return -EPERM; - /* The writing task will be the switched-to task on a context - * switch. It needs to write into the switched-from task's BTS - * buffer. */ - return ds_unchecked_write_bts(child, bts_record, bts_cfg.sizeof_bts); + return (trace->ds.top - trace->ds.begin) / trace->ds.size; } -void ptrace_bts_take_timestamp(struct task_struct *tsk, - enum bts_qualifier qualifier) +static void ptrace_bts_fork(struct task_struct *tsk) { - struct bts_struct rec = { - .qualifier = qualifier, - .variant.jiffies = jiffies_64 - }; - - ptrace_bts_write_record(tsk, &rec); + tsk->bts = NULL; + tsk->bts_buffer = NULL; + tsk->bts_size = 0; + tsk->thread.bts_ovfl_signal = 0; } -static const struct bts_configuration bts_cfg_netburst = { - .sizeof_bts = sizeof(long) * 3, - .sizeof_field = sizeof(long), - .debugctl_mask = (1<<2)|(1<<3)|(1<<5) -}; +static void ptrace_bts_untrace(struct task_struct *child) +{ + if (unlikely(child->bts)) { + ds_release_bts(child->bts); + child->bts = NULL; + + /* We cannot update total_vm and locked_vm since + child's mm is already gone. But we can reclaim the + memory. 
*/ + kfree(child->bts_buffer); + child->bts_buffer = NULL; + child->bts_size = 0; + } +} -static const struct bts_configuration bts_cfg_pentium_m = { - .sizeof_bts = sizeof(long) * 3, - .sizeof_field = sizeof(long), - .debugctl_mask = (1<<6)|(1<<7) -}; +static void ptrace_bts_detach(struct task_struct *child) +{ + if (unlikely(child->bts)) { + ds_release_bts(child->bts); + child->bts = NULL; -static const struct bts_configuration bts_cfg_core2 = { - .sizeof_bts = 8 * 3, - .sizeof_field = 8, - .debugctl_mask = (1<<6)|(1<<7)|(1<<9) -}; + ptrace_bts_free_buffer(child); + } +} +#else +static inline void ptrace_bts_fork(struct task_struct *tsk) {} +static inline void ptrace_bts_detach(struct task_struct *child) {} +static inline void ptrace_bts_untrace(struct task_struct *child) {} +#endif /* CONFIG_X86_PTRACE_BTS */ -static inline void bts_configure(const struct bts_configuration *cfg) +void x86_ptrace_fork(struct task_struct *child, unsigned long clone_flags) { - bts_cfg = *cfg; + ptrace_bts_fork(child); } -void __cpuinit ptrace_bts_init_intel(struct cpuinfo_x86 *c) +void x86_ptrace_untrace(struct task_struct *child) { - switch (c->x86) { - case 0x6: - switch (c->x86_model) { - case 0xD: - case 0xE: /* Pentium M */ - bts_configure(&bts_cfg_pentium_m); - break; - case 0xF: /* Core2 */ - case 0x1C: /* Atom */ - bts_configure(&bts_cfg_core2); - break; - default: - /* sorry, don't know about them */ - break; - } - break; - case 0xF: - switch (c->x86_model) { - case 0x0: - case 0x1: - case 0x2: /* Netburst */ - bts_configure(&bts_cfg_netburst); - break; - default: - /* sorry, don't know about them */ - break; - } - break; - default: - /* sorry, don't know about them */ - break; - } + ptrace_bts_untrace(child); } -#endif /* CONFIG_X86_PTRACE_BTS */ /* * Called by kernel/ptrace.c when detaching.. 
@@ -972,15 +844,7 @@ void ptrace_disable(struct task_struct *child) #ifdef TIF_SYSCALL_EMU clear_tsk_thread_flag(child, TIF_SYSCALL_EMU); #endif -#ifdef CONFIG_X86_PTRACE_BTS - (void)ds_release_bts(child); - - child->thread.debugctlmsr &= ~bts_cfg.debugctl_mask; - if (!child->thread.debugctlmsr) - clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR); - - clear_tsk_thread_flag(child, TIF_BTS_TRACE_TS); -#endif /* CONFIG_X86_PTRACE_BTS */ + ptrace_bts_detach(child); } #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION @@ -1112,7 +976,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) break; case PTRACE_BTS_SIZE: - ret = ds_get_bts_index(child, /* pos = */ NULL); + ret = ptrace_bts_size(child); break; case PTRACE_BTS_GET: @@ -1121,7 +985,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) break; case PTRACE_BTS_CLEAR: - ret = ds_clear_bts(child); + ret = ptrace_bts_clear(child); break; case PTRACE_BTS_DRAIN: @@ -1384,6 +1248,14 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, case PTRACE_GET_THREAD_AREA: case PTRACE_SET_THREAD_AREA: +#ifdef CONFIG_X86_PTRACE_BTS + case PTRACE_BTS_CONFIG: + case PTRACE_BTS_STATUS: + case PTRACE_BTS_SIZE: + case PTRACE_BTS_GET: + case PTRACE_BTS_CLEAR: + case PTRACE_BTS_DRAIN: +#endif /* CONFIG_X86_PTRACE_BTS */ return arch_ptrace(child, request, addr, data); default: diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index cc5a2545dd41..61f718df6eec 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -21,6 +21,9 @@ # include <asm/iommu.h> #endif +#include <mach_ipi.h> + + /* * Power off function, if any */ @@ -36,7 +39,10 @@ int reboot_force; static int reboot_cpu = -1; #endif -/* reboot=b[ios] | s[mp] | t[riple] | k[bd] | e[fi] [, [w]arm | [c]old] +/* This is set by the PCI code if either type 1 or type 2 PCI is detected */ +bool port_cf9_safe = false; + +/* reboot=b[ios] | s[mp] | t[riple] | k[bd] | e[fi] [, [w]arm | [c]old] | p[ci] warm Don't set the cold reboot flag cold Set the cold reboot flag bios Reboot by jumping through the BIOS (only for X86_32) @@ -45,6 +51,7 @@ static int reboot_cpu = -1; kbd Use the keyboard controller. cold reset (default) acpi Use the RESET_REG in the FADT efi Use efi reset_system runtime service + pci Use the so-called "PCI reset register", CF9 force Avoid anything that could hang. */ static int __init reboot_setup(char *str) @@ -79,6 +86,7 @@ static int __init reboot_setup(char *str) case 'k': case 't': case 'e': + case 'p': reboot_type = *str; break; @@ -404,12 +412,27 @@ static void native_machine_emergency_restart(void) reboot_type = BOOT_KBD; break; - case BOOT_EFI: if (efi_enabled) - efi.reset_system(reboot_mode ? EFI_RESET_WARM : EFI_RESET_COLD, + efi.reset_system(reboot_mode ? 
+ EFI_RESET_WARM : + EFI_RESET_COLD, EFI_SUCCESS, 0, NULL); + reboot_type = BOOT_KBD; + break; + case BOOT_CF9: + port_cf9_safe = true; + /* fall through */ + + case BOOT_CF9_COND: + if (port_cf9_safe) { + u8 cf9 = inb(0xcf9) & ~6; + outb(cf9|2, 0xcf9); /* Request hard reset */ + udelay(50); + outb(cf9|6, 0xcf9); /* Actually do the reset */ + udelay(50); + } reboot_type = BOOT_KBD; break; } @@ -470,6 +493,11 @@ static void native_machine_restart(char *__unused) static void native_machine_halt(void) { + /* stop other cpus and apics */ + machine_shutdown(); + + /* stop this cpu */ + stop_this_cpu(NULL); } static void native_machine_power_off(void) @@ -523,3 +551,95 @@ void machine_crash_shutdown(struct pt_regs *regs) machine_ops.crash_shutdown(regs); } #endif + + +#if defined(CONFIG_SMP) + +/* This keeps a track of which one is crashing cpu. */ +static int crashing_cpu; +static nmi_shootdown_cb shootdown_callback; + +static atomic_t waiting_for_crash_ipi; + +static int crash_nmi_callback(struct notifier_block *self, + unsigned long val, void *data) +{ + int cpu; + + if (val != DIE_NMI_IPI) + return NOTIFY_OK; + + cpu = raw_smp_processor_id(); + + /* Don't do anything if this handler is invoked on crashing cpu. + * Otherwise, system will completely hang. Crashing cpu can get + * an NMI if system was initially booted with nmi_watchdog parameter. + */ + if (cpu == crashing_cpu) + return NOTIFY_STOP; + local_irq_disable(); + + shootdown_callback(cpu, (struct die_args *)data); + + atomic_dec(&waiting_for_crash_ipi); + /* Assume hlt works */ + halt(); + for (;;) + cpu_relax(); + + return 1; +} + +static void smp_send_nmi_allbutself(void) +{ + cpumask_t mask = cpu_online_map; + cpu_clear(safe_smp_processor_id(), mask); + if (!cpus_empty(mask)) + send_IPI_mask(mask, NMI_VECTOR); +} + +static struct notifier_block crash_nmi_nb = { + .notifier_call = crash_nmi_callback, +}; + +/* Halt all other CPUs, calling the specified function on each of them + * + * This function can be used to halt all other CPUs on crash + * or emergency reboot time. The function passed as parameter + * will be called inside a NMI handler on all CPUs. + */ +void nmi_shootdown_cpus(nmi_shootdown_cb callback) +{ + unsigned long msecs; + local_irq_disable(); + + /* Make a note of crashing cpu. Will be used in NMI callback.*/ + crashing_cpu = safe_smp_processor_id(); + + shootdown_callback = callback; + + atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1); + /* Would it be better to replace the trap vector here? */ + if (register_die_notifier(&crash_nmi_nb)) + return; /* return what? 
*/ + /* Ensure the new callback function is set before sending + * out the NMI + */ + wmb(); + + smp_send_nmi_allbutself(); + + msecs = 1000; /* Wait at most a second for the other cpus to stop */ + while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) { + mdelay(1); + msecs--; + } + + /* Leave the nmi callback set */ +} +#else /* !CONFIG_SMP */ +void nmi_shootdown_cpus(nmi_shootdown_cb callback) +{ + /* No other CPUs to shoot down */ +} +#endif diff --git a/arch/x86/kernel/relocate_kernel_32.S b/arch/x86/kernel/relocate_kernel_32.S index 6f50664b2ba5..a160f3119725 100644 --- a/arch/x86/kernel/relocate_kernel_32.S +++ b/arch/x86/kernel/relocate_kernel_32.S @@ -10,15 +10,12 @@ #include <asm/page.h> #include <asm/kexec.h> #include <asm/processor-flags.h> -#include <asm/pgtable.h> /* * Must be relocatable PIC code callable as a C function */ #define PTR(x) (x << 2) -#define PAGE_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY) -#define PAE_PGD_ATTR (_PAGE_PRESENT) /* control_page + KEXEC_CONTROL_CODE_MAX_SIZE * ~ control_page + PAGE_SIZE are used as data storage and stack for @@ -39,7 +36,6 @@ #define CP_PA_BACKUP_PAGES_MAP DATA(0x1c) .text - .align PAGE_SIZE .globl relocate_kernel relocate_kernel: /* Save the CPU context, used for jumping back */ @@ -60,117 +56,6 @@ relocate_kernel: movl %cr4, %eax movl %eax, CR4(%edi) -#ifdef CONFIG_X86_PAE - /* map the control page at its virtual address */ - - movl PTR(VA_PGD)(%ebp), %edi - movl PTR(VA_CONTROL_PAGE)(%ebp), %eax - andl $0xc0000000, %eax - shrl $27, %eax - addl %edi, %eax - - movl PTR(PA_PMD_0)(%ebp), %edx - orl $PAE_PGD_ATTR, %edx - movl %edx, (%eax) - - movl PTR(VA_PMD_0)(%ebp), %edi - movl PTR(VA_CONTROL_PAGE)(%ebp), %eax - andl $0x3fe00000, %eax - shrl $18, %eax - addl %edi, %eax - - movl PTR(PA_PTE_0)(%ebp), %edx - orl $PAGE_ATTR, %edx - movl %edx, (%eax) - - movl PTR(VA_PTE_0)(%ebp), %edi - movl PTR(VA_CONTROL_PAGE)(%ebp), %eax - andl $0x001ff000, %eax - shrl $9, %eax - addl %edi, %eax - - movl PTR(PA_CONTROL_PAGE)(%ebp), %edx - orl $PAGE_ATTR, %edx - movl %edx, (%eax) - - /* identity map the control page at its physical address */ - - movl PTR(VA_PGD)(%ebp), %edi - movl PTR(PA_CONTROL_PAGE)(%ebp), %eax - andl $0xc0000000, %eax - shrl $27, %eax - addl %edi, %eax - - movl PTR(PA_PMD_1)(%ebp), %edx - orl $PAE_PGD_ATTR, %edx - movl %edx, (%eax) - - movl PTR(VA_PMD_1)(%ebp), %edi - movl PTR(PA_CONTROL_PAGE)(%ebp), %eax - andl $0x3fe00000, %eax - shrl $18, %eax - addl %edi, %eax - - movl PTR(PA_PTE_1)(%ebp), %edx - orl $PAGE_ATTR, %edx - movl %edx, (%eax) - - movl PTR(VA_PTE_1)(%ebp), %edi - movl PTR(PA_CONTROL_PAGE)(%ebp), %eax - andl $0x001ff000, %eax - shrl $9, %eax - addl %edi, %eax - - movl PTR(PA_CONTROL_PAGE)(%ebp), %edx - orl $PAGE_ATTR, %edx - movl %edx, (%eax) -#else - /* map the control page at its virtual address */ - - movl PTR(VA_PGD)(%ebp), %edi - movl PTR(VA_CONTROL_PAGE)(%ebp), %eax - andl $0xffc00000, %eax - shrl $20, %eax - addl %edi, %eax - - movl PTR(PA_PTE_0)(%ebp), %edx - orl $PAGE_ATTR, %edx - movl %edx, (%eax) - - movl PTR(VA_PTE_0)(%ebp), %edi - movl PTR(VA_CONTROL_PAGE)(%ebp), %eax - andl $0x003ff000, %eax - shrl $10, %eax - addl %edi, %eax - - movl PTR(PA_CONTROL_PAGE)(%ebp), %edx - orl $PAGE_ATTR, %edx - movl %edx, (%eax) - - /* identity map the control page at its physical address */ - - movl PTR(VA_PGD)(%ebp), %edi - movl PTR(PA_CONTROL_PAGE)(%ebp), %eax - andl $0xffc00000, %eax - shrl $20, %eax - addl %edi, %eax - - movl PTR(PA_PTE_1)(%ebp), %edx - orl $PAGE_ATTR, %edx - movl %edx, (%eax) 
- - movl PTR(VA_PTE_1)(%ebp), %edi - movl PTR(PA_CONTROL_PAGE)(%ebp), %eax - andl $0x003ff000, %eax - shrl $10, %eax - addl %edi, %eax - - movl PTR(PA_CONTROL_PAGE)(%ebp), %edx - orl $PAGE_ATTR, %edx - movl %edx, (%eax) -#endif - -relocate_new_kernel: /* read the arguments and say goodbye to the stack */ movl 20+4(%esp), %ebx /* page_list */ movl 20+8(%esp), %ebp /* list of pages */ diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index bdec76e55594..08e02e8453c9 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -93,11 +93,13 @@ #include <asm/desc.h> #include <asm/dma.h> #include <asm/iommu.h> +#include <asm/gart.h> #include <asm/mmu_context.h> #include <asm/proto.h> #include <mach_apic.h> #include <asm/paravirt.h> +#include <asm/hypervisor.h> #include <asm/percpu.h> #include <asm/topology.h> @@ -448,6 +450,7 @@ static void __init reserve_early_setup_data(void) * @size: Size of the crashkernel memory to reserve. * Returns the base address on success, and -1ULL on failure. */ +static unsigned long long __init find_and_reserve_crashkernel(unsigned long long size) { const unsigned long long alignment = 16<<20; /* 16M */ @@ -583,161 +586,24 @@ static int __init setup_elfcorehdr(char *arg) early_param("elfcorehdr", setup_elfcorehdr); #endif -static struct x86_quirks default_x86_quirks __initdata; - -struct x86_quirks *x86_quirks __initdata = &default_x86_quirks; - -/* - * Some BIOSes seem to corrupt the low 64k of memory during events - * like suspend/resume and unplugging an HDMI cable. Reserve all - * remaining free memory in that area and fill it with a distinct - * pattern. - */ -#ifdef CONFIG_X86_CHECK_BIOS_CORRUPTION -#define MAX_SCAN_AREAS 8 - -static int __read_mostly memory_corruption_check = -1; - -static unsigned __read_mostly corruption_check_size = 64*1024; -static unsigned __read_mostly corruption_check_period = 60; /* seconds */ - -static struct e820entry scan_areas[MAX_SCAN_AREAS]; -static int num_scan_areas; - - -static int set_corruption_check(char *arg) -{ - char *end; - - memory_corruption_check = simple_strtol(arg, &end, 10); - - return (*end == 0) ? 0 : -EINVAL; -} -early_param("memory_corruption_check", set_corruption_check); - -static int set_corruption_check_period(char *arg) -{ - char *end; - - corruption_check_period = simple_strtoul(arg, &end, 10); - - return (*end == 0) ? 0 : -EINVAL; -} -early_param("memory_corruption_check_period", set_corruption_check_period); - -static int set_corruption_check_size(char *arg) +static int __init default_update_genapic(void) { - char *end; - unsigned size; - - size = memparse(arg, &end); - - if (*end == '\0') - corruption_check_size = size; - - return (size == corruption_check_size) ? 
0 : -EINVAL; -} -early_param("memory_corruption_check_size", set_corruption_check_size); - - -static void __init setup_bios_corruption_check(void) -{ - u64 addr = PAGE_SIZE; /* assume first page is reserved anyway */ - - if (memory_corruption_check == -1) { - memory_corruption_check = -#ifdef CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK - 1 -#else - 0 +#ifdef CONFIG_X86_SMP +# if defined(CONFIG_X86_GENERICARCH) || defined(CONFIG_X86_64) + genapic->wakeup_cpu = wakeup_secondary_cpu_via_init; +# endif #endif - ; - } - - if (corruption_check_size == 0) - memory_corruption_check = 0; - - if (!memory_corruption_check) - return; - - corruption_check_size = round_up(corruption_check_size, PAGE_SIZE); - while(addr < corruption_check_size && num_scan_areas < MAX_SCAN_AREAS) { - u64 size; - addr = find_e820_area_size(addr, &size, PAGE_SIZE); - - if (addr == 0) - break; - - if ((addr + size) > corruption_check_size) - size = corruption_check_size - addr; - - if (size == 0) - break; - - e820_update_range(addr, size, E820_RAM, E820_RESERVED); - scan_areas[num_scan_areas].addr = addr; - scan_areas[num_scan_areas].size = size; - num_scan_areas++; - - /* Assume we've already mapped this early memory */ - memset(__va(addr), 0, size); - - addr += size; - } - - printk(KERN_INFO "Scanning %d areas for low memory corruption\n", - num_scan_areas); - update_e820(); -} - -static struct timer_list periodic_check_timer; - -void check_for_bios_corruption(void) -{ - int i; - int corruption = 0; - - if (!memory_corruption_check) - return; - - for(i = 0; i < num_scan_areas; i++) { - unsigned long *addr = __va(scan_areas[i].addr); - unsigned long size = scan_areas[i].size; - - for(; size; addr++, size -= sizeof(unsigned long)) { - if (!*addr) - continue; - printk(KERN_ERR "Corrupted low memory at %p (%lx phys) = %08lx\n", - addr, __pa(addr), *addr); - corruption = 1; - *addr = 0; - } - } - - WARN(corruption, KERN_ERR "Memory corruption detected in low memory\n"); -} - -static void periodic_check_for_corruption(unsigned long data) -{ - check_for_bios_corruption(); - mod_timer(&periodic_check_timer, round_jiffies(jiffies + corruption_check_period*HZ)); + return 0; } -void start_periodic_check_for_corruption(void) -{ - if (!memory_corruption_check || corruption_check_period == 0) - return; - - printk(KERN_INFO "Scanning for low memory corruption every %d seconds\n", - corruption_check_period); +static struct x86_quirks default_x86_quirks __initdata = { + .update_genapic = default_update_genapic, +}; - init_timer(&periodic_check_timer); - periodic_check_timer.function = &periodic_check_for_corruption; - periodic_check_for_corruption(0); -} -#endif +struct x86_quirks *x86_quirks __initdata = &default_x86_quirks; +#ifdef CONFIG_X86_RESERVE_LOW_64K static int __init dmi_low_memory_corruption(const struct dmi_system_id *d) { printk(KERN_NOTICE @@ -749,6 +615,7 @@ static int __init dmi_low_memory_corruption(const struct dmi_system_id *d) return 0; } +#endif /* List of systems that have known low memory corruption BIOS problems */ static struct dmi_system_id __initdata bad_bios_dmi_table[] = { @@ -907,6 +774,12 @@ void __init setup_arch(char **cmdline_p) dmi_check_system(bad_bios_dmi_table); + /* + * VMware detection requires dmi to be available, so this + * needs to be done after dmi_scan_machine, for the BP. 
+ */ + init_hypervisor(&boot_cpu_data); + #ifdef CONFIG_X86_32 probe_roms(); #endif diff --git a/arch/x86/kernel/sigframe.h b/arch/x86/kernel/sigframe.h deleted file mode 100644 index cc673aa55ce4..000000000000 --- a/arch/x86/kernel/sigframe.h +++ /dev/null @@ -1,42 +0,0 @@ -#ifdef CONFIG_X86_32 -struct sigframe { - char __user *pretcode; - int sig; - struct sigcontext sc; - /* - * fpstate is unused. fpstate is moved/allocated after - * retcode[] below. This movement allows to have the FP state and the - * future state extensions (xsave) stay together. - * And at the same time retaining the unused fpstate, prevents changing - * the offset of extramask[] in the sigframe and thus prevent any - * legacy application accessing/modifying it. - */ - struct _fpstate fpstate_unused; - unsigned long extramask[_NSIG_WORDS-1]; - char retcode[8]; - /* fp state follows here */ -}; - -struct rt_sigframe { - char __user *pretcode; - int sig; - struct siginfo __user *pinfo; - void __user *puc; - struct siginfo info; - struct ucontext uc; - char retcode[8]; - /* fp state follows here */ -}; -#else -struct rt_sigframe { - char __user *pretcode; - struct ucontext uc; - struct siginfo info; - /* fp state follows here */ -}; - -int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, - sigset_t *set, struct pt_regs *regs); -int ia32_setup_frame(int sig, struct k_sigaction *ka, - sigset_t *set, struct pt_regs *regs); -#endif diff --git a/arch/x86/kernel/signal_32.c b/arch/x86/kernel/signal.c index d6dd057d0f22..89bb7668041d 100644 --- a/arch/x86/kernel/signal_32.c +++ b/arch/x86/kernel/signal.c @@ -1,36 +1,41 @@ /* * Copyright (C) 1991, 1992 Linus Torvalds + * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs * * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson * 2000-06-20 Pentium III FXSR, SSE support by Gareth Hughes + * 2000-2002 x86-64 support by Andi Kleen */ -#include <linux/list.h> -#include <linux/personality.h> -#include <linux/binfmts.h> -#include <linux/suspend.h> +#include <linux/sched.h> +#include <linux/mm.h> +#include <linux/smp.h> #include <linux/kernel.h> -#include <linux/ptrace.h> #include <linux/signal.h> -#include <linux/stddef.h> -#include <linux/unistd.h> #include <linux/errno.h> -#include <linux/sched.h> #include <linux/wait.h> +#include <linux/ptrace.h> #include <linux/tracehook.h> -#include <linux/elf.h> -#include <linux/smp.h> -#include <linux/mm.h> +#include <linux/unistd.h> +#include <linux/stddef.h> +#include <linux/personality.h> +#include <linux/uaccess.h> #include <asm/processor.h> #include <asm/ucontext.h> -#include <asm/uaccess.h> #include <asm/i387.h> #include <asm/vdso.h> + +#ifdef CONFIG_X86_64 +#include <asm/proto.h> +#include <asm/ia32_unistd.h> +#include <asm/mce.h> +#endif /* CONFIG_X86_64 */ + #include <asm/syscall.h> #include <asm/syscalls.h> -#include "sigframe.h" +#include <asm/sigframe.h> #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) @@ -45,74 +50,6 @@ # define FIX_EFLAGS __FIX_EFLAGS #endif -/* - * Atomically swap in the new signal mask, and wait for a signal. 
- */ -asmlinkage int -sys_sigsuspend(int history0, int history1, old_sigset_t mask) -{ - mask &= _BLOCKABLE; - spin_lock_irq(&current->sighand->siglock); - current->saved_sigmask = current->blocked; - siginitset(&current->blocked, mask); - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); - - current->state = TASK_INTERRUPTIBLE; - schedule(); - set_restore_sigmask(); - - return -ERESTARTNOHAND; -} - -asmlinkage int -sys_sigaction(int sig, const struct old_sigaction __user *act, - struct old_sigaction __user *oact) -{ - struct k_sigaction new_ka, old_ka; - int ret; - - if (act) { - old_sigset_t mask; - - if (!access_ok(VERIFY_READ, act, sizeof(*act)) || - __get_user(new_ka.sa.sa_handler, &act->sa_handler) || - __get_user(new_ka.sa.sa_restorer, &act->sa_restorer)) - return -EFAULT; - - __get_user(new_ka.sa.sa_flags, &act->sa_flags); - __get_user(mask, &act->sa_mask); - siginitset(&new_ka.sa.sa_mask, mask); - } - - ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); - - if (!ret && oact) { - if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || - __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || - __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer)) - return -EFAULT; - - __put_user(old_ka.sa.sa_flags, &oact->sa_flags); - __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask); - } - - return ret; -} - -asmlinkage int sys_sigaltstack(unsigned long bx) -{ - /* - * This is needed to make gcc realize it doesn't own the - * "struct pt_regs" - */ - struct pt_regs *regs = (struct pt_regs *)&bx; - const stack_t __user *uss = (const stack_t __user *)bx; - stack_t __user *uoss = (stack_t __user *)regs->cx; - - return do_sigaltstack(uss, uoss, regs->sp); -} - #define COPY(x) { \ err |= __get_user(regs->x, &sc->x); \ } @@ -123,7 +60,7 @@ asmlinkage int sys_sigaltstack(unsigned long bx) regs->seg = tmp; \ } -#define COPY_SEG_STRICT(seg) { \ +#define COPY_SEG_CPL3(seg) { \ unsigned short tmp; \ err |= __get_user(tmp, &sc->seg); \ regs->seg = tmp | 3; \ @@ -135,9 +72,6 @@ asmlinkage int sys_sigaltstack(unsigned long bx) loadsegment(seg, tmp); \ } -/* - * Do a signal return; undo the signal stack. - */ static int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, unsigned long *pax) @@ -149,14 +83,36 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, /* Always make any pending restarted system calls return -EINTR */ current_thread_info()->restart_block.fn = do_no_restart_syscall; +#ifdef CONFIG_X86_32 GET_SEG(gs); COPY_SEG(fs); COPY_SEG(es); COPY_SEG(ds); +#endif /* CONFIG_X86_32 */ + COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx); COPY(dx); COPY(cx); COPY(ip); - COPY_SEG_STRICT(cs); - COPY_SEG_STRICT(ss); + +#ifdef CONFIG_X86_64 + COPY(r8); + COPY(r9); + COPY(r10); + COPY(r11); + COPY(r12); + COPY(r13); + COPY(r14); + COPY(r15); +#endif /* CONFIG_X86_64 */ + +#ifdef CONFIG_X86_32 + COPY_SEG_CPL3(cs); + COPY_SEG_CPL3(ss); +#else /* !CONFIG_X86_32 */ + /* Kernel saves and restores only the CS segment register on signals, + * which is the bare minimum needed to allow mixed 32/64-bit code. + * App's signal handler can save/restore other segments if needed. 
*/ + COPY_SEG_CPL3(cs); +#endif /* CONFIG_X86_32 */ err |= __get_user(tmpflags, &sc->flags); regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS); @@ -169,102 +125,24 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, return err; } -asmlinkage unsigned long sys_sigreturn(unsigned long __unused) -{ - struct sigframe __user *frame; - struct pt_regs *regs; - unsigned long ax; - sigset_t set; - - regs = (struct pt_regs *) &__unused; - frame = (struct sigframe __user *)(regs->sp - 8); - - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) - goto badframe; - if (__get_user(set.sig[0], &frame->sc.oldmask) || (_NSIG_WORDS > 1 - && __copy_from_user(&set.sig[1], &frame->extramask, - sizeof(frame->extramask)))) - goto badframe; - - sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(&current->sighand->siglock); - current->blocked = set; - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); - - if (restore_sigcontext(regs, &frame->sc, &ax)) - goto badframe; - return ax; - -badframe: - if (show_unhandled_signals && printk_ratelimit()) { - printk("%s%s[%d] bad frame in sigreturn frame:" - "%p ip:%lx sp:%lx oeax:%lx", - task_pid_nr(current) > 1 ? KERN_INFO : KERN_EMERG, - current->comm, task_pid_nr(current), frame, regs->ip, - regs->sp, regs->orig_ax); - print_vma_addr(" in ", regs->ip); - printk(KERN_CONT "\n"); - } - - force_sig(SIGSEGV, current); - - return 0; -} - -static long do_rt_sigreturn(struct pt_regs *regs) -{ - struct rt_sigframe __user *frame; - unsigned long ax; - sigset_t set; - - frame = (struct rt_sigframe __user *)(regs->sp - sizeof(long)); - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) - goto badframe; - if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) - goto badframe; - - sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(&current->sighand->siglock); - current->blocked = set; - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); - - if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax)) - goto badframe; - - if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->sp) == -EFAULT) - goto badframe; - - return ax; - -badframe: - signal_fault(regs, frame, "rt_sigreturn"); - return 0; -} - -asmlinkage int sys_rt_sigreturn(unsigned long __unused) -{ - struct pt_regs *regs = (struct pt_regs *)&__unused; - - return do_rt_sigreturn(regs); -} - -/* - * Set up a signal frame. 
- */ static int setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate, struct pt_regs *regs, unsigned long mask) { - int tmp, err = 0; + int err = 0; - err |= __put_user(regs->fs, (unsigned int __user *)&sc->fs); - savesegment(gs, tmp); - err |= __put_user(tmp, (unsigned int __user *)&sc->gs); +#ifdef CONFIG_X86_32 + { + unsigned int tmp; + savesegment(gs, tmp); + err |= __put_user(tmp, (unsigned int __user *)&sc->gs); + } + err |= __put_user(regs->fs, (unsigned int __user *)&sc->fs); err |= __put_user(regs->es, (unsigned int __user *)&sc->es); err |= __put_user(regs->ds, (unsigned int __user *)&sc->ds); +#endif /* CONFIG_X86_32 */ + err |= __put_user(regs->di, &sc->di); err |= __put_user(regs->si, &sc->si); err |= __put_user(regs->bp, &sc->bp); @@ -273,19 +151,33 @@ setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate, err |= __put_user(regs->dx, &sc->dx); err |= __put_user(regs->cx, &sc->cx); err |= __put_user(regs->ax, &sc->ax); +#ifdef CONFIG_X86_64 + err |= __put_user(regs->r8, &sc->r8); + err |= __put_user(regs->r9, &sc->r9); + err |= __put_user(regs->r10, &sc->r10); + err |= __put_user(regs->r11, &sc->r11); + err |= __put_user(regs->r12, &sc->r12); + err |= __put_user(regs->r13, &sc->r13); + err |= __put_user(regs->r14, &sc->r14); + err |= __put_user(regs->r15, &sc->r15); +#endif /* CONFIG_X86_64 */ + err |= __put_user(current->thread.trap_no, &sc->trapno); err |= __put_user(current->thread.error_code, &sc->err); err |= __put_user(regs->ip, &sc->ip); +#ifdef CONFIG_X86_32 err |= __put_user(regs->cs, (unsigned int __user *)&sc->cs); err |= __put_user(regs->flags, &sc->flags); err |= __put_user(regs->sp, &sc->sp_at_signal); err |= __put_user(regs->ss, (unsigned int __user *)&sc->ss); +#else /* !CONFIG_X86_32 */ + err |= __put_user(regs->flags, &sc->flags); + err |= __put_user(regs->cs, &sc->cs); + err |= __put_user(0, &sc->gs); + err |= __put_user(0, &sc->fs); +#endif /* CONFIG_X86_32 */ - tmp = save_i387_xstate(fpstate); - if (tmp < 0) - err = 1; - else - err |= __put_user(tmp ? fpstate : NULL, &sc->fpstate); + err |= __put_user(fpstate, &sc->fpstate); /* non-iBCS2 extensions.. */ err |= __put_user(mask, &sc->oldmask); @@ -295,6 +187,32 @@ setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate, } /* + * Set up a signal frame. + */ +#ifdef CONFIG_X86_32 +static const struct { + u16 poplmovl; + u32 val; + u16 int80; +} __attribute__((packed)) retcode = { + 0xb858, /* popl %eax; movl $..., %eax */ + __NR_sigreturn, + 0x80cd, /* int $0x80 */ +}; + +static const struct { + u8 movl; + u32 val; + u16 int80; + u8 pad; +} __attribute__((packed)) rt_retcode = { + 0xb8, /* movl $..., %eax */ + __NR_rt_sigreturn, + 0x80cd, /* int $0x80 */ + 0 +}; + +/* * Determine which stack to use.. */ static inline void __user * @@ -328,6 +246,8 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size, if (used_math()) { sp = sp - sig_xstate_size; *fpstate = (struct _fpstate *) sp; + if (save_i387_xstate(*fpstate) < 0) + return (void __user *)-1L; } sp -= frame_size; @@ -383,9 +303,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, * reasons and because gdb uses it as a signature to notice * signal handler stack frames. 
*/ - err |= __put_user(0xb858, (short __user *)(frame->retcode+0)); - err |= __put_user(__NR_sigreturn, (int __user *)(frame->retcode+2)); - err |= __put_user(0x80cd, (short __user *)(frame->retcode+6)); + err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode); if (err) return -EFAULT; @@ -454,9 +372,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, * reasons and because gdb uses it as a signature to notice * signal handler stack frames. */ - err |= __put_user(0xb8, (char __user *)(frame->retcode+0)); - err |= __put_user(__NR_rt_sigreturn, (int __user *)(frame->retcode+1)); - err |= __put_user(0x80cd, (short __user *)(frame->retcode+5)); + err |= __put_user(*((u64 *)&rt_retcode), (u64 *)frame->retcode); if (err) return -EFAULT; @@ -475,23 +391,293 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, return 0; } +#else /* !CONFIG_X86_32 */ +/* + * Determine which stack to use.. + */ +static void __user * +get_stack(struct k_sigaction *ka, unsigned long sp, unsigned long size) +{ + /* Default to using normal stack - redzone*/ + sp -= 128; + + /* This is the X/Open sanctioned signal stack switching. */ + if (ka->sa.sa_flags & SA_ONSTACK) { + if (sas_ss_flags(sp) == 0) + sp = current->sas_ss_sp + current->sas_ss_size; + } + + return (void __user *)round_down(sp - size, 64); +} + +static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, + sigset_t *set, struct pt_regs *regs) +{ + struct rt_sigframe __user *frame; + void __user *fp = NULL; + int err = 0; + struct task_struct *me = current; + + if (used_math()) { + fp = get_stack(ka, regs->sp, sig_xstate_size); + frame = (void __user *)round_down( + (unsigned long)fp - sizeof(struct rt_sigframe), 16) - 8; + + if (save_i387_xstate(fp) < 0) + return -EFAULT; + } else + frame = get_stack(ka, regs->sp, sizeof(struct rt_sigframe)) - 8; + + if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) + return -EFAULT; + + if (ka->sa.sa_flags & SA_SIGINFO) { + if (copy_siginfo_to_user(&frame->info, info)) + return -EFAULT; + } + + /* Create the ucontext. */ + if (cpu_has_xsave) + err |= __put_user(UC_FP_XSTATE, &frame->uc.uc_flags); + else + err |= __put_user(0, &frame->uc.uc_flags); + err |= __put_user(0, &frame->uc.uc_link); + err |= __put_user(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp); + err |= __put_user(sas_ss_flags(regs->sp), + &frame->uc.uc_stack.ss_flags); + err |= __put_user(me->sas_ss_size, &frame->uc.uc_stack.ss_size); + err |= setup_sigcontext(&frame->uc.uc_mcontext, fp, regs, set->sig[0]); + err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); + + /* Set up to return from userspace. If provided, use a stub + already in userspace. */ + /* x86-64 should always use SA_RESTORER. */ + if (ka->sa.sa_flags & SA_RESTORER) { + err |= __put_user(ka->sa.sa_restorer, &frame->pretcode); + } else { + /* could use a vstub here */ + return -EFAULT; + } + + if (err) + return -EFAULT; + + /* Set up registers for signal handler */ + regs->di = sig; + /* In case the signal handler was declared without prototypes */ + regs->ax = 0; + + /* This also works for non SA_SIGINFO handlers because they expect the + next argument after the signal number on the stack. */ + regs->si = (unsigned long)&frame->info; + regs->dx = (unsigned long)&frame->uc; + regs->ip = (unsigned long) ka->sa.sa_handler; + + regs->sp = (unsigned long)frame; + + /* Set up the CS register to run signal handlers in 64-bit mode, + even if the handler happens to be interrupting 32-bit code. 
*/ + regs->cs = __USER_CS; + + return 0; +} +#endif /* CONFIG_X86_32 */ + +#ifdef CONFIG_X86_32 +/* + * Atomically swap in the new signal mask, and wait for a signal. + */ +asmlinkage int +sys_sigsuspend(int history0, int history1, old_sigset_t mask) +{ + mask &= _BLOCKABLE; + spin_lock_irq(&current->sighand->siglock); + current->saved_sigmask = current->blocked; + siginitset(&current->blocked, mask); + recalc_sigpending(); + spin_unlock_irq(&current->sighand->siglock); + + current->state = TASK_INTERRUPTIBLE; + schedule(); + set_restore_sigmask(); + + return -ERESTARTNOHAND; +} + +asmlinkage int +sys_sigaction(int sig, const struct old_sigaction __user *act, + struct old_sigaction __user *oact) +{ + struct k_sigaction new_ka, old_ka; + int ret; + + if (act) { + old_sigset_t mask; + + if (!access_ok(VERIFY_READ, act, sizeof(*act)) || + __get_user(new_ka.sa.sa_handler, &act->sa_handler) || + __get_user(new_ka.sa.sa_restorer, &act->sa_restorer)) + return -EFAULT; + + __get_user(new_ka.sa.sa_flags, &act->sa_flags); + __get_user(mask, &act->sa_mask); + siginitset(&new_ka.sa.sa_mask, mask); + } + + ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); + + if (!ret && oact) { + if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || + __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || + __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer)) + return -EFAULT; + + __put_user(old_ka.sa.sa_flags, &oact->sa_flags); + __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask); + } + + return ret; +} +#endif /* CONFIG_X86_32 */ + +#ifdef CONFIG_X86_32 +asmlinkage int sys_sigaltstack(unsigned long bx) +{ + /* + * This is needed to make gcc realize it doesn't own the + * "struct pt_regs" + */ + struct pt_regs *regs = (struct pt_regs *)&bx; + const stack_t __user *uss = (const stack_t __user *)bx; + stack_t __user *uoss = (stack_t __user *)regs->cx; + + return do_sigaltstack(uss, uoss, regs->sp); +} +#else /* !CONFIG_X86_32 */ +asmlinkage long +sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, + struct pt_regs *regs) +{ + return do_sigaltstack(uss, uoss, regs->sp); +} +#endif /* CONFIG_X86_32 */ + +/* + * Do a signal return; undo the signal stack. 
+ */ +#ifdef CONFIG_X86_32 +asmlinkage unsigned long sys_sigreturn(unsigned long __unused) +{ + struct sigframe __user *frame; + struct pt_regs *regs; + unsigned long ax; + sigset_t set; + + regs = (struct pt_regs *) &__unused; + frame = (struct sigframe __user *)(regs->sp - 8); + + if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) + goto badframe; + if (__get_user(set.sig[0], &frame->sc.oldmask) || (_NSIG_WORDS > 1 + && __copy_from_user(&set.sig[1], &frame->extramask, + sizeof(frame->extramask)))) + goto badframe; + + sigdelsetmask(&set, ~_BLOCKABLE); + spin_lock_irq(&current->sighand->siglock); + current->blocked = set; + recalc_sigpending(); + spin_unlock_irq(&current->sighand->siglock); + + if (restore_sigcontext(regs, &frame->sc, &ax)) + goto badframe; + return ax; + +badframe: + signal_fault(regs, frame, "sigreturn"); + + return 0; +} +#endif /* CONFIG_X86_32 */ + +static long do_rt_sigreturn(struct pt_regs *regs) +{ + struct rt_sigframe __user *frame; + unsigned long ax; + sigset_t set; + + frame = (struct rt_sigframe __user *)(regs->sp - sizeof(long)); + if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) + goto badframe; + if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) + goto badframe; + + sigdelsetmask(&set, ~_BLOCKABLE); + spin_lock_irq(&current->sighand->siglock); + current->blocked = set; + recalc_sigpending(); + spin_unlock_irq(&current->sighand->siglock); + + if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax)) + goto badframe; + + if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->sp) == -EFAULT) + goto badframe; + + return ax; + +badframe: + signal_fault(regs, frame, "rt_sigreturn"); + return 0; +} + +#ifdef CONFIG_X86_32 +asmlinkage int sys_rt_sigreturn(struct pt_regs regs) +{ + return do_rt_sigreturn(&regs); +} +#else /* !CONFIG_X86_32 */ +asmlinkage long sys_rt_sigreturn(struct pt_regs *regs) +{ + return do_rt_sigreturn(regs); +} +#endif /* CONFIG_X86_32 */ /* * OK, we're invoking a handler: */ static int signr_convert(int sig) { +#ifdef CONFIG_X86_32 struct thread_info *info = current_thread_info(); if (info->exec_domain && info->exec_domain->signal_invmap && sig < 32) return info->exec_domain->signal_invmap[sig]; +#endif /* CONFIG_X86_32 */ return sig; } +#ifdef CONFIG_X86_32 + #define is_ia32 1 #define ia32_setup_frame __setup_frame #define ia32_setup_rt_frame __setup_rt_frame +#else /* !CONFIG_X86_32 */ + +#ifdef CONFIG_IA32_EMULATION +#define is_ia32 test_thread_flag(TIF_IA32) +#else /* !CONFIG_IA32_EMULATION */ +#define is_ia32 0 +#endif /* CONFIG_IA32_EMULATION */ + +int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, + sigset_t *set, struct pt_regs *regs); +int ia32_setup_frame(int sig, struct k_sigaction *ka, + sigset_t *set, struct pt_regs *regs); + +#endif /* CONFIG_X86_32 */ + static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set, struct pt_regs *regs) @@ -592,7 +778,13 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, return 0; } +#ifdef CONFIG_X86_32 #define NR_restart_syscall __NR_restart_syscall +#else /* !CONFIG_X86_32 */ +#define NR_restart_syscall \ + test_thread_flag(TIF_IA32) ? __NR_ia32_restart_syscall : __NR_restart_syscall +#endif /* CONFIG_X86_32 */ + /* * Note that 'init' is a special process: it doesn't get signals it doesn't * want to handle. 
Thus you cannot kill init even with a SIGKILL even by @@ -704,8 +896,9 @@ void signal_fault(struct pt_regs *regs, void __user *frame, char *where) struct task_struct *me = current; if (show_unhandled_signals && printk_ratelimit()) { - printk(KERN_INFO + printk("%s" "%s[%d] bad frame in %s frame:%p ip:%lx sp:%lx orax:%lx", + task_pid_nr(current) > 1 ? KERN_INFO : KERN_EMERG, me->comm, me->pid, where, frame, regs->ip, regs->sp, regs->orig_ax); print_vma_addr(" in ", regs->ip); diff --git a/arch/x86/kernel/signal_64.c b/arch/x86/kernel/signal_64.c deleted file mode 100644 index a5c9627f4db9..000000000000 --- a/arch/x86/kernel/signal_64.c +++ /dev/null @@ -1,516 +0,0 @@ -/* - * Copyright (C) 1991, 1992 Linus Torvalds - * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs - * - * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson - * 2000-06-20 Pentium III FXSR, SSE support by Gareth Hughes - * 2000-2002 x86-64 support by Andi Kleen - */ - -#include <linux/sched.h> -#include <linux/mm.h> -#include <linux/smp.h> -#include <linux/kernel.h> -#include <linux/signal.h> -#include <linux/errno.h> -#include <linux/wait.h> -#include <linux/ptrace.h> -#include <linux/tracehook.h> -#include <linux/unistd.h> -#include <linux/stddef.h> -#include <linux/personality.h> -#include <linux/compiler.h> -#include <linux/uaccess.h> - -#include <asm/processor.h> -#include <asm/ucontext.h> -#include <asm/i387.h> -#include <asm/proto.h> -#include <asm/ia32_unistd.h> -#include <asm/mce.h> -#include <asm/syscall.h> -#include <asm/syscalls.h> -#include "sigframe.h" - -#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) - -#define __FIX_EFLAGS (X86_EFLAGS_AC | X86_EFLAGS_OF | \ - X86_EFLAGS_DF | X86_EFLAGS_TF | X86_EFLAGS_SF | \ - X86_EFLAGS_ZF | X86_EFLAGS_AF | X86_EFLAGS_PF | \ - X86_EFLAGS_CF) - -#ifdef CONFIG_X86_32 -# define FIX_EFLAGS (__FIX_EFLAGS | X86_EFLAGS_RF) -#else -# define FIX_EFLAGS __FIX_EFLAGS -#endif - -asmlinkage long -sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, - struct pt_regs *regs) -{ - return do_sigaltstack(uss, uoss, regs->sp); -} - -#define COPY(x) { \ - err |= __get_user(regs->x, &sc->x); \ -} - -#define COPY_SEG_STRICT(seg) { \ - unsigned short tmp; \ - err |= __get_user(tmp, &sc->seg); \ - regs->seg = tmp | 3; \ -} - -/* - * Do a signal return; undo the signal stack. - */ -static int -restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, - unsigned long *pax) -{ - void __user *buf; - unsigned int tmpflags; - unsigned int err = 0; - - /* Always make any pending restarted system calls return -EINTR */ - current_thread_info()->restart_block.fn = do_no_restart_syscall; - - COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx); - COPY(dx); COPY(cx); COPY(ip); - COPY(r8); - COPY(r9); - COPY(r10); - COPY(r11); - COPY(r12); - COPY(r13); - COPY(r14); - COPY(r15); - - /* Kernel saves and restores only the CS segment register on signals, - * which is the bare minimum needed to allow mixed 32/64-bit code. - * App's signal handler can save/restore other segments if needed. 
*/ - COPY_SEG_STRICT(cs); - - err |= __get_user(tmpflags, &sc->flags); - regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS); - regs->orig_ax = -1; /* disable syscall checks */ - - err |= __get_user(buf, &sc->fpstate); - err |= restore_i387_xstate(buf); - - err |= __get_user(*pax, &sc->ax); - return err; -} - -static long do_rt_sigreturn(struct pt_regs *regs) -{ - struct rt_sigframe __user *frame; - unsigned long ax; - sigset_t set; - - frame = (struct rt_sigframe __user *)(regs->sp - sizeof(long)); - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) - goto badframe; - if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) - goto badframe; - - sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(&current->sighand->siglock); - current->blocked = set; - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); - - if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax)) - goto badframe; - - if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->sp) == -EFAULT) - goto badframe; - - return ax; - -badframe: - signal_fault(regs, frame, "rt_sigreturn"); - return 0; -} - -asmlinkage long sys_rt_sigreturn(struct pt_regs *regs) -{ - return do_rt_sigreturn(regs); -} - -/* - * Set up a signal frame. - */ - -static inline int -setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, - unsigned long mask, struct task_struct *me) -{ - int err = 0; - - err |= __put_user(regs->cs, &sc->cs); - err |= __put_user(0, &sc->gs); - err |= __put_user(0, &sc->fs); - - err |= __put_user(regs->di, &sc->di); - err |= __put_user(regs->si, &sc->si); - err |= __put_user(regs->bp, &sc->bp); - err |= __put_user(regs->sp, &sc->sp); - err |= __put_user(regs->bx, &sc->bx); - err |= __put_user(regs->dx, &sc->dx); - err |= __put_user(regs->cx, &sc->cx); - err |= __put_user(regs->ax, &sc->ax); - err |= __put_user(regs->r8, &sc->r8); - err |= __put_user(regs->r9, &sc->r9); - err |= __put_user(regs->r10, &sc->r10); - err |= __put_user(regs->r11, &sc->r11); - err |= __put_user(regs->r12, &sc->r12); - err |= __put_user(regs->r13, &sc->r13); - err |= __put_user(regs->r14, &sc->r14); - err |= __put_user(regs->r15, &sc->r15); - err |= __put_user(me->thread.trap_no, &sc->trapno); - err |= __put_user(me->thread.error_code, &sc->err); - err |= __put_user(regs->ip, &sc->ip); - err |= __put_user(regs->flags, &sc->flags); - err |= __put_user(mask, &sc->oldmask); - err |= __put_user(me->thread.cr2, &sc->cr2); - - return err; -} - -/* - * Determine which stack to use.. - */ - -static void __user * -get_stack(struct k_sigaction *ka, struct pt_regs *regs, unsigned long size) -{ - unsigned long sp; - - /* Default to using normal stack - redzone*/ - sp = regs->sp - 128; - - /* This is the X/Open sanctioned signal stack switching. 
*/ - if (ka->sa.sa_flags & SA_ONSTACK) { - if (sas_ss_flags(sp) == 0) - sp = current->sas_ss_sp + current->sas_ss_size; - } - - return (void __user *)round_down(sp - size, 64); -} - -static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, - sigset_t *set, struct pt_regs *regs) -{ - struct rt_sigframe __user *frame; - void __user *fp = NULL; - int err = 0; - struct task_struct *me = current; - - if (used_math()) { - fp = get_stack(ka, regs, sig_xstate_size); - frame = (void __user *)round_down( - (unsigned long)fp - sizeof(struct rt_sigframe), 16) - 8; - - if (save_i387_xstate(fp) < 0) - return -EFAULT; - } else - frame = get_stack(ka, regs, sizeof(struct rt_sigframe)) - 8; - - if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) - return -EFAULT; - - if (ka->sa.sa_flags & SA_SIGINFO) { - if (copy_siginfo_to_user(&frame->info, info)) - return -EFAULT; - } - - /* Create the ucontext. */ - if (cpu_has_xsave) - err |= __put_user(UC_FP_XSTATE, &frame->uc.uc_flags); - else - err |= __put_user(0, &frame->uc.uc_flags); - err |= __put_user(0, &frame->uc.uc_link); - err |= __put_user(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp); - err |= __put_user(sas_ss_flags(regs->sp), - &frame->uc.uc_stack.ss_flags); - err |= __put_user(me->sas_ss_size, &frame->uc.uc_stack.ss_size); - err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0], me); - err |= __put_user(fp, &frame->uc.uc_mcontext.fpstate); - if (sizeof(*set) == 16) { - __put_user(set->sig[0], &frame->uc.uc_sigmask.sig[0]); - __put_user(set->sig[1], &frame->uc.uc_sigmask.sig[1]); - } else - err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); - - /* Set up to return from userspace. If provided, use a stub - already in userspace. */ - /* x86-64 should always use SA_RESTORER. */ - if (ka->sa.sa_flags & SA_RESTORER) { - err |= __put_user(ka->sa.sa_restorer, &frame->pretcode); - } else { - /* could use a vstub here */ - return -EFAULT; - } - - if (err) - return -EFAULT; - - /* Set up registers for signal handler */ - regs->di = sig; - /* In case the signal handler was declared without prototypes */ - regs->ax = 0; - - /* This also works for non SA_SIGINFO handlers because they expect the - next argument after the signal number on the stack. */ - regs->si = (unsigned long)&frame->info; - regs->dx = (unsigned long)&frame->uc; - regs->ip = (unsigned long) ka->sa.sa_handler; - - regs->sp = (unsigned long)frame; - - /* Set up the CS register to run signal handlers in 64-bit mode, - even if the handler happens to be interrupting 32-bit code. */ - regs->cs = __USER_CS; - - return 0; -} - -/* - * OK, we're invoking a handler - */ -static int signr_convert(int sig) -{ - return sig; -} - -#ifdef CONFIG_IA32_EMULATION -#define is_ia32 test_thread_flag(TIF_IA32) -#else -#define is_ia32 0 -#endif - -static int -setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, - sigset_t *set, struct pt_regs *regs) -{ - int usig = signr_convert(sig); - int ret; - - /* Set up the stack frame */ - if (is_ia32) { - if (ka->sa.sa_flags & SA_SIGINFO) - ret = ia32_setup_rt_frame(usig, ka, info, set, regs); - else - ret = ia32_setup_frame(usig, ka, set, regs); - } else - ret = __setup_rt_frame(sig, ka, info, set, regs); - - if (ret) { - force_sigsegv(sig, current); - return -EFAULT; - } - - return ret; -} - -static int -handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, - sigset_t *oldset, struct pt_regs *regs) -{ - int ret; - - /* Are we from a system call? 
*/ - if (syscall_get_nr(current, regs) >= 0) { - /* If so, check system call restarting.. */ - switch (syscall_get_error(current, regs)) { - case -ERESTART_RESTARTBLOCK: - case -ERESTARTNOHAND: - regs->ax = -EINTR; - break; - - case -ERESTARTSYS: - if (!(ka->sa.sa_flags & SA_RESTART)) { - regs->ax = -EINTR; - break; - } - /* fallthrough */ - case -ERESTARTNOINTR: - regs->ax = regs->orig_ax; - regs->ip -= 2; - break; - } - } - - /* - * If TF is set due to a debugger (TIF_FORCED_TF), clear the TF - * flag so that register information in the sigcontext is correct. - */ - if (unlikely(regs->flags & X86_EFLAGS_TF) && - likely(test_and_clear_thread_flag(TIF_FORCED_TF))) - regs->flags &= ~X86_EFLAGS_TF; - - ret = setup_rt_frame(sig, ka, info, oldset, regs); - - if (ret) - return ret; - -#ifdef CONFIG_X86_64 - /* - * This has nothing to do with segment registers, - * despite the name. This magic affects uaccess.h - * macros' behavior. Reset it to the normal setting. - */ - set_fs(USER_DS); -#endif - - /* - * Clear the direction flag as per the ABI for function entry. - */ - regs->flags &= ~X86_EFLAGS_DF; - - /* - * Clear TF when entering the signal handler, but - * notify any tracer that was single-stepping it. - * The tracer may want to single-step inside the - * handler too. - */ - regs->flags &= ~X86_EFLAGS_TF; - - spin_lock_irq(&current->sighand->siglock); - sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask); - if (!(ka->sa.sa_flags & SA_NODEFER)) - sigaddset(&current->blocked, sig); - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); - - tracehook_signal_handler(sig, info, ka, regs, - test_thread_flag(TIF_SINGLESTEP)); - - return 0; -} - -#define NR_restart_syscall \ - test_thread_flag(TIF_IA32) ? __NR_ia32_restart_syscall : __NR_restart_syscall -/* - * Note that 'init' is a special process: it doesn't get signals it doesn't - * want to handle. Thus you cannot kill init even with a SIGKILL even by - * mistake. - */ -static void do_signal(struct pt_regs *regs) -{ - struct k_sigaction ka; - siginfo_t info; - int signr; - sigset_t *oldset; - - /* - * We want the common case to go fast, which is why we may in certain - * cases get here from kernel mode. Just return without doing anything - * if so. - * X86_32: vm86 regs switched out by assembly code before reaching - * here, so testing against kernel CS suffices. - */ - if (!user_mode(regs)) - return; - - if (current_thread_info()->status & TS_RESTORE_SIGMASK) - oldset = &current->saved_sigmask; - else - oldset = &current->blocked; - - signr = get_signal_to_deliver(&info, &ka, regs, NULL); - if (signr > 0) { - /* - * Re-enable any watchpoints before delivering the - * signal to user space. The processor register will - * have been cleared if the watchpoint triggered - * inside the kernel. - */ - if (current->thread.debugreg7) - set_debugreg(current->thread.debugreg7, 7); - - /* Whee! Actually deliver the signal. */ - if (handle_signal(signr, &info, &ka, oldset, regs) == 0) { - /* - * A signal was successfully delivered; the saved - * sigmask will have been stored in the signal frame, - * and will be restored by sigreturn, so we can simply - * clear the TS_RESTORE_SIGMASK flag. - */ - current_thread_info()->status &= ~TS_RESTORE_SIGMASK; - } - return; - } - - /* Did we come from a system call? 
*/ - if (syscall_get_nr(current, regs) >= 0) { - /* Restart the system call - no handlers present */ - switch (syscall_get_error(current, regs)) { - case -ERESTARTNOHAND: - case -ERESTARTSYS: - case -ERESTARTNOINTR: - regs->ax = regs->orig_ax; - regs->ip -= 2; - break; - - case -ERESTART_RESTARTBLOCK: - regs->ax = NR_restart_syscall; - regs->ip -= 2; - break; - } - } - - /* - * If there's no signal to deliver, we just put the saved sigmask - * back. - */ - if (current_thread_info()->status & TS_RESTORE_SIGMASK) { - current_thread_info()->status &= ~TS_RESTORE_SIGMASK; - sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL); - } -} - -/* - * notification of userspace execution resumption - * - triggered by the TIF_WORK_MASK flags - */ -void -do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags) -{ -#if defined(CONFIG_X86_64) && defined(CONFIG_X86_MCE) - /* notify userspace of pending MCEs */ - if (thread_info_flags & _TIF_MCE_NOTIFY) - mce_notify_user(); -#endif /* CONFIG_X86_64 && CONFIG_X86_MCE */ - - /* deal with pending signal delivery */ - if (thread_info_flags & _TIF_SIGPENDING) - do_signal(regs); - - if (thread_info_flags & _TIF_NOTIFY_RESUME) { - clear_thread_flag(TIF_NOTIFY_RESUME); - tracehook_notify_resume(regs); - } - -#ifdef CONFIG_X86_32 - clear_thread_flag(TIF_IRET); -#endif /* CONFIG_X86_32 */ -} - -void signal_fault(struct pt_regs *regs, void __user *frame, char *where) -{ - struct task_struct *me = current; - - if (show_unhandled_signals && printk_ratelimit()) { - printk(KERN_INFO - "%s[%d] bad frame in %s frame:%p ip:%lx sp:%lx orax:%lx", - me->comm, me->pid, where, frame, - regs->ip, regs->sp, regs->orig_ax); - print_vma_addr(" in ", regs->ip); - printk(KERN_CONT "\n"); - } - - force_sig(SIGSEGV, me); -} diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c index 18f9b19f5f8f..7e558db362c1 100644 --- a/arch/x86/kernel/smp.c +++ b/arch/x86/kernel/smp.c @@ -140,19 +140,6 @@ void native_send_call_func_ipi(cpumask_t mask) send_IPI_mask(mask, CALL_FUNCTION_VECTOR); } -static void stop_this_cpu(void *dummy) -{ - local_irq_disable(); - /* - * Remove this CPU: - */ - cpu_clear(smp_processor_id(), cpu_online_map); - disable_local_APIC(); - if (hlt_works(smp_processor_id())) - for (;;) halt(); - for (;;); -} - /* * this function calls the 'stop' function on all other CPUs in the system. 
*/ @@ -178,11 +165,7 @@ static void native_smp_send_stop(void) void smp_reschedule_interrupt(struct pt_regs *regs) { ack_APIC_irq(); -#ifdef CONFIG_X86_32 - __get_cpu_var(irq_stat).irq_resched_count++; -#else - add_pda(irq_resched_count, 1); -#endif + inc_irq_stat(irq_resched_count); } void smp_call_function_interrupt(struct pt_regs *regs) @@ -190,11 +173,7 @@ void smp_call_function_interrupt(struct pt_regs *regs) ack_APIC_irq(); irq_enter(); generic_smp_call_function_interrupt(); -#ifdef CONFIG_X86_32 - __get_cpu_var(irq_stat).irq_call_count++; -#else - add_pda(irq_call_count, 1); -#endif + inc_irq_stat(irq_call_count); irq_exit(); } @@ -203,11 +182,7 @@ void smp_call_function_single_interrupt(struct pt_regs *regs) ack_APIC_irq(); irq_enter(); generic_smp_call_function_single_interrupt(); -#ifdef CONFIG_X86_32 - __get_cpu_var(irq_stat).irq_call_count++; -#else - add_pda(irq_call_count, 1); -#endif + inc_irq_stat(irq_call_count); irq_exit(); } diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index f71f96fc9e62..f8500c969442 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -62,6 +62,7 @@ #include <asm/mtrr.h> #include <asm/vmi.h> #include <asm/genapic.h> +#include <asm/setup.h> #include <linux/mc146818rtc.h> #include <mach_apic.h> @@ -287,7 +288,7 @@ static int __cpuinitdata unsafe_smp; /* * Activate a secondary processor. */ -static void __cpuinit start_secondary(void *unused) +notrace static void __cpuinit start_secondary(void *unused) { /* * Don't put *anything* before cpu_init(), SMP booting is too @@ -534,7 +535,7 @@ static void impress_friends(void) pr_debug("Before bogocount - setting activated=1.\n"); } -static inline void __inquire_remote_apic(int apicid) +void __inquire_remote_apic(int apicid) { unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 }; char *names[] = { "ID", "VERSION", "SPIV" }; @@ -573,14 +574,13 @@ static inline void __inquire_remote_apic(int apicid) } } -#ifdef WAKE_SECONDARY_VIA_NMI /* * Poke the other CPU in the eye via NMI to wake it up. Remember that the normal * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this * won't ... remember to clear down the APIC, etc later. */ -static int __devinit -wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip) +int __devinit +wakeup_secondary_cpu_via_nmi(int logical_apicid, unsigned long start_eip) { unsigned long send_status, accept_status = 0; int maxlvt; @@ -597,7 +597,7 @@ wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip) * Give the other CPU some time to accept the IPI. */ udelay(200); - if (APIC_INTEGRATED(apic_version[phys_apicid])) { + if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) { maxlvt = lapic_get_maxlvt(); if (maxlvt > 3) /* Due to the Pentium erratum 3AP. 
*/ apic_write(APIC_ESR, 0); @@ -612,11 +612,9 @@ wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip) return (send_status | accept_status); } -#endif /* WAKE_SECONDARY_VIA_NMI */ -#ifdef WAKE_SECONDARY_VIA_INIT -static int __devinit -wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip) +int __devinit +wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip) { unsigned long send_status, accept_status = 0; int maxlvt, num_starts, j; @@ -735,7 +733,6 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip) return (send_status | accept_status); } -#endif /* WAKE_SECONDARY_VIA_INIT */ struct create_idle { struct work_struct work; @@ -1084,8 +1081,10 @@ static int __init smp_sanity_check(unsigned max_cpus) #endif if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) { - printk(KERN_WARNING "weird, boot CPU (#%d) not listed" - "by the BIOS.\n", hard_smp_processor_id()); + printk(KERN_WARNING + "weird, boot CPU (#%d) not listed by the BIOS.\n", + hard_smp_processor_id()); + physid_set(hard_smp_processor_id(), phys_cpu_present_map); } diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c index a03e7f6d90c3..10786af95545 100644 --- a/arch/x86/kernel/stacktrace.c +++ b/arch/x86/kernel/stacktrace.c @@ -6,6 +6,7 @@ #include <linux/sched.h> #include <linux/stacktrace.h> #include <linux/module.h> +#include <linux/uaccess.h> #include <asm/stacktrace.h> static void save_stack_warning(void *data, char *msg) @@ -83,3 +84,66 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) trace->entries[trace->nr_entries++] = ULONG_MAX; } EXPORT_SYMBOL_GPL(save_stack_trace_tsk); + +/* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */ + +struct stack_frame { + const void __user *next_fp; + unsigned long ret_addr; +}; + +static int copy_stack_frame(const void __user *fp, struct stack_frame *frame) +{ + int ret; + + if (!access_ok(VERIFY_READ, fp, sizeof(*frame))) + return 0; + + ret = 1; + pagefault_disable(); + if (__copy_from_user_inatomic(frame, fp, sizeof(*frame))) + ret = 0; + pagefault_enable(); + + return ret; +} + +static inline void __save_stack_trace_user(struct stack_trace *trace) +{ + const struct pt_regs *regs = task_pt_regs(current); + const void __user *fp = (const void __user *)regs->bp; + + if (trace->nr_entries < trace->max_entries) + trace->entries[trace->nr_entries++] = regs->ip; + + while (trace->nr_entries < trace->max_entries) { + struct stack_frame frame; + + frame.next_fp = NULL; + frame.ret_addr = 0; + if (!copy_stack_frame(fp, &frame)) + break; + if ((unsigned long)fp < regs->sp) + break; + if (frame.ret_addr) { + trace->entries[trace->nr_entries++] = + frame.ret_addr; + } + if (fp == frame.next_fp) + break; + fp = frame.next_fp; + } +} + +void save_stack_trace_user(struct stack_trace *trace) +{ + /* + * Trace user stack if we are not a kernel thread + */ + if (current->mm) { + __save_stack_trace_user(trace); + } + if (trace->nr_entries < trace->max_entries) + trace->entries[trace->nr_entries++] = ULONG_MAX; +} + diff --git a/arch/x86/kernel/time_32.c b/arch/x86/kernel/time_32.c index 77b400f06ea2..65309e4cb1c0 100644 --- a/arch/x86/kernel/time_32.c +++ b/arch/x86/kernel/time_32.c @@ -75,7 +75,7 @@ EXPORT_SYMBOL(profile_pc); irqreturn_t timer_interrupt(int irq, void *dev_id) { /* Keep nmi watchdog up to date */ - per_cpu(irq_stat, smp_processor_id()).irq0_irqs++; + inc_irq_stat(irq0_irqs); #ifdef CONFIG_X86_IO_APIC if (timer_ack) { diff --git a/arch/x86/kernel/time_64.c 
b/arch/x86/kernel/time_64.c index cb19d650c216..891e7a7c4334 100644 --- a/arch/x86/kernel/time_64.c +++ b/arch/x86/kernel/time_64.c @@ -49,9 +49,9 @@ unsigned long profile_pc(struct pt_regs *regs) } EXPORT_SYMBOL(profile_pc); -irqreturn_t timer_interrupt(int irq, void *dev_id) +static irqreturn_t timer_interrupt(int irq, void *dev_id) { - add_pda(irq0_irqs, 1); + inc_irq_stat(irq0_irqs); global_clock_event->event_handler(global_clock_event); @@ -80,6 +80,8 @@ unsigned long __init calibrate_cpu(void) break; no_ctr_free = (i == 4); if (no_ctr_free) { + WARN(1, KERN_WARNING "Warning: AMD perfctrs busy ... " + "cpu_khz value may be incorrect.\n"); i = 3; rdmsrl(MSR_K7_EVNTSEL3, evntsel3); wrmsrl(MSR_K7_EVNTSEL3, 0); diff --git a/arch/x86/kernel/tlb_32.c b/arch/x86/kernel/tlb_32.c index f4049f3513b6..8da059f949be 100644 --- a/arch/x86/kernel/tlb_32.c +++ b/arch/x86/kernel/tlb_32.c @@ -34,9 +34,8 @@ static DEFINE_SPINLOCK(tlbstate_lock); */ void leave_mm(int cpu) { - if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) - BUG(); - cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask); + BUG_ON(x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_OK); + cpu_clear(cpu, x86_read_percpu(cpu_tlbstate.active_mm)->cpu_vm_mask); load_cr3(swapper_pg_dir); } EXPORT_SYMBOL_GPL(leave_mm); @@ -104,8 +103,8 @@ void smp_invalidate_interrupt(struct pt_regs *regs) * BUG(); */ - if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) { - if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) { + if (flush_mm == x86_read_percpu(cpu_tlbstate.active_mm)) { + if (x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_OK) { if (flush_va == TLB_FLUSH_ALL) local_flush_tlb(); else @@ -119,7 +118,7 @@ void smp_invalidate_interrupt(struct pt_regs *regs) smp_mb__after_clear_bit(); out: put_cpu_no_resched(); - __get_cpu_var(irq_stat).irq_tlb_count++; + inc_irq_stat(irq_tlb_count); } void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm, @@ -238,7 +237,7 @@ static void do_flush_tlb_all(void *info) unsigned long cpu = smp_processor_id(); __flush_tlb_all(); - if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY) + if (x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_LAZY) leave_mm(cpu); } diff --git a/arch/x86/kernel/tlb_64.c b/arch/x86/kernel/tlb_64.c index 8f919ca69494..29887d7081a9 100644 --- a/arch/x86/kernel/tlb_64.c +++ b/arch/x86/kernel/tlb_64.c @@ -154,7 +154,7 @@ asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs) out: ack_APIC_irq(); cpu_clear(cpu, f->flush_cpumask); - add_pda(irq_tlb_count, 1); + inc_irq_stat(irq_tlb_count); } void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm, diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c index 04431f34fd16..6a00e5faaa74 100644 --- a/arch/x86/kernel/tlb_uv.c +++ b/arch/x86/kernel/tlb_uv.c @@ -566,14 +566,10 @@ static int __init uv_ptc_init(void) if (!is_uv_system()) return 0; - if (!proc_mkdir("sgi_uv", NULL)) - return -EINVAL; - proc_uv_ptc = create_proc_entry(UV_PTC_BASENAME, 0444, NULL); if (!proc_uv_ptc) { printk(KERN_ERR "unable to create %s proc entry\n", UV_PTC_BASENAME); - remove_proc_entry("sgi_uv", NULL); return -EINVAL; } proc_uv_ptc->proc_fops = &proc_uv_ptc_operations; diff --git a/arch/x86/kernel/trampoline.c b/arch/x86/kernel/trampoline.c index 1106fac6024d..808031a5ba19 100644 --- a/arch/x86/kernel/trampoline.c +++ b/arch/x86/kernel/trampoline.c @@ -1,10 +1,26 @@ #include <linux/io.h> #include <asm/trampoline.h> +#include <asm/e820.h> /* ready for x86_64 and x86 */ unsigned char 
*trampoline_base = __va(TRAMPOLINE_BASE); +void __init reserve_trampoline_memory(void) +{ +#ifdef CONFIG_X86_32 + /* + * But first pinch a few for the stack/trampoline stuff + * FIXME: Don't need the extra page at 4K, but need to fix + * trampoline before removing it. (see the GDT stuff) + */ + reserve_early(PAGE_SIZE, PAGE_SIZE + PAGE_SIZE, "EX TRAMPOLINE"); +#endif + /* Has to be in very low memory so we can execute real-mode AP code. */ + reserve_early(TRAMPOLINE_BASE, TRAMPOLINE_BASE + TRAMPOLINE_SIZE, + "TRAMPOLINE"); +} + /* * Currently trivial. Write the real->protected mode * bootstrap into the page concerned. The caller @@ -12,7 +28,6 @@ unsigned char *trampoline_base = __va(TRAMPOLINE_BASE); */ unsigned long setup_trampoline(void) { - memcpy(trampoline_base, trampoline_data, - trampoline_end - trampoline_data); + memcpy(trampoline_base, trampoline_data, TRAMPOLINE_SIZE); return virt_to_phys(trampoline_base); } diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 04d242ab0161..141907ab6e22 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -481,11 +481,7 @@ do_nmi(struct pt_regs *regs, long error_code) { nmi_enter(); -#ifdef CONFIG_X86_32 - { int cpu; cpu = smp_processor_id(); ++nmi_count(cpu); } -#else - add_pda(__nmi_count, 1); -#endif + inc_irq_stat(__nmi_count); if (!ignore_nmis) default_do_nmi(regs); @@ -664,7 +660,7 @@ void math_error(void __user *ip) { struct task_struct *task; siginfo_t info; - unsigned short cwd, swd; + unsigned short cwd, swd, err; /* * Save the info for the exception handler and clear the error. @@ -675,7 +671,6 @@ void math_error(void __user *ip) task->thread.error_code = 0; info.si_signo = SIGFPE; info.si_errno = 0; - info.si_code = __SI_FAULT; info.si_addr = ip; /* * (~cwd & swd) will mask out exceptions that are not set to unmasked @@ -689,34 +684,31 @@ void math_error(void __user *ip) */ cwd = get_fpu_cwd(task); swd = get_fpu_swd(task); - switch (swd & ~cwd & 0x3f) { - case 0x000: /* No unmasked exception */ + + err = swd & ~cwd & 0x3f; + #ifdef CONFIG_X86_32 + if (!err) return; #endif - default: /* Multiple exceptions */ - break; - case 0x001: /* Invalid Op */ + + if (err & 0x001) { /* Invalid op */ /* * swd & 0x240 == 0x040: Stack Underflow * swd & 0x240 == 0x240: Stack Overflow * User must clear the SF bit (0x40) if set */ info.si_code = FPE_FLTINV; - break; - case 0x002: /* Denormalize */ - case 0x010: /* Underflow */ - info.si_code = FPE_FLTUND; - break; - case 0x004: /* Zero Divide */ + } else if (err & 0x004) { /* Divide by Zero */ info.si_code = FPE_FLTDIV; - break; - case 0x008: /* Overflow */ + } else if (err & 0x008) { /* Overflow */ info.si_code = FPE_FLTOVF; - break; - case 0x020: /* Precision */ + } else if (err & 0x012) { /* Denormal, Underflow */ + info.si_code = FPE_FLTUND; + } else if (err & 0x020) { /* Precision */ info.si_code = FPE_FLTRES; - break; + } else { + info.si_code = __SI_FAULT|SI_KERNEL; /* WTF? 
*/ } force_sig_info(SIGFPE, &info, task); } diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 424093b157d3..599e58168631 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -15,6 +15,7 @@ #include <asm/vgtod.h> #include <asm/time.h> #include <asm/delay.h> +#include <asm/hypervisor.h> unsigned int cpu_khz; /* TSC clocks / usec, not used here */ EXPORT_SYMBOL(cpu_khz); @@ -31,6 +32,7 @@ static int tsc_unstable; erroneous rdtsc usage on !cpu_has_tsc processors */ static int tsc_disabled = -1; +static int tsc_clocksource_reliable; /* * Scheduler clock - returns current time in nanosec units. */ @@ -98,6 +100,15 @@ int __init notsc_setup(char *str) __setup("notsc", notsc_setup); +static int __init tsc_setup(char *str) +{ + if (!strcmp(str, "reliable")) + tsc_clocksource_reliable = 1; + return 1; +} + +__setup("tsc=", tsc_setup); + #define MAX_RETRIES 5 #define SMI_TRESHOLD 50000 @@ -352,9 +363,15 @@ unsigned long native_calibrate_tsc(void) { u64 tsc1, tsc2, delta, ref1, ref2; unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX; - unsigned long flags, latch, ms, fast_calibrate; + unsigned long flags, latch, ms, fast_calibrate, tsc_khz; int hpet = is_hpet_enabled(), i, loopmin; + tsc_khz = get_hypervisor_tsc_freq(); + if (tsc_khz) { + printk(KERN_INFO "TSC: Frequency read from the hypervisor\n"); + return tsc_khz; + } + local_irq_save(flags); fast_calibrate = quick_pit_calibrate(); local_irq_restore(flags); @@ -731,24 +748,21 @@ static struct dmi_system_id __initdata bad_tsc_dmi_table[] = { {} }; -/* - * Geode_LX - the OLPC CPU has a possibly a very reliable TSC - */ +static void __init check_system_tsc_reliable(void) +{ #ifdef CONFIG_MGEODE_LX -/* RTSC counts during suspend */ + /* RTSC counts during suspend */ #define RTSC_SUSP 0x100 - -static void __init check_geode_tsc_reliable(void) -{ unsigned long res_low, res_high; rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high); + /* Geode_LX - the OLPC CPU has a possibly a very reliable TSC */ if (res_low & RTSC_SUSP) - clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY; -} -#else -static inline void check_geode_tsc_reliable(void) { } + tsc_clocksource_reliable = 1; #endif + if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) + tsc_clocksource_reliable = 1; +} /* * Make an educated guess if the TSC is trustworthy and synchronized @@ -783,6 +797,8 @@ static void __init init_tsc_clocksource(void) { clocksource_tsc.mult = clocksource_khz2mult(tsc_khz, clocksource_tsc.shift); + if (tsc_clocksource_reliable) + clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY; /* lower the rating if we already know its unstable: */ if (check_tsc_unstable()) { clocksource_tsc.rating = 0; @@ -843,7 +859,7 @@ void __init tsc_init(void) if (unsynchronized_tsc()) mark_tsc_unstable("TSCs unsynchronized"); - check_geode_tsc_reliable(); + check_system_tsc_reliable(); init_tsc_clocksource(); } diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c index 1c0dfbca87c1..bf36328f6ef9 100644 --- a/arch/x86/kernel/tsc_sync.c +++ b/arch/x86/kernel/tsc_sync.c @@ -112,6 +112,12 @@ void __cpuinit check_tsc_sync_source(int cpu) if (unsynchronized_tsc()) return; + if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) { + printk(KERN_INFO + "Skipping synchronization checks as TSC is reliable.\n"); + return; + } + printk(KERN_INFO "checking TSC synchronization [CPU#%d -> CPU#%d]:", smp_processor_id(), cpu); @@ -165,7 +171,7 @@ void __cpuinit check_tsc_sync_target(void) { int cpus = 2; - if (unsynchronized_tsc()) + if (unsynchronized_tsc() || 
boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) return; /* diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c index 22fd6577156a..23206ba16874 100644 --- a/arch/x86/kernel/vmi_32.c +++ b/arch/x86/kernel/vmi_32.c @@ -266,109 +266,6 @@ static void vmi_nop(void) { } -#ifdef CONFIG_DEBUG_PAGE_TYPE - -#ifdef CONFIG_X86_PAE -#define MAX_BOOT_PTS (2048+4+1) -#else -#define MAX_BOOT_PTS (1024+1) -#endif - -/* - * During boot, mem_map is not yet available in paging_init, so stash - * all the boot page allocations here. - */ -static struct { - u32 pfn; - int type; -} boot_page_allocations[MAX_BOOT_PTS]; -static int num_boot_page_allocations; -static int boot_allocations_applied; - -void vmi_apply_boot_page_allocations(void) -{ - int i; - BUG_ON(!mem_map); - for (i = 0; i < num_boot_page_allocations; i++) { - struct page *page = pfn_to_page(boot_page_allocations[i].pfn); - page->type = boot_page_allocations[i].type; - page->type = boot_page_allocations[i].type & - ~(VMI_PAGE_ZEROED | VMI_PAGE_CLONE); - } - boot_allocations_applied = 1; -} - -static void record_page_type(u32 pfn, int type) -{ - BUG_ON(num_boot_page_allocations >= MAX_BOOT_PTS); - boot_page_allocations[num_boot_page_allocations].pfn = pfn; - boot_page_allocations[num_boot_page_allocations].type = type; - num_boot_page_allocations++; -} - -static void check_zeroed_page(u32 pfn, int type, struct page *page) -{ - u32 *ptr; - int i; - int limit = PAGE_SIZE / sizeof(int); - - if (page_address(page)) - ptr = (u32 *)page_address(page); - else - ptr = (u32 *)__va(pfn << PAGE_SHIFT); - /* - * When cloning the root in non-PAE mode, only the userspace - * pdes need to be zeroed. - */ - if (type & VMI_PAGE_CLONE) - limit = KERNEL_PGD_BOUNDARY; - for (i = 0; i < limit; i++) - BUG_ON(ptr[i]); -} - -/* - * We stash the page type into struct page so we can verify the page - * types are used properly. - */ -static void vmi_set_page_type(u32 pfn, int type) -{ - /* PAE can have multiple roots per page - don't track */ - if (PTRS_PER_PMD > 1 && (type & VMI_PAGE_PDP)) - return; - - if (boot_allocations_applied) { - struct page *page = pfn_to_page(pfn); - if (type != VMI_PAGE_NORMAL) - BUG_ON(page->type); - else - BUG_ON(page->type == VMI_PAGE_NORMAL); - page->type = type & ~(VMI_PAGE_ZEROED | VMI_PAGE_CLONE); - if (type & VMI_PAGE_ZEROED) - check_zeroed_page(pfn, type, page); - } else { - record_page_type(pfn, type); - } -} - -static void vmi_check_page_type(u32 pfn, int type) -{ - /* PAE can have multiple roots per page - skip checks */ - if (PTRS_PER_PMD > 1 && (type & VMI_PAGE_PDP)) - return; - - type &= ~(VMI_PAGE_ZEROED | VMI_PAGE_CLONE); - if (boot_allocations_applied) { - struct page *page = pfn_to_page(pfn); - BUG_ON((page->type ^ type) & VMI_PAGE_PAE); - BUG_ON(type == VMI_PAGE_NORMAL && page->type); - BUG_ON((type & page->type) == 0); - } -} -#else -#define vmi_set_page_type(p,t) do { } while (0) -#define vmi_check_page_type(p,t) do { } while (0) -#endif - #ifdef CONFIG_HIGHPTE static void *vmi_kmap_atomic_pte(struct page *page, enum km_type type) { @@ -395,7 +292,6 @@ static void *vmi_kmap_atomic_pte(struct page *page, enum km_type type) static void vmi_allocate_pte(struct mm_struct *mm, unsigned long pfn) { - vmi_set_page_type(pfn, VMI_PAGE_L1); vmi_ops.allocate_page(pfn, VMI_PAGE_L1, 0, 0, 0); } @@ -406,27 +302,22 @@ static void vmi_allocate_pmd(struct mm_struct *mm, unsigned long pfn) * It is called only for swapper_pg_dir, which already has * data on it. 
*/ - vmi_set_page_type(pfn, VMI_PAGE_L2); vmi_ops.allocate_page(pfn, VMI_PAGE_L2, 0, 0, 0); } static void vmi_allocate_pmd_clone(unsigned long pfn, unsigned long clonepfn, unsigned long start, unsigned long count) { - vmi_set_page_type(pfn, VMI_PAGE_L2 | VMI_PAGE_CLONE); - vmi_check_page_type(clonepfn, VMI_PAGE_L2); vmi_ops.allocate_page(pfn, VMI_PAGE_L2 | VMI_PAGE_CLONE, clonepfn, start, count); } static void vmi_release_pte(unsigned long pfn) { vmi_ops.release_page(pfn, VMI_PAGE_L1); - vmi_set_page_type(pfn, VMI_PAGE_NORMAL); } static void vmi_release_pmd(unsigned long pfn) { vmi_ops.release_page(pfn, VMI_PAGE_L2); - vmi_set_page_type(pfn, VMI_PAGE_NORMAL); } /* @@ -450,26 +341,22 @@ static void vmi_release_pmd(unsigned long pfn) static void vmi_update_pte(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE); vmi_ops.update_pte(ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0)); } static void vmi_update_pte_defer(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE); vmi_ops.update_pte(ptep, vmi_flags_addr_defer(mm, addr, VMI_PAGE_PT, 0)); } static void vmi_set_pte(pte_t *ptep, pte_t pte) { /* XXX because of set_pmd_pte, this can be called on PT or PD layers */ - vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE | VMI_PAGE_PD); vmi_ops.set_pte(pte, ptep, VMI_PAGE_PT); } static void vmi_set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte) { - vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE); vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0)); } @@ -477,10 +364,8 @@ static void vmi_set_pmd(pmd_t *pmdp, pmd_t pmdval) { #ifdef CONFIG_X86_PAE const pte_t pte = { .pte = pmdval.pmd }; - vmi_check_page_type(__pa(pmdp) >> PAGE_SHIFT, VMI_PAGE_PMD); #else const pte_t pte = { pmdval.pud.pgd.pgd }; - vmi_check_page_type(__pa(pmdp) >> PAGE_SHIFT, VMI_PAGE_PGD); #endif vmi_ops.set_pte(pte, (pte_t *)pmdp, VMI_PAGE_PD); } @@ -502,7 +387,6 @@ static void vmi_set_pte_atomic(pte_t *ptep, pte_t pteval) static void vmi_set_pte_present(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte) { - vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE); vmi_ops.set_pte(pte, ptep, vmi_flags_addr_defer(mm, addr, VMI_PAGE_PT, 1)); } @@ -510,21 +394,18 @@ static void vmi_set_pud(pud_t *pudp, pud_t pudval) { /* Um, eww */ const pte_t pte = { .pte = pudval.pgd.pgd }; - vmi_check_page_type(__pa(pudp) >> PAGE_SHIFT, VMI_PAGE_PGD); vmi_ops.set_pte(pte, (pte_t *)pudp, VMI_PAGE_PDP); } static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { const pte_t pte = { .pte = 0 }; - vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE); vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0)); } static void vmi_pmd_clear(pmd_t *pmd) { const pte_t pte = { .pte = 0 }; - vmi_check_page_type(__pa(pmd) >> PAGE_SHIFT, VMI_PAGE_PMD); vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD); } #endif diff --git a/arch/x86/kernel/vmlinux_32.lds.S b/arch/x86/kernel/vmlinux_32.lds.S index a9b8560adbc2..82c67559dde7 100644 --- a/arch/x86/kernel/vmlinux_32.lds.S +++ b/arch/x86/kernel/vmlinux_32.lds.S @@ -44,6 +44,7 @@ SECTIONS SCHED_TEXT LOCK_TEXT KPROBES_TEXT + IRQENTRY_TEXT *(.fixup) *(.gnu.warning) _etext = .; /* End of text section */ diff --git a/arch/x86/kernel/vmlinux_64.lds.S b/arch/x86/kernel/vmlinux_64.lds.S index 46e05447405b..1a614c0e6bef 100644 --- a/arch/x86/kernel/vmlinux_64.lds.S +++ 
b/arch/x86/kernel/vmlinux_64.lds.S @@ -35,6 +35,7 @@ SECTIONS SCHED_TEXT LOCK_TEXT KPROBES_TEXT + IRQENTRY_TEXT *(.fixup) *(.gnu.warning) _etext = .; /* End of text section */ diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c index 0b8b6690a86d..44153afc9067 100644 --- a/arch/x86/kernel/vsyscall_64.c +++ b/arch/x86/kernel/vsyscall_64.c @@ -17,6 +17,9 @@ * want per guest time just set the kernel.vsyscall64 sysctl to 0. */ +/* Disable profiling for userspace code: */ +#define DISABLE_BRANCH_PROFILING + #include <linux/time.h> #include <linux/init.h> #include <linux/kernel.h> @@ -128,7 +131,16 @@ static __always_inline void do_vgettimeofday(struct timeval * tv) gettimeofday(tv,NULL); return; } + + /* + * Surround the RDTSC by barriers, to make sure it's not + * speculated to outside the seqlock critical section and + * does not cause time warps: + */ + rdtsc_barrier(); now = vread(); + rdtsc_barrier(); + base = __vsyscall_gtod_data.clock.cycle_last; mask = __vsyscall_gtod_data.clock.mask; mult = __vsyscall_gtod_data.clock.mult; diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index a5d8e1ace1cf..50a779264bb1 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c @@ -590,7 +590,8 @@ static void __init lguest_init_IRQ(void) * a straightforward 1 to 1 mapping, so force that here. */ __get_cpu_var(vector_irq)[vector] = i; if (vector != SYSCALL_VECTOR) { - set_intr_gate(vector, interrupt[vector]); + set_intr_gate(vector, + interrupt[vector-FIRST_EXTERNAL_VECTOR]); set_irq_chip_and_handler_name(i, &lguest_irq_controller, handle_level_irq, "level"); diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c index 3c3b471ea496..3624a364b7f3 100644 --- a/arch/x86/mach-generic/bigsmp.c +++ b/arch/x86/mach-generic/bigsmp.c @@ -17,6 +17,7 @@ #include <asm/bigsmp/apic.h> #include <asm/bigsmp/ipi.h> #include <asm/mach-default/mach_mpparse.h> +#include <asm/mach-default/mach_wakecpu.h> static int dmi_bigsmp; /* can be set by dmi scanners */ diff --git a/arch/x86/mach-generic/default.c b/arch/x86/mach-generic/default.c index 9e835a11a13a..e63a4a76d8cd 100644 --- a/arch/x86/mach-generic/default.c +++ b/arch/x86/mach-generic/default.c @@ -16,6 +16,7 @@ #include <asm/mach-default/mach_apic.h> #include <asm/mach-default/mach_ipi.h> #include <asm/mach-default/mach_mpparse.h> +#include <asm/mach-default/mach_wakecpu.h> /* should be called last. 
*/ static int probe_default(void) diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c index 28459cab3ddb..7b4e6d0d1690 100644 --- a/arch/x86/mach-generic/es7000.c +++ b/arch/x86/mach-generic/es7000.c @@ -16,7 +16,19 @@ #include <asm/es7000/apic.h> #include <asm/es7000/ipi.h> #include <asm/es7000/mpparse.h> -#include <asm/es7000/wakecpu.h> +#include <asm/mach-default/mach_wakecpu.h> + +void __init es7000_update_genapic_to_cluster(void) +{ + genapic->target_cpus = target_cpus_cluster; + genapic->int_delivery_mode = INT_DELIVERY_MODE_CLUSTER; + genapic->int_dest_mode = INT_DEST_MODE_CLUSTER; + genapic->no_balance_irq = NO_BALANCE_IRQ_CLUSTER; + + genapic->init_apic_ldr = init_apic_ldr_cluster; + + genapic->cpu_mask_to_apicid = cpu_mask_to_apicid_cluster; +} static int probe_es7000(void) { diff --git a/arch/x86/mach-generic/probe.c b/arch/x86/mach-generic/probe.c index 5a7e4619e1c4..c346d9d0226f 100644 --- a/arch/x86/mach-generic/probe.c +++ b/arch/x86/mach-generic/probe.c @@ -15,6 +15,7 @@ #include <asm/mpspec.h> #include <asm/apicdef.h> #include <asm/genapic.h> +#include <asm/setup.h> extern struct genapic apic_numaq; extern struct genapic apic_summit; @@ -57,6 +58,9 @@ static int __init parse_apic(char *arg) } } + if (x86_quirks->update_genapic) + x86_quirks->update_genapic(); + /* Parsed again by __setup for debug/verbose */ return 0; } @@ -72,12 +76,15 @@ void __init generic_bigsmp_probe(void) * - we find more than 8 CPUs in acpi LAPIC listing with xAPIC support */ - if (!cmdline_apic && genapic == &apic_default) + if (!cmdline_apic && genapic == &apic_default) { if (apic_bigsmp.probe()) { genapic = &apic_bigsmp; + if (x86_quirks->update_genapic) + x86_quirks->update_genapic(); printk(KERN_INFO "Overriding APIC driver with %s\n", genapic->name); } + } #endif } @@ -94,6 +101,9 @@ void __init generic_apic_probe(void) /* Not visible without early console */ if (!apic_probe[i]) panic("Didn't find an APIC driver"); + + if (x86_quirks->update_genapic) + x86_quirks->update_genapic(); } printk(KERN_INFO "Using APIC driver %s\n", genapic->name); } @@ -108,6 +118,8 @@ int __init mps_oem_check(struct mp_config_table *mpc, char *oem, if (apic_probe[i]->mps_oem_check(mpc, oem, productid)) { if (!cmdline_apic) { genapic = apic_probe[i]; + if (x86_quirks->update_genapic) + x86_quirks->update_genapic(); printk(KERN_INFO "Switched to APIC driver `%s'.\n", genapic->name); } @@ -124,6 +136,8 @@ int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id) if (apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id)) { if (!cmdline_apic) { genapic = apic_probe[i]; + if (x86_quirks->update_genapic) + x86_quirks->update_genapic(); printk(KERN_INFO "Switched to APIC driver `%s'.\n", genapic->name); } diff --git a/arch/x86/mach-generic/summit.c b/arch/x86/mach-generic/summit.c index 6272b5e69da6..2c6d234e0009 100644 --- a/arch/x86/mach-generic/summit.c +++ b/arch/x86/mach-generic/summit.c @@ -16,6 +16,7 @@ #include <asm/summit/apic.h> #include <asm/summit/ipi.h> #include <asm/summit/mpparse.h> +#include <asm/mach-default/mach_wakecpu.h> static int probe_summit(void) { diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile index fea4565ff576..d8cc96a2738f 100644 --- a/arch/x86/mm/Makefile +++ b/arch/x86/mm/Makefile @@ -8,9 +8,8 @@ obj-$(CONFIG_X86_PTDUMP) += dump_pagetables.o obj-$(CONFIG_HIGHMEM) += highmem_32.o -obj-$(CONFIG_MMIOTRACE_HOOKS) += kmmio.o obj-$(CONFIG_MMIOTRACE) += mmiotrace.o -mmiotrace-y := pf_in.o mmio-mod.o +mmiotrace-y := kmmio.o pf_in.o mmio-mod.o 
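The probe.c hunks above add the same call at every point where a genapic driver gets chosen: if the platform registered an x86_quirks->update_genapic hook, invoke it so a subarch such as ES7000 can retarget the freshly selected driver (es7000_update_genapic_to_cluster above rewrites the delivery mode, LDR setup and cpu_mask_to_apicid methods; probe.c gains #include <asm/setup.h>, presumably for the x86_quirks declaration). The standalone sketch below shows only that null-checked hook idiom; every struct and function name in it is hypothetical, not the kernel's.

#include <stdio.h>

/* Hypothetical stand-ins for genapic and x86_quirks, for illustration only. */
struct fake_apic_driver {
	const char *name;
	const char *int_delivery_mode;
};

struct fake_quirks {
	void (*update_apic)(struct fake_apic_driver *apic);	/* may be NULL */
};

static void cluster_fixup(struct fake_apic_driver *apic)
{
	/* An ES7000-style hook would switch the driver to cluster mode here. */
	apic->int_delivery_mode = "cluster";
}

static struct fake_quirks quirks = { .update_apic = cluster_fixup };

static void choose_driver(struct fake_apic_driver *apic)
{
	/* The idiom from the probe paths: select first, then let the quirk adjust. */
	if (quirks.update_apic)
		quirks.update_apic(apic);
	printf("Using APIC driver %s (%s delivery)\n", apic->name, apic->int_delivery_mode);
}

int main(void)
{
	struct fake_apic_driver apic = { .name = "default", .int_delivery_mode = "flat" };
	choose_driver(&apic);
	return 0;
}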
obj-$(CONFIG_MMIOTRACE_TEST) += testmmiotrace.o obj-$(CONFIG_NUMA) += numa_$(BITS).o diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 31e8730fa246..57ec8c86a877 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -53,7 +53,7 @@ static inline int kmmio_fault(struct pt_regs *regs, unsigned long addr) { -#ifdef CONFIG_MMIOTRACE_HOOKS +#ifdef CONFIG_MMIOTRACE if (unlikely(is_kmmio_active())) if (kmmio_handler(regs, addr) == 1) return -1; @@ -393,7 +393,7 @@ static void show_fault_oops(struct pt_regs *regs, unsigned long error_code, if (pte && pte_present(*pte) && !pte_exec(*pte)) printk(KERN_CRIT "kernel tried to execute " "NX-protected page - exploit attempt? " - "(uid: %d)\n", current->uid); + "(uid: %d)\n", current_uid()); } #endif @@ -413,6 +413,7 @@ static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs, unsigned long error_code) { unsigned long flags = oops_begin(); + int sig = SIGKILL; struct task_struct *tsk; printk(KERN_ALERT "%s: Corrupted page table at address %lx\n", @@ -423,8 +424,8 @@ static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs, tsk->thread.trap_no = 14; tsk->thread.error_code = error_code; if (__die("Bad pagetable", regs, error_code)) - regs = NULL; - oops_end(flags, regs, SIGKILL); + sig = 0; + oops_end(flags, regs, sig); } #endif @@ -590,6 +591,7 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code) int fault; #ifdef CONFIG_X86_64 unsigned long flags; + int sig; #endif tsk = current; @@ -849,11 +851,12 @@ no_context: bust_spinlocks(0); do_exit(SIGKILL); #else + sig = SIGKILL; if (__die("Oops", regs, error_code)) - regs = NULL; + sig = 0; /* Executive summary in case the body of the oops scrolled away */ printk(KERN_EMERG "CR2: %016lx\n", address); - oops_end(flags, regs, SIGKILL); + oops_end(flags, regs, sig); #endif /* diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 2b4b14fc0c04..8655b5bb0963 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -68,7 +68,7 @@ static unsigned long __meminitdata table_top; static int __initdata after_init_bootmem; -static __init void *alloc_low_page(unsigned long *phys) +static __init void *alloc_low_page(void) { unsigned long pfn = table_end++; void *adr; @@ -78,7 +78,6 @@ static __init void *alloc_low_page(unsigned long *phys) adr = __va(pfn * PAGE_SIZE); memset(adr, 0, PAGE_SIZE); - *phys = pfn * PAGE_SIZE; return adr; } @@ -93,16 +92,17 @@ static pmd_t * __init one_md_table_init(pgd_t *pgd) pmd_t *pmd_table; #ifdef CONFIG_X86_PAE - unsigned long phys; if (!(pgd_val(*pgd) & _PAGE_PRESENT)) { if (after_init_bootmem) pmd_table = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE); else - pmd_table = (pmd_t *)alloc_low_page(&phys); + pmd_table = (pmd_t *)alloc_low_page(); paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT); set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT)); pud = pud_offset(pgd, 0); BUG_ON(pmd_table != pmd_offset(pud, 0)); + + return pmd_table; } #endif pud = pud_offset(pgd, 0); @@ -127,10 +127,8 @@ static pte_t * __init one_page_table_init(pmd_t *pmd) if (!page_table) page_table = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE); - } else { - unsigned long phys; - page_table = (pte_t *)alloc_low_page(&phys); - } + } else + page_table = (pte_t *)alloc_low_page(); paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT); set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE)); @@ -970,8 +968,6 @@ void __init mem_init(void) int codesize, reservedpages, datasize, initsize; int tmp; - 
start_periodic_check_for_corruption(); - pci_iommu_alloc(); #ifdef CONFIG_FLATMEM @@ -1043,11 +1039,25 @@ void __init mem_init(void) (unsigned long)&_text, (unsigned long)&_etext, ((unsigned long)&_etext - (unsigned long)&_text) >> 10); + /* + * Check boundaries twice: Some fundamental inconsistencies can + * be detected at build time already. + */ +#define __FIXADDR_TOP (-PAGE_SIZE) +#ifdef CONFIG_HIGHMEM + BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START); + BUILD_BUG_ON(VMALLOC_END > PKMAP_BASE); +#endif +#define high_memory (-128UL << 20) + BUILD_BUG_ON(VMALLOC_START >= VMALLOC_END); +#undef high_memory +#undef __FIXADDR_TOP + #ifdef CONFIG_HIGHMEM BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START); BUG_ON(VMALLOC_END > PKMAP_BASE); #endif - BUG_ON(VMALLOC_START > VMALLOC_END); + BUG_ON(VMALLOC_START >= VMALLOC_END); BUG_ON((unsigned long)high_memory > VMALLOC_START); if (boot_cpu_data.wp_works_ok < 0) diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 9db01db6e3cd..9f7a0d24d42a 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -902,8 +902,6 @@ void __init mem_init(void) long codesize, reservedpages, datasize, initsize; unsigned long absent_pages; - start_periodic_check_for_corruption(); - pci_iommu_alloc(); /* clear_bss() already clear the empty_zero_page */ diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index d4c4307ff3e0..bd85d42819e1 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -223,7 +223,8 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr, * Check if the request spans more than any BAR in the iomem resource * tree. */ - WARN_ON(iomem_map_sanity_check(phys_addr, size)); + WARN_ONCE(iomem_map_sanity_check(phys_addr, size), + KERN_INFO "Info: mapping multiple BARs. Your kernel is fine."); /* * Don't allow anybody to remap normal RAM that we're using.. diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c index eb1bf000d12e..85cbd3cd3723 100644 --- a/arch/x86/mm/pat.c +++ b/arch/x86/mm/pat.c @@ -596,6 +596,242 @@ void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot) free_memtype(addr, addr + size); } +/* + * Internal interface to reserve a range of physical memory with prot. + * Reserved non RAM regions only and after successful reserve_memtype, + * this func also keeps identity mapping (if any) in sync with this new prot. + */ +static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t vma_prot) +{ + int is_ram = 0; + int id_sz, ret; + unsigned long flags; + unsigned long want_flags = (pgprot_val(vma_prot) & _PAGE_CACHE_MASK); + + is_ram = pagerange_is_ram(paddr, paddr + size); + + if (is_ram != 0) { + /* + * For mapping RAM pages, drivers need to call + * set_memory_[uc|wc|wb] directly, for reserve and free, before + * setting up the PTE. + */ + WARN_ON_ONCE(1); + return 0; + } + + ret = reserve_memtype(paddr, paddr + size, want_flags, &flags); + if (ret) + return ret; + + if (flags != want_flags) { + free_memtype(paddr, paddr + size); + printk(KERN_ERR + "%s:%d map pfn expected mapping type %s for %Lx-%Lx, got %s\n", + current->comm, current->pid, + cattr_name(want_flags), + (unsigned long long)paddr, + (unsigned long long)(paddr + size), + cattr_name(flags)); + return -EINVAL; + } + + /* Need to keep identity mapping in sync */ + if (paddr >= __pa(high_memory)) + return 0; + + id_sz = (__pa(high_memory) < paddr + size) ? 
+ __pa(high_memory) - paddr : + size; + + if (ioremap_change_attr((unsigned long)__va(paddr), id_sz, flags) < 0) { + free_memtype(paddr, paddr + size); + printk(KERN_ERR + "%s:%d reserve_pfn_range ioremap_change_attr failed %s " + "for %Lx-%Lx\n", + current->comm, current->pid, + cattr_name(flags), + (unsigned long long)paddr, + (unsigned long long)(paddr + size)); + return -EINVAL; + } + return 0; +} + +/* + * Internal interface to free a range of physical memory. + * Frees non-RAM regions only. + */ +static void free_pfn_range(u64 paddr, unsigned long size) +{ + int is_ram; + + is_ram = pagerange_is_ram(paddr, paddr + size); + if (is_ram == 0) + free_memtype(paddr, paddr + size); +} + +/* + * track_pfn_vma_copy is called when the vma covering the pfnmap gets + * copied through copy_page_range(). + * + * If the vma has a linear pfn mapping for the entire range, we get the prot + * from the pte and reserve the entire vma range with a single + * reserve_pfn_range call. Otherwise, we reserve the entire vma range by going + * through the PTEs page by page to get the physical address and protection. + */ +int track_pfn_vma_copy(struct vm_area_struct *vma) +{ + int retval = 0; + unsigned long i, j; + resource_size_t paddr; + unsigned long prot; + unsigned long vma_start = vma->vm_start; + unsigned long vma_end = vma->vm_end; + unsigned long vma_size = vma_end - vma_start; + + if (!pat_enabled) + return 0; + + if (is_linear_pfn_mapping(vma)) { + /* + * reserve the whole chunk covered by vma. We need the + * starting address and protection from the pte. + */ + if (follow_phys(vma, vma_start, 0, &prot, &paddr)) { + WARN_ON_ONCE(1); + return -EINVAL; + } + return reserve_pfn_range(paddr, vma_size, __pgprot(prot)); + } + + /* reserve entire vma page by page, using pfn and prot from pte */ + for (i = 0; i < vma_size; i += PAGE_SIZE) { + if (follow_phys(vma, vma_start + i, 0, &prot, &paddr)) + continue; + + retval = reserve_pfn_range(paddr, PAGE_SIZE, __pgprot(prot)); + if (retval) + goto cleanup_ret; + } + return 0; + +cleanup_ret: + /* Reserve error: clean up the partial reservation and return the error */ + for (j = 0; j < i; j += PAGE_SIZE) { + if (follow_phys(vma, vma_start + j, 0, &prot, &paddr)) + continue; + + free_pfn_range(paddr, PAGE_SIZE); + } + + return retval; +} + +/* + * track_pfn_vma_new is called when a _new_ pfn mapping is being established + * for the physical range indicated by pfn and size. + * + * prot is passed in as a parameter for the new mapping. If the vma has a + * linear pfn mapping for the entire range, reserve the entire vma range with + * a single reserve_pfn_range call. + * Otherwise, we look at the pfn and size and reserve only the specified range + * page by page. + * + * Note that this function can be called with the caller trying to map only a + * subrange/page inside the vma.
+ */ +int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot, + unsigned long pfn, unsigned long size) +{ + int retval = 0; + unsigned long i, j; + resource_size_t base_paddr; + resource_size_t paddr; + unsigned long vma_start = vma->vm_start; + unsigned long vma_end = vma->vm_end; + unsigned long vma_size = vma_end - vma_start; + + if (!pat_enabled) + return 0; + + if (is_linear_pfn_mapping(vma)) { + /* reserve the whole chunk starting from vm_pgoff */ + paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT; + return reserve_pfn_range(paddr, vma_size, prot); + } + + /* reserve page by page using pfn and size */ + base_paddr = (resource_size_t)pfn << PAGE_SHIFT; + for (i = 0; i < size; i += PAGE_SIZE) { + paddr = base_paddr + i; + retval = reserve_pfn_range(paddr, PAGE_SIZE, prot); + if (retval) + goto cleanup_ret; + } + return 0; + +cleanup_ret: + /* Reserve error: Cleanup partial reservation and return error */ + for (j = 0; j < i; j += PAGE_SIZE) { + paddr = base_paddr + j; + free_pfn_range(paddr, PAGE_SIZE); + } + + return retval; +} + +/* + * untrack_pfn_vma is called while unmapping a pfnmap for a region. + * untrack can be called for a specific region indicated by pfn and size or + * can be for the entire vma (in which case size can be zero). + */ +void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn, + unsigned long size) +{ + unsigned long i; + resource_size_t paddr; + unsigned long prot; + unsigned long vma_start = vma->vm_start; + unsigned long vma_end = vma->vm_end; + unsigned long vma_size = vma_end - vma_start; + + if (!pat_enabled) + return; + + if (is_linear_pfn_mapping(vma)) { + /* free the whole chunk starting from vm_pgoff */ + paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT; + free_pfn_range(paddr, vma_size); + return; + } + + if (size != 0 && size != vma_size) { + /* free page by page, using pfn and size */ + paddr = (resource_size_t)pfn << PAGE_SHIFT; + for (i = 0; i < size; i += PAGE_SIZE) { + paddr = paddr + i; + free_pfn_range(paddr, PAGE_SIZE); + } + } else { + /* free entire vma, page by page, using the pfn from pte */ + for (i = 0; i < vma_size; i += PAGE_SIZE) { + if (follow_phys(vma, vma_start + i, 0, &prot, &paddr)) + continue; + + free_pfn_range(paddr, PAGE_SIZE); + } + } +} + +pgprot_t pgprot_writecombine(pgprot_t prot) +{ + if (pat_enabled) + return __pgprot(pgprot_val(prot) | _PAGE_CACHE_WC); + else + return pgprot_noncached(prot); +} + #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT) /* get Nth element of the linked list */ diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c index b67732bbb85a..bb1a01f089e2 100644 --- a/arch/x86/pci/common.c +++ b/arch/x86/pci/common.c @@ -23,6 +23,12 @@ unsigned int pci_probe = PCI_PROBE_BIOS | PCI_PROBE_CONF1 | PCI_PROBE_CONF2 | unsigned int pci_early_dump_regs; static int pci_bf_sort; int pci_routeirq; +int noioapicquirk; +#ifdef CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS +int noioapicreroute = 0; +#else +int noioapicreroute = 1; +#endif int pcibios_last_bus = -1; unsigned long pirq_table_addr; struct pci_bus *pci_root_bus; @@ -519,6 +525,17 @@ char * __devinit pcibios_setup(char *str) } else if (!strcmp(str, "skip_isa_align")) { pci_probe |= PCI_CAN_SKIP_ISA_ALIGN; return NULL; + } else if (!strcmp(str, "noioapicquirk")) { + noioapicquirk = 1; + return NULL; + } else if (!strcmp(str, "ioapicreroute")) { + if (noioapicreroute != -1) + noioapicreroute = 0; + return NULL; + } else if (!strcmp(str, "noioapicreroute")) { + if (noioapicreroute != -1) + noioapicreroute = 1; + return 
NULL; } return str; } diff --git a/arch/x86/pci/direct.c b/arch/x86/pci/direct.c index 9915293500fb..9a5af6c8fbe9 100644 --- a/arch/x86/pci/direct.c +++ b/arch/x86/pci/direct.c @@ -173,7 +173,7 @@ static int pci_conf2_write(unsigned int seg, unsigned int bus, #undef PCI_CONF2_ADDRESS -static struct pci_raw_ops pci_direct_conf2 = { +struct pci_raw_ops pci_direct_conf2 = { .read = pci_conf2_read, .write = pci_conf2_write, }; @@ -289,6 +289,7 @@ int __init pci_direct_probe(void) if (pci_check_type1()) { raw_pci_ops = &pci_direct_conf1; + port_cf9_safe = true; return 1; } release_resource(region); @@ -305,6 +306,7 @@ int __init pci_direct_probe(void) if (pci_check_type2()) { raw_pci_ops = &pci_direct_conf2; + port_cf9_safe = true; return 2; } diff --git a/arch/x86/pci/pci.h b/arch/x86/pci/pci.h index 15b9cf6be729..1959018aac02 100644 --- a/arch/x86/pci/pci.h +++ b/arch/x86/pci/pci.h @@ -96,6 +96,7 @@ extern struct pci_raw_ops *raw_pci_ops; extern struct pci_raw_ops *raw_pci_ext_ops; extern struct pci_raw_ops pci_direct_conf1; +extern bool port_cf9_safe; /* arch_initcall level */ extern int pci_direct_probe(void); diff --git a/arch/x86/scripts/strip-symbols b/arch/x86/scripts/strip-symbols new file mode 100644 index 000000000000..a2f1ccb827c7 --- /dev/null +++ b/arch/x86/scripts/strip-symbols @@ -0,0 +1 @@ +__cpu_vendor_dev_X86_VENDOR_* diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c index 1ef0f90813d6..d9d35824c56f 100644 --- a/arch/x86/vdso/vclock_gettime.c +++ b/arch/x86/vdso/vclock_gettime.c @@ -9,6 +9,9 @@ * Also alternative() doesn't work. */ +/* Disable profiling for userspace code: */ +#define DISABLE_BRANCH_PROFILING + #include <linux/kernel.h> #include <linux/posix-timers.h> #include <linux/time.h> diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c index 513f330c5832..1241f118ab56 100644 --- a/arch/x86/vdso/vdso32-setup.c +++ b/arch/x86/vdso/vdso32-setup.c @@ -310,7 +310,7 @@ int __init sysenter_setup(void) } /* Setup a VMA at program startup for the vsyscall page */ -int arch_setup_additional_pages(struct linux_binprm *bprm, int exstack) +int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) { struct mm_struct *mm = current->mm; unsigned long addr; diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c index 257ba4a10abf..9c98cc6ba978 100644 --- a/arch/x86/vdso/vma.c +++ b/arch/x86/vdso/vma.c @@ -98,7 +98,7 @@ static unsigned long vdso_addr(unsigned long start, unsigned len) /* Setup a VMA at program startup for the vsyscall page. 
Not called for compat tasks */ -int arch_setup_additional_pages(struct linux_binprm *bprm, int exstack) +int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) { struct mm_struct *mm = current->mm; unsigned long addr; diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 5e4686d70f62..bea215230b20 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -28,6 +28,7 @@ #include <linux/console.h> #include <xen/interface/xen.h> +#include <xen/interface/version.h> #include <xen/interface/physdev.h> #include <xen/interface/vcpu.h> #include <xen/features.h> @@ -793,7 +794,7 @@ static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high) ret = 0; - switch(msr) { + switch (msr) { #ifdef CONFIG_X86_64 unsigned which; u64 base; @@ -1453,7 +1454,7 @@ static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn) ident_pte = 0; pfn = 0; - for(pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) { + for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) { pte_t *pte_page; /* Reuse or allocate a page of ptes */ @@ -1471,7 +1472,7 @@ static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn) } /* Install mappings */ - for(pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) { + for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) { pte_t pte; if (pfn > max_pfn_mapped) @@ -1485,7 +1486,7 @@ static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn) } } - for(pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE) + for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE) set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO); set_page_prot(pmd, PAGE_KERNEL_RO); @@ -1499,7 +1500,7 @@ static void convert_pfn_mfn(void *v) /* All levels are converted the same way, so just treat them as ptes. */ - for(i = 0; i < PTRS_PER_PTE; i++) + for (i = 0; i < PTRS_PER_PTE; i++) pte[i] = xen_make_pte(pte[i].pte); } @@ -1514,7 +1515,8 @@ static void convert_pfn_mfn(void *v) * of the physical mapping once some sort of allocator has been set * up. 
*/ -static __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) +static __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, + unsigned long max_pfn) { pud_t *l3; pmd_t *l2; @@ -1577,7 +1579,8 @@ static __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pf #else /* !CONFIG_X86_64 */ static pmd_t level2_kernel_pgt[PTRS_PER_PMD] __page_aligned_bss; -static __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) +static __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, + unsigned long max_pfn) { pmd_t *kernel_pmd; diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 636ef4caa52d..773d68d3e912 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -154,13 +154,13 @@ void xen_setup_mfn_list_list(void) { unsigned pfn, idx; - for(pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_ENTRIES_PER_PAGE) { + for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_ENTRIES_PER_PAGE) { unsigned topidx = p2m_top_index(pfn); p2m_top_mfn[topidx] = virt_to_mfn(p2m_top[topidx]); } - for(idx = 0; idx < ARRAY_SIZE(p2m_top_mfn_list); idx++) { + for (idx = 0; idx < ARRAY_SIZE(p2m_top_mfn_list); idx++) { unsigned topidx = idx * P2M_ENTRIES_PER_PAGE; p2m_top_mfn_list[idx] = virt_to_mfn(&p2m_top_mfn[topidx]); } @@ -179,7 +179,7 @@ void __init xen_build_dynamic_phys_to_machine(void) unsigned long max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages); unsigned pfn; - for(pfn = 0; pfn < max_pfn; pfn += P2M_ENTRIES_PER_PAGE) { + for (pfn = 0; pfn < max_pfn; pfn += P2M_ENTRIES_PER_PAGE) { unsigned topidx = p2m_top_index(pfn); p2m_top[topidx] = &mfn_list[pfn]; @@ -207,7 +207,7 @@ static void alloc_p2m(unsigned long **pp, unsigned long *mfnp) p = (void *)__get_free_page(GFP_KERNEL | __GFP_NOFAIL); BUG_ON(p == NULL); - for(i = 0; i < P2M_ENTRIES_PER_PAGE; i++) + for (i = 0; i < P2M_ENTRIES_PER_PAGE; i++) p[i] = INVALID_P2M_ENTRY; if (cmpxchg(pp, p2m_missing, p) != p2m_missing) @@ -407,7 +407,8 @@ out: preempt_enable(); } -pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, pte_t *ptep) +pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) { /* Just return the pte as-is. We preserve the bits on commit */ return *ptep; @@ -878,7 +879,8 @@ static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd) if (user_pgd) { xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD); - xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(user_pgd))); + xen_do_pin(MMUEXT_PIN_L4_TABLE, + PFN_DOWN(__pa(user_pgd))); } } #else /* CONFIG_X86_32 */ @@ -993,7 +995,8 @@ static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd) pgd_t *user_pgd = xen_get_user_pgd(pgd); if (user_pgd) { - xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(user_pgd))); + xen_do_pin(MMUEXT_UNPIN_TABLE, + PFN_DOWN(__pa(user_pgd))); xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD); } } diff --git a/arch/x86/xen/multicalls.c b/arch/x86/xen/multicalls.c index 8ea8a0d0b0de..c738644b5435 100644 --- a/arch/x86/xen/multicalls.c +++ b/arch/x86/xen/multicalls.c @@ -154,7 +154,7 @@ void xen_mc_flush(void) ret, smp_processor_id()); dump_stack(); for (i = 0; i < b->mcidx; i++) { - printk(" call %2d/%d: op=%lu arg=[%lx] result=%ld\n", + printk(KERN_DEBUG " call %2d/%d: op=%lu arg=[%lx] result=%ld\n", i+1, b->mcidx, b->debug[i].op, b->debug[i].args[0], diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index d67901083888..15c6c68db6a2 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c @@ -28,6 +28,9 @@ /* These are code, but not functions. 
Defined in entry.S */ extern const char xen_hypervisor_callback[]; extern const char xen_failsafe_callback[]; +extern void xen_sysenter_target(void); +extern void xen_syscall_target(void); +extern void xen_syscall32_target(void); /** @@ -110,7 +113,6 @@ static __cpuinit int register_callback(unsigned type, const void *func) void __cpuinit xen_enable_sysenter(void) { - extern void xen_sysenter_target(void); int ret; unsigned sysenter_feature; @@ -132,8 +134,6 @@ void __cpuinit xen_enable_syscall(void) { #ifdef CONFIG_X86_64 int ret; - extern void xen_syscall_target(void); - extern void xen_syscall32_target(void); ret = register_callback(CALLBACKTYPE_syscall, xen_syscall_target); if (ret != 0) { @@ -160,7 +160,8 @@ void __init xen_arch_setup(void) HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables); if (!xen_feature(XENFEAT_auto_translated_physmap)) - HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_pae_extended_cr3); + HYPERVISOR_vm_assist(VMASST_CMD_enable, + VMASST_TYPE_pae_extended_cr3); if (register_callback(CALLBACKTYPE_event, xen_hypervisor_callback) || register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback)) diff --git a/arch/xtensa/platforms/iss/network.c b/arch/xtensa/platforms/iss/network.c index 11a20adc1409..64f057d89e73 100644 --- a/arch/xtensa/platforms/iss/network.c +++ b/arch/xtensa/platforms/iss/network.c @@ -365,7 +365,7 @@ static int tuntap_probe(struct iss_net_private *lp, int index, char *init) static int iss_net_rx(struct net_device *dev) { - struct iss_net_private *lp = dev->priv; + struct iss_net_private *lp = netdev_priv(dev); int pkt_len; struct sk_buff *skb; @@ -456,7 +456,7 @@ static void iss_net_timer(unsigned long priv) static int iss_net_open(struct net_device *dev) { - struct iss_net_private *lp = dev->priv; + struct iss_net_private *lp = netdev_priv(dev); char addr[sizeof "255.255.255.255\0"]; int err; @@ -496,7 +496,7 @@ out: static int iss_net_close(struct net_device *dev) { - struct iss_net_private *lp = dev->priv; + struct iss_net_private *lp = netdev_priv(dev); printk("iss_net_close!\n"); netif_stop_queue(dev); spin_lock(&lp->lock); @@ -515,7 +515,7 @@ printk("iss_net_close!\n"); static int iss_net_start_xmit(struct sk_buff *skb, struct net_device *dev) { - struct iss_net_private *lp = dev->priv; + struct iss_net_private *lp = netdev_priv(dev); unsigned long flags; int len; @@ -551,7 +551,7 @@ static int iss_net_start_xmit(struct sk_buff *skb, struct net_device *dev) static struct net_device_stats *iss_net_get_stats(struct net_device *dev) { - struct iss_net_private *lp = dev->priv; + struct iss_net_private *lp = netdev_priv(dev); return &lp->stats; } @@ -578,7 +578,7 @@ static void iss_net_tx_timeout(struct net_device *dev) static int iss_net_set_mac(struct net_device *dev, void *addr) { #if 0 - struct iss_net_private *lp = dev->priv; + struct iss_net_private *lp = netdev_priv(dev); struct sockaddr *hwaddr = addr; spin_lock(&lp->lock); @@ -592,7 +592,7 @@ static int iss_net_set_mac(struct net_device *dev, void *addr) static int iss_net_change_mtu(struct net_device *dev, int new_mtu) { #if 0 - struct iss_net_private *lp = dev->priv; + struct iss_net_private *lp = netdev_priv(dev); int err = 0; spin_lock(&lp->lock); @@ -636,7 +636,7 @@ static int iss_net_configure(int index, char *init) /* Initialize private element. 
*/ - lp = dev->priv; + lp = netdev_priv(dev); *lp = ((struct iss_net_private) { .device_list = LIST_HEAD_INIT(lp->device_list), .opened_list = LIST_HEAD_INIT(lp->opened_list), @@ -660,10 +660,7 @@ static int iss_net_configure(int index, char *init) printk(KERN_INFO "Netdevice %d ", index); if (lp->have_mac) - printk("(%02x:%02x:%02x:%02x:%02x:%02x) ", - lp->mac[0], lp->mac[1], - lp->mac[2], lp->mac[3], - lp->mac[4], lp->mac[5]); + printk("(%pM) ", lp->mac); printk(": "); /* sysfs register */
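The xtensa ISS hunks above follow two tree-wide conversions of this period: fetching driver state with netdev_priv() instead of dereferencing dev->priv directly, and printing MAC addresses with the %pM printk extension instead of six %02x arguments. The sketch below shows how a driver typically pairs alloc_etherdev() with netdev_priv(); it is illustrative only, and the struct and field names are hypothetical rather than taken from the ISS driver.

/* Illustrative sketch: alloc_etherdev(sizeof_priv) places the private area
 * right after the net_device, and netdev_priv() returns a pointer to it. */
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/spinlock.h>

struct demo_priv {			/* hypothetical private state */
	spinlock_t lock;
	unsigned char mac[ETH_ALEN];
};

static struct net_device *demo_create(void)
{
	struct net_device *dev = alloc_etherdev(sizeof(struct demo_priv));
	struct demo_priv *lp;

	if (!dev)
		return NULL;

	lp = netdev_priv(dev);		/* not dev->priv, which is going away */
	spin_lock_init(&lp->lock);

	/* %pM prints a 6-byte MAC array as xx:xx:xx:xx:xx:xx. */
	printk(KERN_INFO "demo: MAC %pM\n", lp->mac);
	return dev;
}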