Diffstat (limited to 'arch')
1104 files changed, 20300 insertions, 15226 deletions
diff --git a/arch/alpha/include/asm/Kbuild b/arch/alpha/include/asm/Kbuild index 25b49725df07..76aeb8fa551a 100644 --- a/arch/alpha/include/asm/Kbuild +++ b/arch/alpha/include/asm/Kbuild @@ -3,7 +3,6 @@ generic-y += clkdev.h generic-y += cputime.h generic-y += exec.h -generic-y += hash.h generic-y += irq_work.h generic-y += mcs_spinlock.h generic-y += preempt.h diff --git a/arch/alpha/include/asm/barrier.h b/arch/alpha/include/asm/barrier.h index 3832bdb794fe..77516c87255d 100644 --- a/arch/alpha/include/asm/barrier.h +++ b/arch/alpha/include/asm/barrier.h @@ -7,6 +7,57 @@ #define rmb() __asm__ __volatile__("mb": : :"memory") #define wmb() __asm__ __volatile__("wmb": : :"memory") +/** + * read_barrier_depends - Flush all pending reads that subsequents reads + * depend on. + * + * No data-dependent reads from memory-like regions are ever reordered + * over this barrier. All reads preceding this primitive are guaranteed + * to access memory (but not necessarily other CPUs' caches) before any + * reads following this primitive that depend on the data return by + * any of the preceding reads. This primitive is much lighter weight than + * rmb() on most CPUs, and is never heavier weight than is + * rmb(). + * + * These ordering constraints are respected by both the local CPU + * and the compiler. + * + * Ordering is not guaranteed by anything other than these primitives, + * not even by data dependencies. See the documentation for + * memory_barrier() for examples and URLs to more information. + * + * For example, the following code would force ordering (the initial + * value of "a" is zero, "b" is one, and "p" is "&a"): + * + * <programlisting> + * CPU 0 CPU 1 + * + * b = 2; + * memory_barrier(); + * p = &b; q = p; + * read_barrier_depends(); + * d = *q; + * </programlisting> + * + * because the read of "*q" depends on the read of "p" and these + * two reads are separated by a read_barrier_depends(). However, + * the following code, with the same initial values for "a" and "b": + * + * <programlisting> + * CPU 0 CPU 1 + * + * a = 2; + * memory_barrier(); + * b = 3; y = b; + * read_barrier_depends(); + * x = a; + * </programlisting> + * + * does not enforce ordering, since there is no data dependency between + * the read of "a" and the read of "b". Therefore, on some CPUs, such + * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb() + * in cases like this where there are no data dependencies. 
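The guarantee spelled out above is the usual initialise-then-publish pattern: a writer fills in an object, issues a write barrier, then publishes a pointer to it; a reader loads the pointer, issues read_barrier_depends(), and only then dereferences it. A minimal C sketch of that pattern (struct foo, gp and the function names are illustrative, not part of this patch):

struct foo { int a; };
struct foo *gp;				/* pointer published by the writer */

void publish(struct foo *new)
{
	new->a = 2;			/* CPU 0: "b = 2"          */
	smp_wmb();			/* CPU 0: memory_barrier() */
	gp = new;			/* CPU 0: "p = &b"         */
}

int consume(void)
{
	struct foo *q = ACCESS_ONCE(gp);	/* CPU 1: "q = p" */

	if (!q)
		return -1;
	read_barrier_depends();		/* order the dependent load below */
	return q->a;			/* CPU 1: "d = *q" */
}

Alpha is the only architecture that needs a real barrier here (the "mb" defined below); on most other architectures read_barrier_depends() expands to nothing, which is why the comment calls it much lighter weight than rmb().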
+ */ #define read_barrier_depends() __asm__ __volatile__("mb": : :"memory") #ifdef CONFIG_SMP diff --git a/arch/alpha/include/uapi/asm/socket.h b/arch/alpha/include/uapi/asm/socket.h index 3de1394bcab8..9a20821b111c 100644 --- a/arch/alpha/include/uapi/asm/socket.h +++ b/arch/alpha/include/uapi/asm/socket.h @@ -87,4 +87,9 @@ #define SO_BPF_EXTENSIONS 48 +#define SO_INCOMING_CPU 49 + +#define SO_ATTACH_BPF 50 +#define SO_DETACH_BPF SO_DETACH_FILTER + #endif /* _UAPI_ASM_SOCKET_H */ diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c index f9c732e18284..e51f578636a5 100644 --- a/arch/alpha/kernel/osf_sys.c +++ b/arch/alpha/kernel/osf_sys.c @@ -104,11 +104,12 @@ struct osf_dirent_callback { }; static int -osf_filldir(void *__buf, const char *name, int namlen, loff_t offset, - u64 ino, unsigned int d_type) +osf_filldir(struct dir_context *ctx, const char *name, int namlen, + loff_t offset, u64 ino, unsigned int d_type) { struct osf_dirent __user *dirent; - struct osf_dirent_callback *buf = (struct osf_dirent_callback *) __buf; + struct osf_dirent_callback *buf = + container_of(ctx, struct osf_dirent_callback, ctx); unsigned int reclen = ALIGN(NAME_OFFSET + namlen + 1, sizeof(u32)); unsigned int d_ino; diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild index b8fffc1a2ac2..be0c39e76f7c 100644 --- a/arch/arc/include/asm/Kbuild +++ b/arch/arc/include/asm/Kbuild @@ -12,7 +12,6 @@ generic-y += fb.h generic-y += fcntl.h generic-y += ftrace.h generic-y += hardirq.h -generic-y += hash.h generic-y += hw_irq.h generic-y += ioctl.h generic-y += ioctls.h diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index c8424a85bc04..97d07ed60a0b 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -5,6 +5,7 @@ config ARM select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_HAVE_CUSTOM_GPIO_H + select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_MIGHT_HAVE_PC_PARPORT select ARCH_SUPPORTS_ATOMIC_RMW select ARCH_USE_BUILTIN_BSWAP @@ -687,7 +688,9 @@ config ARCH_SA1100 select CPU_SA1100 select GENERIC_CLOCKEVENTS select HAVE_IDE + select IRQ_DOMAIN select ISA + select MULTI_IRQ_HANDLER select NEED_MACH_MEMORY_H select SPARSE_IRQ help diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug index f9295a4e1036..5ddd4906f7a7 100644 --- a/arch/arm/Kconfig.debug +++ b/arch/arm/Kconfig.debug @@ -1452,14 +1452,6 @@ config EARLY_PRINTK kernel low-level debugging functions. Add earlyprintk to your kernel parameters to enable this console. -config OC_ETM - bool "On-chip ETM and ETB" - depends on ARM_AMBA - help - Enables the on-chip embedded trace macrocell and embedded trace - buffer driver that will allow you to collect traces of the - kernel code. - config ARM_KPROBES_TEST tristate "Kprobes test module" depends on KPROBES && MODULES @@ -1486,4 +1478,59 @@ config DEBUG_SET_MODULE_RONX against certain classes of kernel exploits. If in doubt, say "N". +menuconfig CORESIGHT + bool "CoreSight Tracing Support" + select ARM_AMBA + help + This framework provides a kernel interface for the CoreSight debug + and trace drivers to register themselves with. It's intended to build + a topological view of the CoreSight components based on a DT + specification and configure the right serie of components when a + trace source gets enabled. 
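Stepping back to the osf_sys.c hunk earlier in this diff: the filldir callback no longer receives an opaque void * cookie but the struct dir_context * that the VFS passes around, and the per-call state is recovered with container_of(). A minimal sketch of the pattern, assuming the callback state embeds the context (field names other than ctx and dirent are illustrative):

struct osf_dirent_callback {
	struct dir_context ctx;		/* embedded, so container_of() can find us */
	struct osf_dirent __user *dirent;
	int error;
	/* ... remaining private fields ... */
};

static int osf_filldir(struct dir_context *ctx, const char *name, int namlen,
		       loff_t offset, u64 ino, unsigned int d_type)
{
	struct osf_dirent_callback *buf =
		container_of(ctx, struct osf_dirent_callback, ctx);

	/* fill buf->dirent from name/namlen/ino exactly as before */
	return 0;
}

Embedding dir_context lets the VFS hand the callback a typed pointer instead of a void * cookie, so the cast in the old code disappears.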
+ +if CORESIGHT +config CORESIGHT_LINKS_AND_SINKS + bool "CoreSight Link and Sink drivers" + help + This enables support for CoreSight link and sink drivers that are + responsible for transporting and collecting the trace data + respectively. Link and sinks are dynamically aggregated with a trace + entity at run time to form a complete trace path. + +config CORESIGHT_LINK_AND_SINK_TMC + bool "Coresight generic TMC driver" + depends on CORESIGHT_LINKS_AND_SINKS + help + This enables support for the Trace Memory Controller driver. Depending + on its configuration the device can act as a link (embedded trace router + - ETR) or sink (embedded trace FIFO). The driver complies with the + generic implementation of the component without special enhancement or + added features. + +config CORESIGHT_SINK_TPIU + bool "Coresight generic TPIU driver" + depends on CORESIGHT_LINKS_AND_SINKS + help + This enables support for the Trace Port Interface Unit driver, responsible + for bridging the gap between the on-chip coresight components and a trace + port collection engine, typically connected to an external host for use + case capturing more traces than the on-board coresight memory can handle. + +config CORESIGHT_SINK_ETBV10 + bool "Coresight ETBv1.0 driver" + depends on CORESIGHT_LINKS_AND_SINKS + help + This enables support for the Embedded Trace Buffer version 1.0 driver + that complies with the generic implementation of the component without + special enhancement or added features. + +config CORESIGHT_SOURCE_ETM3X + bool "CoreSight Embedded Trace Macrocell 3.x driver" + select CORESIGHT_LINKS_AND_SINKS + help + This driver provides support for processor ETM3.x and PTM1.x modules, + which allows tracing the instructions that a processor is executing + This is primarily useful for instruction level tracing. Depending + the ETM version data tracing may also be available. 
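With a sink (ETB/TMC/TPIU) and a source (ETM/PTM) from the options above built in, a trace run is driven from user space through the drivers' sysfs nodes. A rough sketch in C, where the sysfs paths and device names are assumptions for illustration only; the authoritative interface is described in Documentation/trace/coresight.txt:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Write a short string to a sysfs attribute; returns 0 on success. */
static int sysfs_write(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, val, 1) != 1) {
		close(fd);
		return -1;
	}
	return close(fd);
}

int main(void)
{
	/* Device names depend on the DT/AMBA probe; these are examples. */
	if (sysfs_write("/sys/bus/coresight/devices/e3c42000.etb/enable_sink", "1"))
		perror("enable_sink");
	if (sysfs_write("/sys/bus/coresight/devices/e3c7c000.ptm/enable_source", "1"))
		perror("enable_source");
	return 0;
}

Trace data accumulates in the selected sink and is retrieved through the sink driver once tracing is stopped.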
+endif endmenu diff --git a/arch/arm/boot/dts/am335x-boneblack.dts b/arch/arm/boot/dts/am335x-boneblack.dts index 901739fcb85a..5c42d259fa68 100644 --- a/arch/arm/boot/dts/am335x-boneblack.dts +++ b/arch/arm/boot/dts/am335x-boneblack.dts @@ -80,3 +80,7 @@ status = "okay"; }; }; + +&rtc { + system-power-controller; +}; diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi index befe713b3e1b..acd37057bca9 100644 --- a/arch/arm/boot/dts/am33xx.dtsi +++ b/arch/arm/boot/dts/am33xx.dtsi @@ -435,7 +435,7 @@ }; rtc: rtc@44e3e000 { - compatible = "ti,da830-rtc"; + compatible = "ti,am3352-rtc", "ti,da830-rtc"; reg = <0x44e3e000 0x1000>; interrupts = <75 76>; diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi index d42d7865dd53..b62a1cd776cd 100644 --- a/arch/arm/boot/dts/am4372.dtsi +++ b/arch/arm/boot/dts/am4372.dtsi @@ -843,6 +843,8 @@ maximum-speed = "high-speed"; dr_mode = "otg"; status = "disabled"; + snps,dis_u3_susphy_quirk; + snps,dis_u2_susphy_quirk; }; }; @@ -865,6 +867,8 @@ maximum-speed = "high-speed"; dr_mode = "otg"; status = "disabled"; + snps,dis_u3_susphy_quirk; + snps,dis_u2_susphy_quirk; }; }; diff --git a/arch/arm/boot/dts/at91sam9g45.dtsi b/arch/arm/boot/dts/at91sam9g45.dtsi index d3f65130a1f8..6c0637a4bda5 100644 --- a/arch/arm/boot/dts/at91sam9g45.dtsi +++ b/arch/arm/boot/dts/at91sam9g45.dtsi @@ -940,6 +940,13 @@ status = "disabled"; }; + trng@fffcc000 { + compatible = "atmel,at91sam9g45-trng"; + reg = <0xfffcc000 0x4000>; + interrupts = <6 IRQ_TYPE_LEVEL_HIGH 0>; + clocks = <&trng_clk>; + }; + i2c0: i2c@fff84000 { compatible = "atmel,at91sam9g10-i2c"; reg = <0xfff84000 0x100>; diff --git a/arch/arm/boot/dts/bcm63138.dtsi b/arch/arm/boot/dts/bcm63138.dtsi index f3bb2dd6269e..d2d8e94e0aa2 100644 --- a/arch/arm/boot/dts/bcm63138.dtsi +++ b/arch/arm/boot/dts/bcm63138.dtsi @@ -102,7 +102,7 @@ twd_watchdog: watchdog@1e620 { compatible = "arm,cortex-a9-twd-wdt"; reg = <0x1e620 0x20>; - interupts = <GIC_PPI 14 IRQ_TYPE_LEVEL_HIGH>; + interrupts = <GIC_PPI 14 IRQ_TYPE_LEVEL_HIGH>; }; }; diff --git a/arch/arm/boot/dts/exynos3250.dtsi b/arch/arm/boot/dts/exynos3250.dtsi index 242ddda0a8cd..22465494b796 100644 --- a/arch/arm/boot/dts/exynos3250.dtsi +++ b/arch/arm/boot/dts/exynos3250.dtsi @@ -311,12 +311,13 @@ adc: adc@126C0000 { compatible = "samsung,exynos3250-adc", "samsung,exynos-adc-v2"; - reg = <0x126C0000 0x100>, <0x10020718 0x4>; + reg = <0x126C0000 0x100>; interrupts = <0 137 0>; clock-names = "adc", "sclk"; clocks = <&cmu CLK_TSADC>, <&cmu CLK_SCLK_TSADC>; #io-channel-cells = <1>; io-channel-ranges; + samsung,syscon-phandle = <&pmu_system_controller>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/exynos4x12.dtsi b/arch/arm/boot/dts/exynos4x12.dtsi index 2e9f1f7be77b..93b70402e943 100644 --- a/arch/arm/boot/dts/exynos4x12.dtsi +++ b/arch/arm/boot/dts/exynos4x12.dtsi @@ -108,13 +108,14 @@ adc: adc@126C0000 { compatible = "samsung,exynos-adc-v1"; - reg = <0x126C0000 0x100>, <0x10020718 0x4>; + reg = <0x126C0000 0x100>; interrupt-parent = <&combiner>; interrupts = <10 3>; clocks = <&clock CLK_TSADC>; clock-names = "adc"; #io-channel-cells = <1>; io-channel-ranges; + samsung,syscon-phandle = <&pmu_system_controller>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi index d45a07ea3402..0a229fcd7acf 100644 --- a/arch/arm/boot/dts/exynos5250.dtsi +++ b/arch/arm/boot/dts/exynos5250.dtsi @@ -754,12 +754,13 @@ adc: adc@12D10000 { compatible = "samsung,exynos-adc-v1"; - reg = 
<0x12D10000 0x100>, <0x10040718 0x4>; + reg = <0x12D10000 0x100>; interrupts = <0 106 0>; clocks = <&clock CLK_ADC>; clock-names = "adc"; #io-channel-cells = <1>; io-channel-ranges; + samsung,syscon-phandle = <&pmu_system_controller>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/exynos5420.dtsi b/arch/arm/boot/dts/exynos5420.dtsi index 90bf4011e319..517e50f6760b 100644 --- a/arch/arm/boot/dts/exynos5420.dtsi +++ b/arch/arm/boot/dts/exynos5420.dtsi @@ -541,12 +541,13 @@ adc: adc@12D10000 { compatible = "samsung,exynos-adc-v2"; - reg = <0x12D10000 0x100>, <0x10040720 0x4>; + reg = <0x12D10000 0x100>; interrupts = <0 106 0>; clocks = <&clock CLK_TSADC>; clock-names = "adc"; #io-channel-cells = <1>; io-channel-ranges; + samsung,syscon-phandle = <&pmu_system_controller>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/hip04.dtsi b/arch/arm/boot/dts/hip04.dtsi index 93b6c909e991..238814596a87 100644 --- a/arch/arm/boot/dts/hip04.dtsi +++ b/arch/arm/boot/dts/hip04.dtsi @@ -190,6 +190,12 @@ clock-frequency = <168000000>; }; + clk_375m: clk_375m { + #clock-cells = <0>; + compatible = "fixed-clock"; + clock-frequency = <375000000>; + }; + soc { /* It's a 32-bit SoC. */ #address-cells = <1>; @@ -264,4 +270,715 @@ }; }; + + etb@0,e3c42000 { + compatible = "arm,coresight-etb10", "arm,primecell"; + reg = <0 0xe3c42000 0 0x1000>; + + coresight-default-sink; + clocks = <&clk_375m>; + clock-names = "apb_pclk"; + port { + etb0_in_port: endpoint@0 { + slave-mode; + remote-endpoint = <&replicator0_out_port0>; + }; + }; + }; + + etb@0,e3c82000 { + compatible = "arm,coresight-etb10", "arm,primecell"; + reg = <0 0xe3c82000 0 0x1000>; + + clocks = <&clk_375m>; + clock-names = "apb_pclk"; + port { + etb1_in_port: endpoint@0 { + slave-mode; + remote-endpoint = <&replicator1_out_port0>; + }; + }; + }; + + etb@0,e3cc2000 { + compatible = "arm,coresight-etb10", "arm,primecell"; + reg = <0 0xe3cc2000 0 0x1000>; + + clocks = <&clk_375m>; + clock-names = "apb_pclk"; + port { + etb2_in_port: endpoint@0 { + slave-mode; + remote-endpoint = <&replicator2_out_port0>; + }; + }; + }; + + etb@0,e3d02000 { + compatible = "arm,coresight-etb10", "arm,primecell"; + reg = <0 0xe3d02000 0 0x1000>; + + clocks = <&clk_375m>; + clock-names = "apb_pclk"; + port { + etb3_in_port: endpoint@0 { + slave-mode; + remote-endpoint = <&replicator3_out_port0>; + }; + }; + }; + + tpiu@0,e3c05000 { + compatible = "arm,coresight-tpiu", "arm,primecell"; + reg = <0 0xe3c05000 0 0x1000>; + + clocks = <&clk_375m>; + clock-names = "apb_pclk"; + port { + tpiu_in_port: endpoint@0 { + slave-mode; + remote-endpoint = <&funnel4_out_port0>; + }; + }; + }; + + replicator0 { + /* non-configurable replicators don't show up on the + * AMBA bus. As such no need to add "arm,primecell". + */ + compatible = "arm,coresight-replicator"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + /* replicator output ports */ + port@0 { + reg = <0>; + replicator0_out_port0: endpoint { + remote-endpoint = <&etb0_in_port>; + }; + }; + + port@1 { + reg = <1>; + replicator0_out_port1: endpoint { + remote-endpoint = <&funnel4_in_port0>; + }; + }; + + /* replicator input port */ + port@2 { + reg = <0>; + replicator0_in_port0: endpoint { + slave-mode; + remote-endpoint = <&funnel0_out_port0>; + }; + }; + }; + }; + + replicator1 { + /* non-configurable replicators don't show up on the + * AMBA bus. As such no need to add "arm,primecell". 
+ */ + compatible = "arm,coresight-replicator"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + /* replicator output ports */ + port@0 { + reg = <0>; + replicator1_out_port0: endpoint { + remote-endpoint = <&etb1_in_port>; + }; + }; + + port@1 { + reg = <1>; + replicator1_out_port1: endpoint { + remote-endpoint = <&funnel4_in_port1>; + }; + }; + + /* replicator input port */ + port@2 { + reg = <0>; + replicator1_in_port0: endpoint { + slave-mode; + remote-endpoint = <&funnel1_out_port0>; + }; + }; + }; + }; + + replicator2 { + /* non-configurable replicators don't show up on the + * AMBA bus. As such no need to add "arm,primecell". + */ + compatible = "arm,coresight-replicator"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + /* replicator output ports */ + port@0 { + reg = <0>; + replicator2_out_port0: endpoint { + remote-endpoint = <&etb2_in_port>; + }; + }; + + port@1 { + reg = <1>; + replicator2_out_port1: endpoint { + remote-endpoint = <&funnel4_in_port2>; + }; + }; + + /* replicator input port */ + port@2 { + reg = <0>; + replicator2_in_port0: endpoint { + slave-mode; + remote-endpoint = <&funnel2_out_port0>; + }; + }; + }; + }; + + replicator3 { + /* non-configurable replicators don't show up on the + * AMBA bus. As such no need to add "arm,primecell". + */ + compatible = "arm,coresight-replicator"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + /* replicator output ports */ + port@0 { + reg = <0>; + replicator3_out_port0: endpoint { + remote-endpoint = <&etb3_in_port>; + }; + }; + + port@1 { + reg = <1>; + replicator3_out_port1: endpoint { + remote-endpoint = <&funnel4_in_port3>; + }; + }; + + /* replicator input port */ + port@2 { + reg = <0>; + replicator3_in_port0: endpoint { + slave-mode; + remote-endpoint = <&funnel3_out_port0>; + }; + }; + }; + }; + + funnel@0,e3c41000 { + compatible = "arm,coresight-funnel", "arm,primecell"; + reg = <0 0xe3c41000 0 0x1000>; + + clocks = <&clk_375m>; + clock-names = "apb_pclk"; + ports { + #address-cells = <1>; + #size-cells = <0>; + + /* funnel output port */ + port@0 { + reg = <0>; + funnel0_out_port0: endpoint { + remote-endpoint = + <&replicator0_in_port0>; + }; + }; + + /* funnel input ports */ + port@1 { + reg = <0>; + funnel0_in_port0: endpoint { + slave-mode; + remote-endpoint = <&ptm0_out_port>; + }; + }; + + port@2 { + reg = <1>; + funnel0_in_port1: endpoint { + slave-mode; + remote-endpoint = <&ptm1_out_port>; + }; + }; + + port@3 { + reg = <2>; + funnel0_in_port2: endpoint { + slave-mode; + remote-endpoint = <&ptm2_out_port>; + }; + }; + + port@4 { + reg = <3>; + funnel0_in_port3: endpoint { + slave-mode; + remote-endpoint = <&ptm3_out_port>; + }; + }; + }; + }; + + funnel@0,e3c81000 { + compatible = "arm,coresight-funnel", "arm,primecell"; + reg = <0 0xe3c81000 0 0x1000>; + + clocks = <&clk_375m>; + clock-names = "apb_pclk"; + ports { + #address-cells = <1>; + #size-cells = <0>; + + /* funnel output port */ + port@0 { + reg = <0>; + funnel1_out_port0: endpoint { + remote-endpoint = + <&replicator1_in_port0>; + }; + }; + + /* funnel input ports */ + port@1 { + reg = <0>; + funnel1_in_port0: endpoint { + slave-mode; + remote-endpoint = <&ptm4_out_port>; + }; + }; + + port@2 { + reg = <1>; + funnel1_in_port1: endpoint { + slave-mode; + remote-endpoint = <&ptm5_out_port>; + }; + }; + + port@3 { + reg = <2>; + funnel1_in_port2: endpoint { + slave-mode; + remote-endpoint = <&ptm6_out_port>; + }; + }; + + port@4 { + reg = <3>; + funnel1_in_port3: endpoint { + slave-mode; + remote-endpoint = 
<&ptm7_out_port>; + }; + }; + }; + }; + + funnel@0,e3cc1000 { + compatible = "arm,coresight-funnel", "arm,primecell"; + reg = <0 0xe3cc1000 0 0x1000>; + + clocks = <&clk_375m>; + clock-names = "apb_pclk"; + ports { + #address-cells = <1>; + #size-cells = <0>; + + /* funnel output port */ + port@0 { + reg = <0>; + funnel2_out_port0: endpoint { + remote-endpoint = + <&replicator2_in_port0>; + }; + }; + + /* funnel input ports */ + port@1 { + reg = <0>; + funnel2_in_port0: endpoint { + slave-mode; + remote-endpoint = <&ptm8_out_port>; + }; + }; + + port@2 { + reg = <1>; + funnel2_in_port1: endpoint { + slave-mode; + remote-endpoint = <&ptm9_out_port>; + }; + }; + + port@3 { + reg = <2>; + funnel2_in_port2: endpoint { + slave-mode; + remote-endpoint = <&ptm10_out_port>; + }; + }; + + port@4 { + reg = <3>; + funnel2_in_port3: endpoint { + slave-mode; + remote-endpoint = <&ptm11_out_port>; + }; + }; + }; + }; + + funnel@0,e3d01000 { + compatible = "arm,coresight-funnel", "arm,primecell"; + reg = <0 0xe3d01000 0 0x1000>; + + clocks = <&clk_375m>; + clock-names = "apb_pclk"; + ports { + #address-cells = <1>; + #size-cells = <0>; + + /* funnel output port */ + port@0 { + reg = <0>; + funnel3_out_port0: endpoint { + remote-endpoint = + <&replicator3_in_port0>; + }; + }; + + /* funnel input ports */ + port@1 { + reg = <0>; + funnel3_in_port0: endpoint { + slave-mode; + remote-endpoint = <&ptm12_out_port>; + }; + }; + + port@2 { + reg = <1>; + funnel3_in_port1: endpoint { + slave-mode; + remote-endpoint = <&ptm13_out_port>; + }; + }; + + port@3 { + reg = <2>; + funnel3_in_port2: endpoint { + slave-mode; + remote-endpoint = <&ptm14_out_port>; + }; + }; + + port@4 { + reg = <3>; + funnel3_in_port3: endpoint { + slave-mode; + remote-endpoint = <&ptm15_out_port>; + }; + }; + }; + }; + + funnel@0,e3c04000 { + compatible = "arm,coresight-funnel", "arm,primecell"; + reg = <0 0xe3c04000 0 0x1000>; + + clocks = <&clk_375m>; + clock-names = "apb_pclk"; + ports { + #address-cells = <1>; + #size-cells = <0>; + + /* funnel output port */ + port@0 { + reg = <0>; + funnel4_out_port0: endpoint { + remote-endpoint = <&tpiu_in_port>; + }; + }; + + /* funnel input ports */ + port@1 { + reg = <0>; + funnel4_in_port0: endpoint { + slave-mode; + remote-endpoint = + <&replicator0_out_port1>; + }; + }; + + port@2 { + reg = <1>; + funnel4_in_port1: endpoint { + slave-mode; + remote-endpoint = + <&replicator1_out_port1>; + }; + }; + + port@3 { + reg = <2>; + funnel4_in_port2: endpoint { + slave-mode; + remote-endpoint = + <&replicator2_out_port1>; + }; + }; + + port@4 { + reg = <3>; + funnel4_in_port3: endpoint { + slave-mode; + remote-endpoint = + <&replicator3_out_port1>; + }; + }; + }; + }; + + ptm@0,e3c7c000 { + compatible = "arm,coresight-etm3x", "arm,primecell"; + reg = <0 0xe3c7c000 0 0x1000>; + + clocks = <&clk_375m>; + clock-names = "apb_pclk"; + cpu = <&CPU0>; + port { + ptm0_out_port: endpoint { + remote-endpoint = <&funnel0_in_port0>; + }; + }; + }; + + ptm@0,e3c7d000 { + compatible = "arm,coresight-etm3x", "arm,primecell"; + reg = <0 0xe3c7d000 0 0x1000>; + + clocks = <&clk_375m>; + clock-names = "apb_pclk"; + cpu = <&CPU1>; + port { + ptm1_out_port: endpoint { + remote-endpoint = <&funnel0_in_port1>; + }; + }; + }; + + ptm@0,e3c7e000 { + compatible = "arm,coresight-etm3x", "arm,primecell"; + reg = <0 0xe3c7e000 0 0x1000>; + + clocks = <&clk_375m>; + clock-names = "apb_pclk"; + cpu = <&CPU2>; + port { + ptm2_out_port: endpoint { + remote-endpoint = <&funnel0_in_port2>; + }; + }; + }; + + ptm@0,e3c7f000 { + 
compatible = "arm,coresight-etm3x", "arm,primecell"; + reg = <0 0xe3c7f000 0 0x1000>; + + clocks = <&clk_375m>; + clock-names = "apb_pclk"; + cpu = <&CPU3>; + port { + ptm3_out_port: endpoint { + remote-endpoint = <&funnel0_in_port3>; + }; + }; + }; + + ptm@0,e3cbc000 { + compatible = "arm,coresight-etm3x", "arm,primecell"; + reg = <0 0xe3cbc000 0 0x1000>; + + clocks = <&clk_375m>; + clock-names = "apb_pclk"; + cpu = <&CPU4>; + port { + ptm4_out_port: endpoint { + remote-endpoint = <&funnel1_in_port0>; + }; + }; + }; + + ptm@0,e3cbd000 { + compatible = "arm,coresight-etm3x", "arm,primecell"; + reg = <0 0xe3cbd000 0 0x1000>; + + clocks = <&clk_375m>; + clock-names = "apb_pclk"; + cpu = <&CPU5>; + port { + ptm5_out_port: endpoint { + remote-endpoint = <&funnel1_in_port1>; + }; + }; + }; + + ptm@0,e3cbe000 { + compatible = "arm,coresight-etm3x", "arm,primecell"; + reg = <0 0xe3cbe000 0 0x1000>; + + clocks = <&clk_375m>; + clock-names = "apb_pclk"; + cpu = <&CPU6>; + port { + ptm6_out_port: endpoint { + remote-endpoint = <&funnel1_in_port2>; + }; + }; + }; + + ptm@0,e3cbf000 { + compatible = "arm,coresight-etm3x", "arm,primecell"; + reg = <0 0xe3cbf000 0 0x1000>; + + clocks = <&clk_375m>; + clock-names = "apb_pclk"; + cpu = <&CPU7>; + port { + ptm7_out_port: endpoint { + remote-endpoint = <&funnel1_in_port3>; + }; + }; + }; + + ptm@0,e3cfc000 { + compatible = "arm,coresight-etm3x", "arm,primecell"; + reg = <0 0xe3cfc000 0 0x1000>; + + clocks = <&clk_375m>; + clock-names = "apb_pclk"; + cpu = <&CPU8>; + port { + ptm8_out_port: endpoint { + remote-endpoint = <&funnel2_in_port0>; + }; + }; + }; + + ptm@0,e3cfd000 { + compatible = "arm,coresight-etm3x", "arm,primecell"; + reg = <0 0xe3cfd000 0 0x1000>; + clocks = <&clk_375m>; + clock-names = "apb_pclk"; + cpu = <&CPU9>; + port { + ptm9_out_port: endpoint { + remote-endpoint = <&funnel2_in_port1>; + }; + }; + }; + + ptm@0,e3cfe000 { + compatible = "arm,coresight-etm3x", "arm,primecell"; + reg = <0 0xe3cfe000 0 0x1000>; + + clocks = <&clk_375m>; + clock-names = "apb_pclk"; + cpu = <&CPU10>; + port { + ptm10_out_port: endpoint { + remote-endpoint = <&funnel2_in_port2>; + }; + }; + }; + + ptm@0,e3cff000 { + compatible = "arm,coresight-etm3x", "arm,primecell"; + reg = <0 0xe3cff000 0 0x1000>; + + clocks = <&clk_375m>; + clock-names = "apb_pclk"; + cpu = <&CPU11>; + port { + ptm11_out_port: endpoint { + remote-endpoint = <&funnel2_in_port3>; + }; + }; + }; + + ptm@0,e3d3c000 { + compatible = "arm,coresight-etm3x", "arm,primecell"; + reg = <0 0xe3d3c000 0 0x1000>; + + clocks = <&clk_375m>; + clock-names = "apb_pclk"; + cpu = <&CPU12>; + port { + ptm12_out_port: endpoint { + remote-endpoint = <&funnel3_in_port0>; + }; + }; + }; + + ptm@0,e3d3d000 { + compatible = "arm,coresight-etm3x", "arm,primecell"; + reg = <0 0xe3d3d000 0 0x1000>; + + clocks = <&clk_375m>; + clock-names = "apb_pclk"; + cpu = <&CPU13>; + port { + ptm13_out_port: endpoint { + remote-endpoint = <&funnel3_in_port1>; + }; + }; + }; + + ptm@0,e3d3e000 { + compatible = "arm,coresight-etm3x", "arm,primecell"; + reg = <0 0xe3d3e000 0 0x1000>; + + clocks = <&clk_375m>; + clock-names = "apb_pclk"; + cpu = <&CPU14>; + port { + ptm14_out_port: endpoint { + remote-endpoint = <&funnel3_in_port2>; + }; + }; + }; + + ptm@0,e3d3f000 { + compatible = "arm,coresight-etm3x", "arm,primecell"; + reg = <0 0xe3d3f000 0 0x1000>; + + clocks = <&clk_375m>; + clock-names = "apb_pclk"; + cpu = <&CPU15>; + port { + ptm15_out_port: endpoint { + remote-endpoint = <&funnel3_in_port3>; + }; + }; + }; }; diff --git 
a/arch/arm/boot/dts/meson.dtsi b/arch/arm/boot/dts/meson.dtsi index 03bcff87bd27..b67ede515bcd 100644 --- a/arch/arm/boot/dts/meson.dtsi +++ b/arch/arm/boot/dts/meson.dtsi @@ -143,5 +143,12 @@ #size-cells = <0>; status = "disabled"; }; + + ir_receiver: ir-receiver@c8100480 { + compatible= "amlogic,meson6-ir"; + reg = <0xc8100480 0x20>; + interrupts = <0 15 1>; + status = "disabled"; + }; }; }; /* end of / */ diff --git a/arch/arm/boot/dts/omap3-beagle-xm.dts b/arch/arm/boot/dts/omap3-beagle-xm.dts index 06a8aec4e6ea..25f7b0a22114 100644 --- a/arch/arm/boot/dts/omap3-beagle-xm.dts +++ b/arch/arm/boot/dts/omap3-beagle-xm.dts @@ -145,6 +145,34 @@ }; }; }; + + etb@5401b000 { + compatible = "arm,coresight-etb10", "arm,primecell"; + reg = <0x5401b000 0x1000>; + + coresight-default-sink; + clocks = <&emu_src_ck>; + clock-names = "apb_pclk"; + port { + etb_in: endpoint { + slave-mode; + remote-endpoint = <&etm_out>; + }; + }; + }; + + etm@54010000 { + compatible = "arm,coresight-etm3x", "arm,primecell"; + reg = <0x54010000 0x1000>; + + clocks = <&emu_src_ck>; + clock-names = "apb_pclk"; + port { + etm_out: endpoint { + remote-endpoint = <&etb_in>; + }; + }; + }; }; &omap3_pmx_wkup { diff --git a/arch/arm/boot/dts/omap3-beagle.dts b/arch/arm/boot/dts/omap3-beagle.dts index a9aae88b74f5..c792391ef090 100644 --- a/arch/arm/boot/dts/omap3-beagle.dts +++ b/arch/arm/boot/dts/omap3-beagle.dts @@ -140,6 +140,34 @@ }; }; }; + + etb@540000000 { + compatible = "arm,coresight-etb10", "arm,primecell"; + reg = <0x5401b000 0x1000>; + + coresight-default-sink; + clocks = <&emu_src_ck>; + clock-names = "apb_pclk"; + port { + etb_in: endpoint { + slave-mode; + remote-endpoint = <&etm_out>; + }; + }; + }; + + etm@54010000 { + compatible = "arm,coresight-etm3x", "arm,primecell"; + reg = <0x54010000 0x1000>; + + clocks = <&emu_src_ck>; + clock-names = "apb_pclk"; + port { + etm_out: endpoint { + remote-endpoint = <&etb_in>; + }; + }; + }; }; &omap3_pmx_wkup { diff --git a/arch/arm/boot/dts/ste-ccu8540-pinctrl.dtsi b/arch/arm/boot/dts/ste-ccu8540-pinctrl.dtsi index e0799966bc25..52dba2e39c71 100644 --- a/arch/arm/boot/dts/ste-ccu8540-pinctrl.dtsi +++ b/arch/arm/boot/dts/ste-ccu8540-pinctrl.dtsi @@ -16,31 +16,31 @@ uart0 { uart0_default_mux: uart0_mux { default_mux { - ste,function = "u0"; - ste,pins = "u0_a_1"; + function = "u0"; + groups = "u0_a_1"; }; }; uart0_default_mode: uart0_default { default_cfg1 { - ste,pins = "GPIO0", "GPIO2"; + pins = "GPIO0", "GPIO2"; ste,config = <&in_pu>; }; default_cfg2 { - ste,pins = "GPIO1", "GPIO3"; + pins = "GPIO1", "GPIO3"; ste,config = <&out_hi>; }; }; uart0_sleep_mode: uart0_sleep { sleep_cfg1 { - ste,pins = "GPIO0", "GPIO2"; + pins = "GPIO0", "GPIO2"; ste,config = <&slpm_in_pu>; }; sleep_cfg2 { - ste,pins = "GPIO1", "GPIO3"; + pins = "GPIO1", "GPIO3"; ste,config = <&slpm_out_hi>; }; }; @@ -49,29 +49,29 @@ uart2 { uart2_default_mode: uart2_default { default_mux { - ste,function = "u2"; - ste,pins = "u2txrx_a_1"; + function = "u2"; + groups = "u2txrx_a_1"; }; default_cfg1 { - ste,pins = "GPIO120"; + pins = "GPIO120"; ste,config = <&in_pu>; }; default_cfg2 { - ste,pins = "GPIO121"; + pins = "GPIO121"; ste,config = <&out_hi>; }; }; uart2_sleep_mode: uart2_sleep { sleep_cfg1 { - ste,pins = "GPIO120"; + pins = "GPIO120"; ste,config = <&slpm_in_pu>; }; sleep_cfg2 { - ste,pins = "GPIO121"; + pins = "GPIO121"; ste,config = <&slpm_out_hi>; }; }; @@ -80,21 +80,21 @@ i2c0 { i2c0_default_mux: i2c_mux { default_mux { - ste,function = "i2c0"; - ste,pins = "i2c0_a_1"; + function = "i2c0"; + 
groups = "i2c0_a_1"; }; }; i2c0_default_mode: i2c_default { default_cfg1 { - ste,pins = "GPIO147", "GPIO148"; + pins = "GPIO147", "GPIO148"; ste,config = <&in_pu>; }; }; i2c0_sleep_mode: i2c_sleep { sleep_cfg1 { - ste,pins = "GPIO147", "GPIO148"; + pins = "GPIO147", "GPIO148"; ste,config = <&slpm_in_pu>; }; }; @@ -103,21 +103,21 @@ i2c1 { i2c1_default_mux: i2c_mux { default_mux { - ste,function = "i2c1"; - ste,pins = "i2c1_b_2"; + function = "i2c1"; + groups = "i2c1_b_2"; }; }; i2c1_default_mode: i2c_default { default_cfg1 { - ste,pins = "GPIO16", "GPIO17"; + pins = "GPIO16", "GPIO17"; ste,config = <&in_pu>; }; }; i2c1_sleep_mode: i2c_sleep { sleep_cfg1 { - ste,pins = "GPIO16", "GPIO17"; + pins = "GPIO16", "GPIO17"; ste,config = <&slpm_in_pu>; }; }; @@ -126,21 +126,21 @@ i2c2 { i2c2_default_mux: i2c_mux { default_mux { - ste,function = "i2c2"; - ste,pins = "i2c2_b_2"; + function = "i2c2"; + groups = "i2c2_b_2"; }; }; i2c2_default_mode: i2c_default { default_cfg1 { - ste,pins = "GPIO10", "GPIO11"; + pins = "GPIO10", "GPIO11"; ste,config = <&in_pu>; }; }; i2c2_sleep_mode: i2c_sleep { sleep_cfg1 { - ste,pins = "GPIO11", "GPIO11"; + pins = "GPIO11", "GPIO11"; ste,config = <&slpm_in_pu>; }; }; @@ -149,21 +149,21 @@ i2c4 { i2c4_default_mux: i2c_mux { default_mux { - ste,function = "i2c4"; - ste,pins = "i2c4_b_2"; + function = "i2c4"; + groups = "i2c4_b_2"; }; }; i2c4_default_mode: i2c_default { default_cfg1 { - ste,pins = "GPIO122", "GPIO123"; + pins = "GPIO122", "GPIO123"; ste,config = <&in_pu>; }; }; i2c4_sleep_mode: i2c_sleep { sleep_cfg1 { - ste,pins = "GPIO122", "GPIO123"; + pins = "GPIO122", "GPIO123"; ste,config = <&slpm_in_pu>; }; }; @@ -172,21 +172,21 @@ i2c5 { i2c5_default_mux: i2c_mux { default_mux { - ste,function = "i2c5"; - ste,pins = "i2c5_c_2"; + function = "i2c5"; + groups = "i2c5_c_2"; }; }; i2c5_default_mode: i2c_default { default_cfg1 { - ste,pins = "GPIO118", "GPIO119"; + pins = "GPIO118", "GPIO119"; ste,config = <&in_pu>; }; }; i2c5_sleep_mode: i2c_sleep { sleep_cfg1 { - ste,pins = "GPIO118", "GPIO119"; + pins = "GPIO118", "GPIO119"; ste,config = <&slpm_in_pu>; }; }; diff --git a/arch/arm/boot/dts/ste-href-ab8500.dtsi b/arch/arm/boot/dts/ste-href-ab8500.dtsi index 30f8601da323..9b69bce9297d 100644 --- a/arch/arm/boot/dts/ste-href-ab8500.dtsi +++ b/arch/arm/boot/dts/ste-href-ab8500.dtsi @@ -47,11 +47,11 @@ gpio2 { gpio2_default_mode: gpio2_default { default_mux { - ste,function = "gpio"; - ste,pins = "gpio2_a_1"; + function = "gpio"; + groups = "gpio2_a_1"; }; default_cfg { - ste,pins = "GPIO2_T9"; + pins = "GPIO2_T9"; input-enable; bias-pull-down; }; @@ -60,11 +60,11 @@ gpio4 { gpio4_default_mode: gpio4_default { default_mux { - ste,function = "gpio"; - ste,pins = "gpio4_a_1"; + function = "gpio"; + groups = "gpio4_a_1"; }; default_cfg { - ste,pins = "GPIO4_W2"; + pins = "GPIO4_W2"; input-enable; bias-pull-down; }; @@ -73,11 +73,11 @@ gpio10 { gpio10_default_mode: gpio10_default { default_mux { - ste,function = "gpio"; - ste,pins = "gpio10_d_1"; + function = "gpio"; + groups = "gpio10_d_1"; }; default_cfg { - ste,pins = "GPIO10_U17"; + pins = "GPIO10_U17"; input-enable; bias-pull-down; }; @@ -86,11 +86,11 @@ gpio11 { gpio11_default_mode: gpio11_default { default_mux { - ste,function = "gpio"; - ste,pins = "gpio11_d_1"; + function = "gpio"; + groups = "gpio11_d_1"; }; default_cfg { - ste,pins = "GPIO11_AA18"; + pins = "GPIO11_AA18"; input-enable; bias-pull-down; }; @@ -99,11 +99,11 @@ gpio12 { gpio12_default_mode: gpio12_default { default_mux { - ste,function = "gpio"; - 
ste,pins = "gpio12_d_1"; + function = "gpio"; + groups = "gpio12_d_1"; }; default_cfg { - ste,pins = "GPIO12_U16"; + pins = "GPIO12_U16"; input-enable; bias-pull-down; }; @@ -112,11 +112,11 @@ gpio13 { gpio13_default_mode: gpio13_default { default_mux { - ste,function = "gpio"; - ste,pins = "gpio13_d_1"; + function = "gpio"; + groups = "gpio13_d_1"; }; default_cfg { - ste,pins = "GPIO13_W17"; + pins = "GPIO13_W17"; input-enable; bias-pull-down; }; @@ -125,11 +125,11 @@ gpio16 { gpio16_default_mode: gpio16_default { default_mux { - ste,function = "gpio"; - ste,pins = "gpio16_a_1"; + function = "gpio"; + groups = "gpio16_a_1"; }; default_cfg { - ste,pins = "GPIO16_F15"; + pins = "GPIO16_F15"; input-enable; bias-pull-down; }; @@ -138,11 +138,11 @@ gpio24 { gpio24_default_mode: gpio24_default { default_mux { - ste,function = "gpio"; - ste,pins = "gpio24_a_1"; + function = "gpio"; + groups = "gpio24_a_1"; }; default_cfg { - ste,pins = "GPIO24_T14"; + pins = "GPIO24_T14"; input-enable; bias-pull-down; }; @@ -151,11 +151,11 @@ gpio25 { gpio25_default_mode: gpio25_default { default_mux { - ste,function = "gpio"; - ste,pins = "gpio25_a_1"; + function = "gpio"; + groups = "gpio25_a_1"; }; default_cfg { - ste,pins = "GPIO25_R16"; + pins = "GPIO25_R16"; input-enable; bias-pull-down; }; @@ -164,11 +164,11 @@ gpio36 { gpio36_default_mode: gpio36_default { default_mux { - ste,function = "gpio"; - ste,pins = "gpio36_a_1"; + function = "gpio"; + groups = "gpio36_a_1"; }; default_cfg { - ste,pins = "GPIO36_A17"; + pins = "GPIO36_A17"; input-enable; bias-pull-down; }; @@ -177,11 +177,11 @@ gpio37 { gpio37_default_mode: gpio37_default { default_mux { - ste,function = "gpio"; - ste,pins = "gpio37_a_1"; + function = "gpio"; + groups = "gpio37_a_1"; }; default_cfg { - ste,pins = "GPIO37_E15"; + pins = "GPIO37_E15"; input-enable; bias-pull-down; }; @@ -190,11 +190,11 @@ gpio38 { gpio38_default_mode: gpio38_default { default_mux { - ste,function = "gpio"; - ste,pins = "gpio38_a_1"; + function = "gpio"; + groups = "gpio38_a_1"; }; default_cfg { - ste,pins = "GPIO38_C17"; + pins = "GPIO38_C17"; input-enable; bias-pull-down; }; @@ -203,11 +203,11 @@ gpio39 { gpio39_default_mode: gpio39_default { default_mux { - ste,function = "gpio"; - ste,pins = "gpio39_a_1"; + function = "gpio"; + groups = "gpio39_a_1"; }; default_cfg { - ste,pins = "GPIO39_E16"; + pins = "GPIO39_E16"; input-enable; bias-pull-down; }; @@ -216,11 +216,11 @@ gpio42 { gpio42_default_mode: gpio42_default { default_mux { - ste,function = "gpio"; - ste,pins = "gpio42_a_1"; + function = "gpio"; + groups = "gpio42_a_1"; }; default_cfg { - ste,pins = "GPIO42_U2"; + pins = "GPIO42_U2"; input-enable; bias-pull-down; }; @@ -232,11 +232,11 @@ gpio26 { gpio26_default_mode: gpio26_default { default_mux { - ste,function = "gpio"; - ste,pins = "gpio26_d_1"; + function = "gpio"; + groups = "gpio26_d_1"; }; default_cfg { - ste,pins = "GPIO26_M16"; + pins = "GPIO26_M16"; output-low; }; }; @@ -244,11 +244,11 @@ gpio35 { gpio35_default_mode: gpio35_default { default_mux { - ste,function = "gpio"; - ste,pins = "gpio35_d_1"; + function = "gpio"; + groups = "gpio35_d_1"; }; default_cfg { - ste,pins = "GPIO35_W15"; + pins = "GPIO35_W15"; output-low; }; }; @@ -260,11 +260,11 @@ ycbcr { ycbcr_default_mode: ycbcr_default { default_mux { - ste,function = "ycbcr"; - ste,pins = "ycbcr0123_d_1"; + function = "ycbcr"; + groups = "ycbcr0123_d_1"; }; default_cfg { - ste,pins = "GPIO6_Y18", + pins = "GPIO6_Y18", "GPIO7_AA20", "GPIO8_W18", "GPIO9_AA19"; @@ -277,11 +277,11 @@ pwm { 
pwm_default_mode: pwm_default { default_mux { - ste,function = "pwmout"; - ste,pins = "pwmout1_d_1", "pwmout2_d_1"; + function = "pwmout"; + groups = "pwmout1_d_1", "pwmout2_d_1"; }; default_cfg { - ste,pins = "GPIO14_F14", + pins = "GPIO14_F14", "GPIO15_B17"; input-enable; bias-pull-down; @@ -292,11 +292,11 @@ adi1 { adi1_default_mode: adi1_default { default_mux { - ste,function = "adi1"; - ste,pins = "adi1_d_1"; + function = "adi1"; + groups = "adi1_d_1"; }; default_cfg { - ste,pins = "GPIO17_P5", + pins = "GPIO17_P5", "GPIO18_R5", "GPIO19_U5", "GPIO20_T5"; @@ -309,11 +309,11 @@ usbuicc { usbuicc_default_mode: usbuicc_default { default_mux { - ste,function = "usbuicc"; - ste,pins = "usbuicc_d_1"; + function = "usbuicc"; + groups = "usbuicc_d_1"; }; default_cfg { - ste,pins = "GPIO21_H19", + pins = "GPIO21_H19", "GPIO22_G20", "GPIO23_G19"; input-enable; @@ -325,13 +325,13 @@ dmic { dmic_default_mode: dmic_default { default_mux { - ste,function = "dmic"; - ste,pins = "dmic12_d_1", + function = "dmic"; + groups = "dmic12_d_1", "dmic34_d_1", "dmic56_d_1"; }; default_cfg { - ste,pins = "GPIO27_J6", + pins = "GPIO27_J6", "GPIO28_K6", "GPIO29_G6", "GPIO30_H6", @@ -345,11 +345,11 @@ extcpena { extcpena_default_mode: extcpena_default { default_mux { - ste,function = "extcpena"; - ste,pins = "extcpena_d_1"; + function = "extcpena"; + groups = "extcpena_d_1"; }; default_cfg { - ste,pins = "GPIO34_R17"; + pins = "GPIO34_R17"; input-enable; bias-pull-down; }; @@ -359,11 +359,11 @@ modsclsda { modsclsda_default_mode: modsclsda_default { default_mux { - ste,function = "modsclsda"; - ste,pins = "modsclsda_d_1"; + function = "modsclsda"; + groups = "modsclsda_d_1"; }; default_cfg { - ste,pins = "GPIO40_T19", + pins = "GPIO40_T19", "GPIO41_U19"; input-enable; bias-pull-down; @@ -376,22 +376,22 @@ sysclkreq2 { sysclkreq2_default_mode: sysclkreq2_default { default_mux { - ste,function = "sysclkreq"; - ste,pins = "sysclkreq2_d_1"; + function = "sysclkreq"; + groups = "sysclkreq2_d_1"; }; default_cfg { - ste,pins = "GPIO1_T10"; + pins = "GPIO1_T10"; input-enable; bias-disable; }; }; sysclkreq2_sleep_mode: sysclkreq2_sleep { default_mux { - ste,function = "gpio"; - ste,pins = "gpio1_a_1"; + function = "gpio"; + groups = "gpio1_a_1"; }; default_cfg { - ste,pins = "GPIO1_T10"; + pins = "GPIO1_T10"; input-enable; bias-pull-down; }; @@ -400,22 +400,22 @@ sysclkreq4 { sysclkreq4_default_mode: sysclkreq4_default { default_mux { - ste,function = "sysclkreq"; - ste,pins = "sysclkreq4_d_1"; + function = "sysclkreq"; + groups = "sysclkreq4_d_1"; }; default_cfg { - ste,pins = "GPIO3_U9"; + pins = "GPIO3_U9"; input-enable; bias-disable; }; }; sysclkreq4_sleep_mode: sysclkreq4_sleep { default_mux { - ste,function = "gpio"; - ste,pins = "gpio3_a_1"; + function = "gpio"; + groups = "gpio3_a_1"; }; default_cfg { - ste,pins = "GPIO3_U9"; + pins = "GPIO3_U9"; input-enable; bias-pull-down; }; diff --git a/arch/arm/boot/dts/ste-href-ab8505.dtsi b/arch/arm/boot/dts/ste-href-ab8505.dtsi index 6006d62086a2..ccf37a9df050 100644 --- a/arch/arm/boot/dts/ste-href-ab8505.dtsi +++ b/arch/arm/boot/dts/ste-href-ab8505.dtsi @@ -35,11 +35,11 @@ gpio2 { gpio2_default_mode: gpio2_default { default_mux { - ste,function = "gpio"; - ste,pins = "gpio2_a_1"; + function = "gpio"; + groups = "gpio2_a_1"; }; default_cfg { - ste,pins = "GPIO2_R5"; + pins = "GPIO2_R5"; input-enable; bias-pull-down; }; @@ -48,11 +48,11 @@ gpio10 { gpio10_default_mode: gpio10_default { default_mux { - ste,function = "gpio"; - ste,pins = "gpio10_d_1"; + function = "gpio"; + 
groups = "gpio10_d_1"; }; default_cfg { - ste,pins = "GPIO10_B16"; + pins = "GPIO10_B16"; input-enable; bias-pull-down; }; @@ -61,11 +61,11 @@ gpio11 { gpio11_default_mode: gpio11_default { default_mux { - ste,function = "gpio"; - ste,pins = "gpio11_d_1"; + function = "gpio"; + groups = "gpio11_d_1"; }; default_cfg { - ste,pins = "GPIO11_B17"; + pins = "GPIO11_B17"; input-enable; bias-pull-down; }; @@ -74,11 +74,11 @@ gpio13 { gpio13_default_mode: gpio13_default { default_mux { - ste,function = "gpio"; - ste,pins = "gpio13_d_1"; + function = "gpio"; + groups = "gpio13_d_1"; }; default_cfg { - ste,pins = "GPIO13_D17"; + pins = "GPIO13_D17"; input-enable; bias-disable; }; @@ -87,11 +87,11 @@ gpio34 { gpio34_default_mode: gpio34_default { default_mux { - ste,function = "gpio"; - ste,pins = "gpio34_a_1"; + function = "gpio"; + groups = "gpio34_a_1"; }; default_cfg { - ste,pins = "GPIO34_H14"; + pins = "GPIO34_H14"; input-enable; bias-pull-down; }; @@ -100,11 +100,11 @@ gpio50 { gpio50_default_mode: gpio50_default { default_mux { - ste,function = "gpio"; - ste,pins = "gpio50_d_1"; + function = "gpio"; + groups = "gpio50_d_1"; }; default_cfg { - ste,pins = "GPIO50_L4"; + pins = "GPIO50_L4"; input-enable; bias-disable; }; @@ -114,11 +114,11 @@ pwm { pwm_default_mode: pwm_default { default_mux { - ste,function = "pwmout"; - ste,pins = "pwmout1_d_1"; + function = "pwmout"; + groups = "pwmout1_d_1"; }; default_cfg { - ste,pins = "GPIO14_C16"; + pins = "GPIO14_C16"; input-enable; bias-pull-down; }; @@ -128,11 +128,11 @@ adi2 { adi2_default_mode: adi2_default { default_mux { - ste,function = "adi2"; - ste,pins = "adi2_d_1"; + function = "adi2"; + groups = "adi2_d_1"; }; default_cfg { - ste,pins = "GPIO17_P2", + pins = "GPIO17_P2", "GPIO18_N3", "GPIO19_T1", "GPIO20_P3"; @@ -145,11 +145,11 @@ modsclsda { modsclsda_default_mode: modsclsda_default { default_mux { - ste,function = "modsclsda"; - ste,pins = "modsclsda_d_1"; + function = "modsclsda"; + groups = "modsclsda_d_1"; }; default_cfg { - ste,pins = "GPIO40_J15", + pins = "GPIO40_J15", "GPIO41_J14"; input-enable; bias-pull-down; @@ -159,11 +159,11 @@ resethw { resethw_default_mode: resethw_default { default_mux { - ste,function = "resethw"; - ste,pins = "resethw_d_1"; + function = "resethw"; + groups = "resethw_d_1"; }; default_cfg { - ste,pins = "GPIO52_D16"; + pins = "GPIO52_D16"; input-enable; bias-pull-down; }; @@ -172,11 +172,11 @@ service { service_default_mode: service_default { default_mux { - ste,function = "service"; - ste,pins = "service_d_1"; + function = "service"; + groups = "service_d_1"; }; default_cfg { - ste,pins = "GPIO53_D15"; + pins = "GPIO53_D15"; input-enable; bias-pull-down; }; @@ -188,22 +188,22 @@ sysclkreq2 { sysclkreq2_default_mode: sysclkreq2_default { default_mux { - ste,function = "sysclkreq"; - ste,pins = "sysclkreq2_d_1"; + function = "sysclkreq"; + groups = "sysclkreq2_d_1"; }; default_cfg { - ste,pins = "GPIO1_N4"; + pins = "GPIO1_N4"; input-enable; bias-disable; }; }; sysclkreq2_sleep_mode: sysclkreq2_sleep { default_mux { - ste,function = "gpio"; - ste,pins = "gpio1_a_1"; + function = "gpio"; + groups = "gpio1_a_1"; }; default_cfg { - ste,pins = "GPIO1_N4"; + pins = "GPIO1_N4"; input-enable; bias-pull-down; }; @@ -212,22 +212,22 @@ sysclkreq4 { sysclkreq4_default_mode: sysclkreq4_default { default_mux { - ste,function = "sysclkreq"; - ste,pins = "sysclkreq4_d_1"; + function = "sysclkreq"; + groups = "sysclkreq4_d_1"; }; default_cfg { - ste,pins = "GPIO3_P5"; + pins = "GPIO3_P5"; input-enable; bias-disable; }; }; 
sysclkreq4_sleep_mode: sysclkreq4_sleep { default_mux { - ste,function = "gpio"; - ste,pins = "gpio3_a_1"; + function = "gpio"; + groups = "gpio3_a_1"; }; default_cfg { - ste,pins = "GPIO3_P5"; + pins = "GPIO3_P5"; input-enable; bias-pull-down; }; diff --git a/arch/arm/boot/dts/ste-href-family-pinctrl.dtsi b/arch/arm/boot/dts/ste-href-family-pinctrl.dtsi index addfcc7c2750..5c5cea232743 100644 --- a/arch/arm/boot/dts/ste-href-family-pinctrl.dtsi +++ b/arch/arm/boot/dts/ste-href-family-pinctrl.dtsi @@ -18,33 +18,33 @@ uart0 { uart0_default_mode: uart0_default { default_mux { - ste,function = "u0"; - ste,pins = "u0_a_1"; + function = "u0"; + groups = "u0_a_1"; }; default_cfg1 { - ste,pins = "GPIO0_AJ5", "GPIO2_AH4"; /* CTS+RXD */ + pins = "GPIO0_AJ5", "GPIO2_AH4"; /* CTS+RXD */ ste,config = <&in_pu>; }; default_cfg2 { - ste,pins = "GPIO1_AJ3", "GPIO3_AH3"; /* RTS+TXD */ + pins = "GPIO1_AJ3", "GPIO3_AH3"; /* RTS+TXD */ ste,config = <&out_hi>; }; }; uart0_sleep_mode: uart0_sleep { sleep_cfg1 { - ste,pins = "GPIO0_AJ5", "GPIO2_AH4"; /* CTS+RXD */ + pins = "GPIO0_AJ5", "GPIO2_AH4"; /* CTS+RXD */ ste,config = <&slpm_in_wkup_pdis>; }; sleep_cfg2 { - ste,pins = "GPIO1_AJ3"; /* RTS */ + pins = "GPIO1_AJ3"; /* RTS */ ste,config = <&slpm_out_hi_wkup_pdis>; }; sleep_cfg3 { - ste,pins = "GPIO3_AH3"; /* TXD */ + pins = "GPIO3_AH3"; /* TXD */ ste,config = <&slpm_out_wkup_pdis>; }; }; @@ -53,28 +53,28 @@ uart1 { uart1_default_mode: uart1_default { default_mux { - ste,function = "u1"; - ste,pins = "u1rxtx_a_1"; + function = "u1"; + groups = "u1rxtx_a_1"; }; default_cfg1 { - ste,pins = "GPIO4_AH6"; /* RXD */ + pins = "GPIO4_AH6"; /* RXD */ ste,config = <&in_pu>; }; default_cfg2 { - ste,pins = "GPIO5_AG6"; /* TXD */ + pins = "GPIO5_AG6"; /* TXD */ ste,config = <&out_hi>; }; }; uart1_sleep_mode: uart1_sleep { sleep_cfg1 { - ste,pins = "GPIO4_AH6"; /* RXD */ + pins = "GPIO4_AH6"; /* RXD */ ste,config = <&slpm_in_wkup_pdis>; }; sleep_cfg2 { - ste,pins = "GPIO5_AG6"; /* TXD */ + pins = "GPIO5_AG6"; /* TXD */ ste,config = <&slpm_out_wkup_pdis>; }; }; @@ -83,28 +83,28 @@ uart2 { uart2_default_mode: uart2_default { default_mux { - ste,function = "u2"; - ste,pins = "u2rxtx_c_1"; + function = "u2"; + groups = "u2rxtx_c_1"; }; default_cfg1 { - ste,pins = "GPIO29_W2"; /* RXD */ + pins = "GPIO29_W2"; /* RXD */ ste,config = <&in_pu>; }; default_cfg2 { - ste,pins = "GPIO30_W3"; /* TXD */ + pins = "GPIO30_W3"; /* TXD */ ste,config = <&out_hi>; }; }; uart2_sleep_mode: uart2_sleep { sleep_cfg1 { - ste,pins = "GPIO29_W2"; /* RXD */ + pins = "GPIO29_W2"; /* RXD */ ste,config = <&in_wkup_pdis>; }; sleep_cfg2 { - ste,pins = "GPIO30_W3"; /* TXD */ + pins = "GPIO30_W3"; /* TXD */ ste,config = <&out_wkup_pdis>; }; }; @@ -114,18 +114,18 @@ i2c0 { i2c0_default_mode: i2c_default { default_mux { - ste,function = "i2c0"; - ste,pins = "i2c0_a_1"; + function = "i2c0"; + groups = "i2c0_a_1"; }; default_cfg1 { - ste,pins = "GPIO147_C15", "GPIO148_B16"; /* SDA/SCL */ + pins = "GPIO147_C15", "GPIO148_B16"; /* SDA/SCL */ ste,config = <&in_pu>; }; }; i2c0_sleep_mode: i2c_sleep { sleep_cfg1 { - ste,pins = "GPIO147_C15", "GPIO148_B16"; /* SDA/SCL */ + pins = "GPIO147_C15", "GPIO148_B16"; /* SDA/SCL */ ste,config = <&slpm_in_wkup_pdis>; }; }; @@ -134,18 +134,18 @@ i2c1 { i2c1_default_mode: i2c_default { default_mux { - ste,function = "i2c1"; - ste,pins = "i2c1_b_2"; + function = "i2c1"; + groups = "i2c1_b_2"; }; default_cfg1 { - ste,pins = "GPIO16_AD3", "GPIO17_AD4"; /* SDA/SCL */ + pins = "GPIO16_AD3", "GPIO17_AD4"; /* SDA/SCL */ ste,config = 
<&in_pu>; }; }; i2c1_sleep_mode: i2c_sleep { sleep_cfg1 { - ste,pins = "GPIO16_AD3", "GPIO17_AD4"; /* SDA/SCL */ + pins = "GPIO16_AD3", "GPIO17_AD4"; /* SDA/SCL */ ste,config = <&slpm_in_wkup_pdis>; }; }; @@ -154,18 +154,18 @@ i2c2 { i2c2_default_mode: i2c_default { default_mux { - ste,function = "i2c2"; - ste,pins = "i2c2_b_2"; + function = "i2c2"; + groups = "i2c2_b_2"; }; default_cfg1 { - ste,pins = "GPIO10_AF5", "GPIO11_AG4"; /* SDA/SCL */ + pins = "GPIO10_AF5", "GPIO11_AG4"; /* SDA/SCL */ ste,config = <&in_pu>; }; }; i2c2_sleep_mode: i2c_sleep { sleep_cfg1 { - ste,pins = "GPIO10_AF5", "GPIO11_AG4"; /* SDA/SCL */ + pins = "GPIO10_AF5", "GPIO11_AG4"; /* SDA/SCL */ ste,config = <&slpm_in_wkup_pdis>; }; }; @@ -174,18 +174,18 @@ i2c3 { i2c3_default_mode: i2c_default { default_mux { - ste,function = "i2c3"; - ste,pins = "i2c3_c_2"; + function = "i2c3"; + groups = "i2c3_c_2"; }; default_cfg1 { - ste,pins = "GPIO229_AG7", "GPIO230_AF7"; /* SDA/SCL */ + pins = "GPIO229_AG7", "GPIO230_AF7"; /* SDA/SCL */ ste,config = <&in_pu>; }; }; i2c3_sleep_mode: i2c_sleep { sleep_cfg1 { - ste,pins = "GPIO229_AG7", "GPIO230_AF7"; /* SDA/SCL */ + pins = "GPIO229_AG7", "GPIO230_AF7"; /* SDA/SCL */ ste,config = <&slpm_in_wkup_pdis>; }; }; @@ -198,18 +198,18 @@ i2c4 { i2c4_default_mode: i2c_default { default_mux { - ste,function = "i2c4"; - ste,pins = "i2c4_b_1"; + function = "i2c4"; + groups = "i2c4_b_1"; }; default_cfg1 { - ste,pins = "GPIO4_AH6", "GPIO5_AG6"; /* SDA/SCL */ + pins = "GPIO4_AH6", "GPIO5_AG6"; /* SDA/SCL */ ste,config = <&in_pu>; }; }; i2c4_sleep_mode: i2c_sleep { sleep_cfg1 { - ste,pins = "GPIO4_AH6", "GPIO5_AG6"; /* SDA/SCL */ + pins = "GPIO4_AH6", "GPIO5_AG6"; /* SDA/SCL */ ste,config = <&slpm_in_wkup_pdis>; }; }; @@ -219,19 +219,19 @@ spi2 { spi2_default_mode: spi_default { default_mux { - ste,function = "spi2"; - ste,pins = "spi2_oc1_2"; + function = "spi2"; + groups = "spi2_oc1_2"; }; default_cfg1 { - ste,pins = "GPIO216_AG12"; /* FRM */ + pins = "GPIO216_AG12"; /* FRM */ ste,config = <&gpio_out_hi>; }; default_cfg2 { - ste,pins = "GPIO218_AH11"; /* RXD */ + pins = "GPIO218_AH11"; /* RXD */ ste,config = <&in_pd>; }; default_cfg3 { - ste,pins = + pins = "GPIO215_AH13", /* TXD */ "GPIO217_AH12"; /* CLK */ ste,config = <&out_lo>; @@ -245,32 +245,32 @@ * as we do not state any muxing. 
*/ idle_cfg1 { - ste,pins = "GPIO218_AH11"; /* RXD */ + pins = "GPIO218_AH11"; /* RXD */ ste,config = <&slpm_in_pdis>; }; idle_cfg2 { - ste,pins = "GPIO215_AH13"; /* TXD */ + pins = "GPIO215_AH13"; /* TXD */ ste,config = <&slpm_out_lo_pdis>; }; idle_cfg3 { - ste,pins = "GPIO217_AH12"; /* CLK */ + pins = "GPIO217_AH12"; /* CLK */ ste,config = <&slpm_pdis>; }; }; spi2_sleep_mode: spi_sleep { sleep_cfg1 { - ste,pins = + pins = "GPIO216_AG12", /* FRM */ "GPIO218_AH11"; /* RXD */ ste,config = <&slpm_in_wkup_pdis>; }; sleep_cfg2 { - ste,pins = "GPIO215_AH13"; /* TXD */ + pins = "GPIO215_AH13"; /* TXD */ ste,config = <&slpm_out_lo_wkup_pdis>; }; sleep_cfg3 { - ste,pins = "GPIO217_AH12"; /* CLK */ + pins = "GPIO217_AH12"; /* CLK */ ste,config = <&slpm_wkup_pdis>; }; }; @@ -281,26 +281,26 @@ /* This is the external SD card slot, 4 bits wide */ sdi0_default_mode: sdi0_default { default_mux { - ste,function = "mc0"; - ste,pins = "mc0_a_1"; + function = "mc0"; + groups = "mc0_a_1"; }; default_cfg1 { - ste,pins = + pins = "GPIO18_AC2", /* CMDDIR */ "GPIO19_AC1", /* DAT0DIR */ "GPIO20_AB4"; /* DAT2DIR */ ste,config = <&out_hi>; }; default_cfg2 { - ste,pins = "GPIO22_AA3"; /* FBCLK */ + pins = "GPIO22_AA3"; /* FBCLK */ ste,config = <&in_nopull>; }; default_cfg3 { - ste,pins = "GPIO23_AA4"; /* CLK */ + pins = "GPIO23_AA4"; /* CLK */ ste,config = <&out_lo>; }; default_cfg4 { - ste,pins = + pins = "GPIO24_AB2", /* CMD */ "GPIO25_Y4", /* DAT0 */ "GPIO26_Y2", /* DAT1 */ @@ -312,14 +312,14 @@ sdi0_sleep_mode: sdi0_sleep { sleep_cfg1 { - ste,pins = + pins = "GPIO18_AC2", /* CMDDIR */ "GPIO19_AC1", /* DAT0DIR */ "GPIO20_AB4"; /* DAT2DIR */ ste,config = <&slpm_out_hi_wkup_pdis>; }; sleep_cfg2 { - ste,pins = + pins = "GPIO22_AA3", /* FBCLK */ "GPIO24_AB2", /* CMD */ "GPIO25_Y4", /* DAT0 */ @@ -329,7 +329,7 @@ ste,config = <&slpm_in_wkup_pdis>; }; sleep_cfg3 { - ste,pins = "GPIO23_AA4"; /* CLK */ + pins = "GPIO23_AA4"; /* CLK */ ste,config = <&slpm_out_lo_wkup_pdis>; }; }; @@ -339,19 +339,19 @@ /* This is the WLAN SDIO 4 bits wide */ sdi1_default_mode: sdi1_default { default_mux { - ste,function = "mc1"; - ste,pins = "mc1_a_1"; + function = "mc1"; + groups = "mc1_a_1"; }; default_cfg1 { - ste,pins = "GPIO208_AH16"; /* CLK */ + pins = "GPIO208_AH16"; /* CLK */ ste,config = <&out_lo>; }; default_cfg2 { - ste,pins = "GPIO209_AG15"; /* FBCLK */ + pins = "GPIO209_AG15"; /* FBCLK */ ste,config = <&in_nopull>; }; default_cfg3 { - ste,pins = + pins = "GPIO210_AJ15", /* CMD */ "GPIO211_AG14", /* DAT0 */ "GPIO212_AF13", /* DAT1 */ @@ -363,11 +363,11 @@ sdi1_sleep_mode: sdi1_sleep { sleep_cfg1 { - ste,pins = "GPIO208_AH16"; /* CLK */ + pins = "GPIO208_AH16"; /* CLK */ ste,config = <&slpm_out_lo_wkup_pdis>; }; sleep_cfg2 { - ste,pins = + pins = "GPIO209_AG15", /* FBCLK */ "GPIO210_AJ15", /* CMD */ "GPIO211_AG14", /* DAT0 */ @@ -383,19 +383,19 @@ /* This is the eMMC 8 bits wide, usually PoP eMMC */ sdi2_default_mode: sdi2_default { default_mux { - ste,function = "mc2"; - ste,pins = "mc2_a_1"; + function = "mc2"; + groups = "mc2_a_1"; }; default_cfg1 { - ste,pins = "GPIO128_A5"; /* CLK */ + pins = "GPIO128_A5"; /* CLK */ ste,config = <&out_lo>; }; default_cfg2 { - ste,pins = "GPIO130_C8"; /* FBCLK */ + pins = "GPIO130_C8"; /* FBCLK */ ste,config = <&in_nopull>; }; default_cfg3 { - ste,pins = + pins = "GPIO129_B4", /* CMD */ "GPIO131_A12", /* DAT0 */ "GPIO132_C10", /* DAT1 */ @@ -411,17 +411,17 @@ sdi2_sleep_mode: sdi2_sleep { sleep_cfg1 { - ste,pins = "GPIO128_A5"; /* CLK */ + pins = "GPIO128_A5"; /* CLK */ ste,config = 
<&out_lo_wkup_pdis>; }; sleep_cfg2 { - ste,pins = + pins = "GPIO130_C8", /* FBCLK */ "GPIO129_B4"; /* CMD */ ste,config = <&in_wkup_pdis_en>; }; sleep_cfg3 { - ste,pins = + pins = "GPIO131_A12", /* DAT0 */ "GPIO132_C10", /* DAT1 */ "GPIO133_B10", /* DAT2 */ @@ -439,19 +439,19 @@ /* This is the eMMC 8 bits wide, usually PCB-mounted eMMC */ sdi4_default_mode: sdi4_default { default_mux { - ste,function = "mc4"; - ste,pins = "mc4_a_1"; + function = "mc4"; + groups = "mc4_a_1"; }; default_cfg1 { - ste,pins = "GPIO203_AE23"; /* CLK */ + pins = "GPIO203_AE23"; /* CLK */ ste,config = <&out_lo>; }; default_cfg2 { - ste,pins = "GPIO202_AF25"; /* FBCLK */ + pins = "GPIO202_AF25"; /* FBCLK */ ste,config = <&in_nopull>; }; default_cfg3 { - ste,pins = + pins = "GPIO201_AF24", /* CMD */ "GPIO200_AH26", /* DAT0 */ "GPIO199_AH23", /* DAT1 */ @@ -467,11 +467,11 @@ sdi4_sleep_mode: sdi4_sleep { sleep_cfg1 { - ste,pins = "GPIO203_AE23"; /* CLK */ + pins = "GPIO203_AE23"; /* CLK */ ste,config = <&out_lo_wkup_pdis>; }; sleep_cfg2 { - ste,pins = + pins = "GPIO202_AF25", /* FBCLK */ "GPIO201_AF24", /* CMD */ "GPIO200_AH26", /* DAT0 */ @@ -494,11 +494,11 @@ msp0 { msp0_default_mode: msp0_default { default_msp0_mux { - ste,function = "msp0"; - ste,pins = "msp0txrx_a_1", "msp0tfstck_a_1"; + function = "msp0"; + groups = "msp0txrx_a_1", "msp0tfstck_a_1"; }; default_msp0_cfg { - ste,pins = + pins = "GPIO12_AC4", /* TXD */ "GPIO15_AC3", /* RXD */ "GPIO13_AF3", /* TFS */ @@ -511,15 +511,15 @@ msp1 { msp1_default_mode: msp1_default { default_mux { - ste,function = "msp1"; - ste,pins = "msp1txrx_a_1", "msp1_a_1"; + function = "msp1"; + groups = "msp1txrx_a_1", "msp1_a_1"; }; default_cfg1 { - ste,pins = "GPIO33_AF2"; + pins = "GPIO33_AF2"; ste,config = <&out_lo>; }; default_cfg2 { - ste,pins = + pins = "GPIO34_AE1", "GPIO35_AE2", "GPIO36_AG2"; @@ -533,18 +533,18 @@ msp2_default_mode: msp2_default { /* MSP2 usually used for HDMI audio */ default_mux { - ste,function = "msp2"; - ste,pins = "msp2_a_1"; + function = "msp2"; + groups = "msp2_a_1"; }; default_cfg1 { - ste,pins = + pins = "GPIO193_AH27", /* TXD */ "GPIO194_AF27", /* TCK */ "GPIO195_AG28"; /* TFS */ ste,config = <&in_pd>; }; default_cfg2 { - ste,pins = "GPIO196_AG26"; /* RXD */ + pins = "GPIO196_AG26"; /* RXD */ ste,config = <&out_lo>; }; }; @@ -554,11 +554,11 @@ musb { musb_default_mode: musb_default { default_mux { - ste,function = "usb"; - ste,pins = "usb_a_1"; + function = "usb"; + groups = "usb_a_1"; }; default_cfg1 { - ste,pins = + pins = "GPIO256_AF28", /* NXT */ "GPIO258_AD29", /* XCLK */ "GPIO259_AC29", /* DIR */ @@ -573,25 +573,25 @@ ste,config = <&in_nopull>; }; default_cfg2 { - ste,pins = "GPIO257_AE29"; /* STP */ + pins = "GPIO257_AE29"; /* STP */ ste,config = <&out_hi>; }; }; musb_sleep_mode: musb_sleep { sleep_cfg1 { - ste,pins = + pins = "GPIO256_AF28", /* NXT */ "GPIO258_AD29", /* XCLK */ "GPIO259_AC29"; /* DIR */ ste,config = <&slpm_wkup_pdis_en>; }; sleep_cfg2 { - ste,pins = "GPIO257_AE29"; /* STP */ + pins = "GPIO257_AE29"; /* STP */ ste,config = <&slpm_out_hi_wkup_pdis>; }; sleep_cfg3 { - ste,pins = + pins = "GPIO260_AD28", /* DAT7 */ "GPIO261_AD26", /* DAT6 */ "GPIO262_AE26", /* DAT5 */ @@ -609,8 +609,8 @@ lcd_default_mode: lcd_default { default_mux { /* Mux in VSI0 and all the data lines */ - ste,function = "lcd"; - ste,pins = + function = "lcd"; + groups = "lcdvsi0_a_1", /* VSI0 for LCD */ "lcd_d0_d7_a_1", /* Data lines */ "lcd_d8_d11_a_1", /* TV-out */ @@ -618,7 +618,7 @@ "lcdvsi1_a_1"; /* VSI1 for HDMI */ }; default_cfg1 { - ste,pins = + 
pins = "GPIO68_E1", /* VSI0 */ "GPIO69_E2"; /* VSI1 */ ste,config = <&in_pu>; @@ -626,7 +626,7 @@ }; lcd_sleep_mode: lcd_sleep { sleep_cfg1 { - ste,pins = "GPIO69_E2"; /* VSI1 */ + pins = "GPIO69_E2"; /* VSI1 */ ste,config = <&slpm_in_wkup_pdis>; }; }; @@ -636,11 +636,11 @@ /* SKE keys on position 2 in an 8x8 matrix */ ske_kpa2_default_mode: ske_kpa2_default { default_mux { - ste,function = "kp"; - ste,pins = "kp_a_2"; + function = "kp"; + groups = "kp_a_2"; }; default_cfg1 { - ste,pins = + pins = "GPIO153_B17", /* I7 */ "GPIO154_C16", /* I6 */ "GPIO155_C19", /* I5 */ @@ -652,7 +652,7 @@ ste,config = <&in_pd>; }; default_cfg2 { - ste,pins = + pins = "GPIO157_A18", /* O7 */ "GPIO158_C18", /* O6 */ "GPIO159_B19", /* O5 */ @@ -666,7 +666,7 @@ }; ske_kpa2_sleep_mode: ske_kpa2_sleep { sleep_cfg1 { - ste,pins = + pins = "GPIO153_B17", /* I7 */ "GPIO154_C16", /* I6 */ "GPIO155_C19", /* I5 */ @@ -678,7 +678,7 @@ ste,config = <&slpm_in_pu_wkup_pdis_en>; }; sleep_cfg2 { - ste,pins = + pins = "GPIO157_A18", /* O7 */ "GPIO158_C18", /* O6 */ "GPIO159_B19", /* O5 */ @@ -696,11 +696,11 @@ */ ske_kpaoc1_default_mode: ske_kpaoc1_default { default_mux { - ste,function = "kp"; - ste,pins = "kp_a_1", "kp_oc1_1"; + function = "kp"; + groups = "kp_a_1", "kp_oc1_1"; }; default_cfg1 { - ste,pins = + pins = "GPIO91_B6", /* KP_O0 */ "GPIO90_A3", /* KP_O1 */ "GPIO87_B3", /* KP_O2 */ @@ -710,7 +710,7 @@ ste,config = <&out_lo>; }; default_cfg2 { - ste,pins = + pins = "GPIO93_B7", /* KP_I0 */ "GPIO92_D6", /* KP_I1 */ "GPIO89_E6", /* KP_I2 */ @@ -729,13 +729,13 @@ * These are plain GPIO pins used by WLAN */ default_cfg1 { - ste,pins = + pins = "GPIO226_AF8", /* WLAN_PMU_EN */ "GPIO85_D5"; /* WLAN_ENA */ ste,config = <&gpio_out_lo>; }; default_cfg2 { - ste,pins = "GPIO4_AH6"; /* WLAN_IRQ on UART1 */ + pins = "GPIO4_AH6"; /* WLAN_IRQ on UART1 */ ste,config = <&gpio_in_pu>; }; }; diff --git a/arch/arm/boot/dts/ste-href-stuib.dtsi b/arch/arm/boot/dts/ste-href-stuib.dtsi index 84d7c5d883f2..7d4f8184c522 100644 --- a/arch/arm/boot/dts/ste-href-stuib.dtsi +++ b/arch/arm/boot/dts/ste-href-stuib.dtsi @@ -103,7 +103,7 @@ prox { prox_stuib_mode: prox_stuib { stuib_cfg { - ste,pins = "GPIO217_AH12"; + pins = "GPIO217_AH12"; ste,config = <&gpio_in_pu>; }; }; @@ -111,7 +111,7 @@ hall { hall_stuib_mode: stuib_tvk { stuib_cfg { - ste,pins = "GPIO145_C13"; + pins = "GPIO145_C13"; ste,config = <&gpio_in_pu>; }; }; diff --git a/arch/arm/boot/dts/ste-href-tvk1281618.dtsi b/arch/arm/boot/dts/ste-href-tvk1281618.dtsi index 18b65d1b14f2..062c6aae3afa 100644 --- a/arch/arm/boot/dts/ste-href-tvk1281618.dtsi +++ b/arch/arm/boot/dts/ste-href-tvk1281618.dtsi @@ -130,7 +130,7 @@ tc35893 { tc35893_tvk_mode: tc35893_tvk { tvk_cfg { - ste,pins = "GPIO218_AH11"; + pins = "GPIO218_AH11"; ste,config = <&gpio_in_pu>; }; }; @@ -138,7 +138,7 @@ prox { prox_tvk_mode: prox_tvk { tvk_cfg { - ste,pins = "GPIO217_AH12"; + pins = "GPIO217_AH12"; ste,config = <&gpio_in_pu>; }; }; @@ -146,7 +146,7 @@ hall { hall_tvk_mode: hall_tvk { tvk_cfg { - ste,pins = "GPIO145_C13"; + pins = "GPIO145_C13"; ste,config = <&gpio_in_pu>; }; }; @@ -155,7 +155,7 @@ accel_tvk_mode: accel_tvk { /* Accelerometer interrupt lines 1 & 2 */ tvk_cfg { - ste,pins = "GPIO82_C1", "GPIO83_D3"; + pins = "GPIO82_C1", "GPIO83_D3"; ste,config = <&gpio_in_pu>; }; }; @@ -164,11 +164,11 @@ magneto_tvk_mode: magneto_tvk { /* Magnetometer uses GPIO 31 and 32, pull these up/down respectively */ tvk_cfg1 { - ste,pins = "GPIO31_V3"; + pins = "GPIO31_V3"; ste,config = <&gpio_in_pu>; }; tvk_cfg2 { - ste,pins 
= "GPIO32_V2"; + pins = "GPIO32_V2"; ste,config = <&gpio_in_pd>; }; }; diff --git a/arch/arm/boot/dts/ste-hrefprev60.dtsi b/arch/arm/boot/dts/ste-hrefprev60.dtsi index abc762e24fcb..7f3975b58d16 100644 --- a/arch/arm/boot/dts/ste-hrefprev60.dtsi +++ b/arch/arm/boot/dts/ste-hrefprev60.dtsi @@ -79,11 +79,11 @@ ssp0 { ssp0_hrefprev60_mode: ssp0_hrefprev60_default { hrefprev60_mux { - ste,function = "ssp0"; - ste,pins = "ssp0_a_1"; + function = "ssp0"; + groups = "ssp0_a_1"; }; hrefprev60_cfg1 { - ste,pins = "GPIO145_C13"; /* RXD */ + pins = "GPIO145_C13"; /* RXD */ ste,config = <&in_pd>; }; @@ -93,11 +93,11 @@ /* This additional pin needed on early MOP500 and HREFs previous to v60 */ sdi0_default_mode: sdi0_default { hrefprev60_mux { - ste,function = "mc0"; - ste,pins = "mc0dat31dir_a_1"; + function = "mc0"; + groups = "mc0dat31dir_a_1"; }; hrefprev60_cfg1 { - ste,pins = "GPIO21_AB3"; /* DAT31DIR */ + pins = "GPIO21_AB3"; /* DAT31DIR */ ste,config = <&out_hi>; }; @@ -106,7 +106,7 @@ tc35892 { tc35892_hrefprev60_mode: tc35892_hrefprev60 { hrefprev60_cfg { - ste,pins = "GPIO217_AH12"; + pins = "GPIO217_AH12"; ste,config = <&gpio_in_pu>; }; }; @@ -114,11 +114,11 @@ ipgpio { ipgpio_hrefprev60_mode: ipgpio_hrefprev60 { hrefprev60_mux { - ste,function = "ipgpio"; - ste,pins = "ipgpio0_c_1", "ipgpio1_c_1"; + function = "ipgpio"; + groups = "ipgpio0_c_1", "ipgpio1_c_1"; }; hrefprev60_cfg1 { - ste,pins = "GPIO6_AF6", "GPIO7_AG5"; + pins = "GPIO6_AF6", "GPIO7_AG5"; ste,config = <&in_pu>; }; }; diff --git a/arch/arm/boot/dts/ste-hrefv60plus.dtsi b/arch/arm/boot/dts/ste-hrefv60plus.dtsi index bcc1f0c37f49..a4bc9e77d640 100644 --- a/arch/arm/boot/dts/ste-hrefv60plus.dtsi +++ b/arch/arm/boot/dts/ste-hrefv60plus.dtsi @@ -49,7 +49,7 @@ /* SD card detect GPIO pin, extend default state */ sdi0_default_mode: sdi0_default { default_hrefv60_cfg1 { - ste,pins = "GPIO95_E8"; + pins = "GPIO95_E8"; ste,config = <&gpio_in_pu>; }; }; @@ -64,19 +64,19 @@ */ ipgpio_hrefv60_mode: ipgpio_hrefv60 { hrefv60_mux { - ste,function = "ipgpio"; - ste,pins = "ipgpio0_c_1", "ipgpio1_c_1", "ipgpio4_c_1"; + function = "ipgpio"; + groups = "ipgpio0_c_1", "ipgpio1_c_1", "ipgpio4_c_1"; }; hrefv60_cfg1 { - ste,pins = "GPIO6_AF6", "GPIO7_AG5"; + pins = "GPIO6_AF6", "GPIO7_AG5"; ste,config = <&in_pu>; }; hrefv60_cfg2 { - ste,pins = "GPIO21_AB3"; + pins = "GPIO21_AB3"; ste,config = <&gpio_out_lo>; }; hrefv60_cfg3 { - ste,pins = "GPIO64_F3"; + pins = "GPIO64_F3"; ste,config = <&out_lo>; }; }; @@ -89,7 +89,7 @@ */ etm_hrefv60_mode: etm_hrefv60 { hrefv60_cfg1 { - ste,pins = + pins = "GPIO70_G5", "GPIO71_G4", "GPIO72_H4", @@ -103,11 +103,11 @@ nahj_hrefv60_mode: nahj_hrefv60 { /* NAHJ CTRL on GPIO76 to low, CTRL_INV on GPIO216 to high */ hrefv60_cfg1 { - ste,pins = "GPIO76_J2"; + pins = "GPIO76_J2"; ste,config = <&gpio_out_lo>; }; hrefv60_cfg2 { - ste,pins = "GPIO216_AG12"; + pins = "GPIO216_AG12"; ste,config = <&gpio_out_hi>; }; }; @@ -116,13 +116,13 @@ nfc_hrefv60_mode: nfc_hrefv60 { /* NFC ENA and RESET to low, pulldown IRQ line */ hrefv60_cfg1 { - ste,pins = + pins = "GPIO77_H1", /* NFC_ENA */ "GPIO142_C11"; /* NFC_RESET */ ste,config = <&gpio_out_lo>; }; hrefv60_cfg2 { - ste,pins = "GPIO144_B13"; /* NFC_IRQ */ + pins = "GPIO144_B13"; /* NFC_IRQ */ ste,config = <&gpio_in_pd>; }; }; @@ -130,11 +130,11 @@ force { force_hrefv60_mode: force_hrefv60 { hrefv60_cfg1 { - ste,pins = "GPIO91_B6"; /* FORCE_SENSING_INT */ + pins = "GPIO91_B6"; /* FORCE_SENSING_INT */ ste,config = <&gpio_in_pu>; }; hrefv60_cfg2 { - ste,pins = + pins = "GPIO92_D6", /* 
FORCE_SENSING_RST */ "GPIO97_D9"; /* FORCE_SENSING_WU */ ste,config = <&gpio_out_lo>; @@ -144,7 +144,7 @@ dipro { dipro_hrefv60_mode: dipro_hrefv60 { hrefv60_cfg1 { - ste,pins = "GPIO139_C9"; /* DIPRO_INT */ + pins = "GPIO139_C9"; /* DIPRO_INT */ ste,config = <&gpio_in_pu>; }; }; @@ -153,7 +153,7 @@ vaudio_hf_hrefv60_mode: vaudio_hf_hrefv60 { /* Audio Amplifier HF enable GPIO */ hrefv60_cfg1 { - ste,pins = "GPIO149_B14"; /* VAUDIO_HF_EN, enable MAX8968 */ + pins = "GPIO149_B14"; /* VAUDIO_HF_EN, enable MAX8968 */ ste,config = <&gpio_out_hi>; }; }; @@ -165,7 +165,7 @@ * pull low to reset state */ hrefv60_cfg1 { - ste,pins = "GPIO171_D23"; /* GBF_ENA_RESET */ + pins = "GPIO171_D23"; /* GBF_ENA_RESET */ ste,config = <&gpio_out_lo>; }; }; @@ -174,7 +174,7 @@ hdtv_hrefv60_mode: hdtv_hrefv60 { /* MSP : HDTV INTERFACE GPIO line */ hrefv60_cfg1 { - ste,pins = "GPIO192_AJ27"; + pins = "GPIO192_AJ27"; ste,config = <&gpio_in_pd>; }; }; @@ -187,11 +187,11 @@ * reset signals low. */ hrefv60_cfg1 { - ste,pins = "GPIO143_D12", "GPIO146_D13"; + pins = "GPIO143_D12", "GPIO146_D13"; ste,config = <&gpio_out_lo>; }; hrefv60_cfg2 { - ste,pins = "GPIO67_G2"; + pins = "GPIO67_G2"; ste,config = <&gpio_in_pu>; }; }; @@ -204,11 +204,11 @@ * Drive DISP1 reset high (not reset), driver DISP2 reset low (reset) */ hrefv60_cfg1 { - ste,pins ="GPIO65_F1"; + pins ="GPIO65_F1"; ste,config = <&gpio_out_hi>; }; hrefv60_cfg2 { - ste,pins ="GPIO66_G3"; + pins ="GPIO66_G3"; ste,config = <&gpio_out_lo>; }; }; diff --git a/arch/arm/boot/dts/ste-nomadik-s8815.dts b/arch/arm/boot/dts/ste-nomadik-s8815.dts index e411ff7769fe..85d3b95dfdba 100644 --- a/arch/arm/boot/dts/ste-nomadik-s8815.dts +++ b/arch/arm/boot/dts/ste-nomadik-s8815.dts @@ -30,12 +30,12 @@ cd_default_mode: cd_default { cd_default_cfg1 { /* CD input GPIO */ - ste,pins = "GPIO111_H21"; + pins = "GPIO111_H21"; ste,input = <0>; }; cd_default_cfg2 { /* CD GPIO biasing */ - ste,pins = "GPIO112_J21"; + pins = "GPIO112_J21"; ste,output = <0>; }; }; @@ -43,7 +43,7 @@ gpioi2c { gpioi2c_default_mode: gpioi2c_default { gpioi2c_default_cfg { - ste,pins = "GPIO73_C21", "GPIO74_C20"; + pins = "GPIO73_C21", "GPIO74_C20"; ste,input = <0>; }; }; @@ -51,7 +51,7 @@ user-led { user_led_default_mode: user_led_default { user_led_default_cfg { - ste,pins = "GPIO2_C5"; + pins = "GPIO2_C5"; ste,output = <1>; }; }; @@ -59,7 +59,7 @@ user-button { user_button_default_mode: user_button_default { user_button_default_cfg { - ste,pins = "GPIO3_A4"; + pins = "GPIO3_A4"; ste,input = <0>; }; }; diff --git a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi index f435ff20aefe..f182f6538e90 100644 --- a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi +++ b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi @@ -100,41 +100,41 @@ uart0 { uart0_default_mux: uart0_mux { u0_default_mux { - ste,function = "u0"; - ste,pins = "u0_a_1"; + function = "u0"; + groups = "u0_a_1"; }; }; }; uart1 { uart1_default_mux: uart1_mux { u1_default_mux { - ste,function = "u1"; - ste,pins = "u1_a_1"; + function = "u1"; + groups = "u1_a_1"; }; }; }; mmcsd { mmcsd_default_mux: mmcsd_mux { mmcsd_default_mux { - ste,function = "mmcsd"; - ste,pins = "mmcsd_a_1", "mmcsd_b_1"; + function = "mmcsd"; + groups = "mmcsd_a_1", "mmcsd_b_1"; }; }; mmcsd_default_mode: mmcsd_default { mmcsd_default_cfg1 { /* MCCLK */ - ste,pins = "GPIO8_B10"; + pins = "GPIO8_B10"; ste,output = <0>; }; mmcsd_default_cfg2 { /* MCCMDDIR, MCDAT0DIR, MCDAT31DIR, MCDATDIR2 */ - ste,pins = "GPIO10_C11", "GPIO15_A12", + pins = "GPIO10_C11", 
"GPIO15_A12", "GPIO16_C13", "GPIO23_D15"; ste,output = <1>; }; mmcsd_default_cfg3 { /* MCCMD, MCDAT3-0, MCMSFBCLK */ - ste,pins = "GPIO9_A10", "GPIO11_B11", + pins = "GPIO9_A10", "GPIO11_B11", "GPIO12_A11", "GPIO13_C12", "GPIO14_B12", "GPIO24_C15"; ste,input = <1>; @@ -144,13 +144,13 @@ i2c0 { i2c0_default_mux: i2c0_mux { i2c0_default_mux { - ste,function = "i2c0"; - ste,pins = "i2c0_a_1"; + function = "i2c0"; + groups = "i2c0_a_1"; }; }; i2c0_default_mode: i2c0_default { i2c0_default_cfg { - ste,pins = "GPIO62_D3", "GPIO63_D2"; + pins = "GPIO62_D3", "GPIO63_D2"; ste,input = <0>; }; }; @@ -158,13 +158,13 @@ i2c1 { i2c1_default_mux: i2c1_mux { i2c1_default_mux { - ste,function = "i2c1"; - ste,pins = "i2c1_a_1"; + function = "i2c1"; + groups = "i2c1_a_1"; }; }; i2c1_default_mode: i2c1_default { i2c1_default_cfg { - ste,pins = "GPIO53_L4", "GPIO54_L3"; + pins = "GPIO53_L4", "GPIO54_L3"; ste,input = <0>; }; }; diff --git a/arch/arm/boot/dts/ste-snowball.dts b/arch/arm/boot/dts/ste-snowball.dts index 3e97a669f15e..206826a855c0 100644 --- a/arch/arm/boot/dts/ste-snowball.dts +++ b/arch/arm/boot/dts/ste-snowball.dts @@ -404,17 +404,17 @@ */ eth_snowball_mode: eth_snowball { snowball_mux { - ste,function = "sm"; - ste,pins = "sm_b_1"; + function = "sm"; + groups = "sm_b_1"; }; /* LAN IRQ pin */ snowball_cfg1 { - ste,pins = "GPIO140_B11"; + pins = "GPIO140_B11"; ste,config = <&in_nopull>; }; /* LAN reset pin */ snowball_cfg2 { - ste,pins = "GPIO141_C12"; + pins = "GPIO141_C12"; ste,config = <&gpio_out_hi>; }; @@ -423,11 +423,11 @@ sdi0 { sdi0_default_mode: sdi0_default { snowball_mux { - ste,function = "mc0"; - ste,pins = "mc0dat31dir_a_1"; + function = "mc0"; + groups = "mc0dat31dir_a_1"; }; snowball_cfg1 { - ste,pins = "GPIO21_AB3"; /* DAT31DIR */ + pins = "GPIO21_AB3"; /* DAT31DIR */ ste,config = <&out_hi>; }; @@ -436,19 +436,19 @@ ssp0 { ssp0_snowball_mode: ssp0_snowball_default { snowball_mux { - ste,function = "ssp0"; - ste,pins = "ssp0_a_1"; + function = "ssp0"; + groups = "ssp0_a_1"; }; snowball_cfg1 { - ste,pins = "GPIO144_B13"; /* FRM */ + pins = "GPIO144_B13"; /* FRM */ ste,config = <&gpio_out_hi>; }; snowball_cfg2 { - ste,pins = "GPIO145_C13"; /* RXD */ + pins = "GPIO145_C13"; /* RXD */ ste,config = <&in_pd>; }; snowball_cfg3 { - ste,pins = + pins = "GPIO146_D13", /* TXD */ "GPIO143_D12"; /* CLK */ ste,config = <&out_lo>; @@ -459,7 +459,7 @@ gpio_led { gpioled_snowball_mode: gpioled_default { snowball_cfg1 { - ste,pins = "GPIO142_C11"; + pins = "GPIO142_C11"; ste,config = <&gpio_out_hi>; }; @@ -469,7 +469,7 @@ accel_snowball_mode: accel_snowball { /* Accelerometer lines */ snowball_cfg1 { - ste,pins = + pins = "GPIO163_C20", /* ACCEL_IRQ1 */ "GPIO164_B21"; /* ACCEL_IRQ2 */ ste,config = <&gpio_in_pu>; @@ -479,7 +479,7 @@ magnetometer { magneto_snowball_mode: magneto_snowball { snowball_cfg1 { - ste,pins = "GPIO165_C21"; /* MAG_DRDY */ + pins = "GPIO165_C21"; /* MAG_DRDY */ ste,config = <&gpio_in_pu>; }; }; @@ -491,7 +491,7 @@ * pull low to reset state */ snowball_cfg1 { - ste,pins = "GPIO171_D23"; /* GBF_ENA_RESET */ + pins = "GPIO171_D23"; /* GBF_ENA_RESET */ ste,config = <&gpio_out_lo>; }; }; @@ -503,13 +503,13 @@ * These are plain GPIO pins used by WLAN */ snowball_cfg1 { - ste,pins = + pins = "GPIO161_D21", /* WLAN_PMU_EN */ "GPIO215_AH13"; /* WLAN_ENA */ ste,config = <&gpio_out_lo>; }; snowball_cfg2 { - ste,pins = "GPIO216_AG12"; /* WLAN_IRQ */ + pins = "GPIO216_AG12"; /* WLAN_IRQ */ ste,config = <&gpio_in_pu>; }; }; diff --git a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts 
b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts index 322fd1519b09..33920df03640 100644 --- a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts +++ b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts @@ -358,6 +358,205 @@ }; }; + etb@0,20010000 { + compatible = "arm,coresight-etb10", "arm,primecell"; + reg = <0 0x20010000 0 0x1000>; + + coresight-default-sink; + clocks = <&oscclk6a>; + clock-names = "apb_pclk"; + port { + etb_in_port: endpoint@0 { + slave-mode; + remote-endpoint = <&replicator_out_port0>; + }; + }; + }; + + tpiu@0,20030000 { + compatible = "arm,coresight-tpiu", "arm,primecell"; + reg = <0 0x20030000 0 0x1000>; + + clocks = <&oscclk6a>; + clock-names = "apb_pclk"; + port { + tpiu_in_port: endpoint@0 { + slave-mode; + remote-endpoint = <&replicator_out_port1>; + }; + }; + }; + + replicator { + /* non-configurable replicators don't show up on the + * AMBA bus. As such no need to add "arm,primecell". + */ + compatible = "arm,coresight-replicator"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + /* replicator output ports */ + port@0 { + reg = <0>; + replicator_out_port0: endpoint { + remote-endpoint = <&etb_in_port>; + }; + }; + + port@1 { + reg = <1>; + replicator_out_port1: endpoint { + remote-endpoint = <&tpiu_in_port>; + }; + }; + + /* replicator input port */ + port@2 { + reg = <0>; + replicator_in_port0: endpoint { + slave-mode; + remote-endpoint = <&funnel_out_port0>; + }; + }; + }; + }; + + funnel@0,20040000 { + compatible = "arm,coresight-funnel", "arm,primecell"; + reg = <0 0x20040000 0 0x1000>; + + clocks = <&oscclk6a>; + clock-names = "apb_pclk"; + ports { + #address-cells = <1>; + #size-cells = <0>; + + /* funnel output port */ + port@0 { + reg = <0>; + funnel_out_port0: endpoint { + remote-endpoint = + <&replicator_in_port0>; + }; + }; + + /* funnel input ports */ + port@1 { + reg = <0>; + funnel_in_port0: endpoint { + slave-mode; + remote-endpoint = <&ptm0_out_port>; + }; + }; + + port@2 { + reg = <1>; + funnel_in_port1: endpoint { + slave-mode; + remote-endpoint = <&ptm1_out_port>; + }; + }; + + port@3 { + reg = <2>; + funnel_in_port2: endpoint { + slave-mode; + remote-endpoint = <&etm0_out_port>; + }; + }; + + /* Input port #3 is for ITM, not supported here */ + + port@4 { + reg = <4>; + funnel_in_port4: endpoint { + slave-mode; + remote-endpoint = <&etm1_out_port>; + }; + }; + + port@5 { + reg = <5>; + funnel_in_port5: endpoint { + slave-mode; + remote-endpoint = <&etm2_out_port>; + }; + }; + }; + }; + + ptm@0,2201c000 { + compatible = "arm,coresight-etm3x", "arm,primecell"; + reg = <0 0x2201c000 0 0x1000>; + + cpu = <&cpu0>; + clocks = <&oscclk6a>; + clock-names = "apb_pclk"; + port { + ptm0_out_port: endpoint { + remote-endpoint = <&funnel_in_port0>; + }; + }; + }; + + ptm@0,2201d000 { + compatible = "arm,coresight-etm3x", "arm,primecell"; + reg = <0 0x2201d000 0 0x1000>; + + cpu = <&cpu1>; + clocks = <&oscclk6a>; + clock-names = "apb_pclk"; + port { + ptm1_out_port: endpoint { + remote-endpoint = <&funnel_in_port1>; + }; + }; + }; + + etm@0,2203c000 { + compatible = "arm,coresight-etm3x", "arm,primecell"; + reg = <0 0x2203c000 0 0x1000>; + + cpu = <&cpu2>; + clocks = <&oscclk6a>; + clock-names = "apb_pclk"; + port { + etm0_out_port: endpoint { + remote-endpoint = <&funnel_in_port2>; + }; + }; + }; + + etm@0,2203d000 { + compatible = "arm,coresight-etm3x", "arm,primecell"; + reg = <0 0x2203d000 0 0x1000>; + + cpu = <&cpu3>; + clocks = <&oscclk6a>; + clock-names = "apb_pclk"; + port { + etm1_out_port: endpoint { + remote-endpoint = <&funnel_in_port4>; + }; + }; 
+ }; + + etm@0,2203e000 { + compatible = "arm,coresight-etm3x", "arm,primecell"; + reg = <0 0x2203e000 0 0x1000>; + + cpu = <&cpu4>; + clocks = <&oscclk6a>; + clock-names = "apb_pclk"; + port { + etm2_out_port: endpoint { + remote-endpoint = <&funnel_in_port5>; + }; + }; + }; + smb { compatible = "simple-bus"; diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c index e57d7e5bf96a..5cc779c8e9c6 100644 --- a/arch/arm/common/sa1111.c +++ b/arch/arm/common/sa1111.c @@ -282,8 +282,8 @@ static int sa1111_retrigger_lowirq(struct irq_data *d) } if (i == 8) - printk(KERN_ERR "Danger Will Robinson: failed to " - "re-trigger IRQ%d\n", d->irq); + pr_err("Danger Will Robinson: failed to re-trigger IRQ%d\n", + d->irq); return i == 8 ? -1 : 0; } @@ -384,8 +384,8 @@ static int sa1111_retrigger_highirq(struct irq_data *d) } if (i == 8) - printk(KERN_ERR "Danger Will Robinson: failed to " - "re-trigger IRQ%d\n", d->irq); + pr_err("Danger Will Robinson: failed to re-trigger IRQ%d\n", + d->irq); return i == 8 ? -1 : 0; } @@ -740,9 +740,8 @@ static int __sa1111_probe(struct device *me, struct resource *mem, int irq) goto err_unmap; } - printk(KERN_INFO "SA1111 Microprocessor Companion Chip: " - "silicon revision %lx, metal revision %lx\n", - (id & SKID_SIREV_MASK)>>4, (id & SKID_MTREV_MASK)); + pr_info("SA1111 Microprocessor Companion Chip: silicon revision %lx, metal revision %lx\n", + (id & SKID_SIREV_MASK) >> 4, id & SKID_MTREV_MASK); /* * We found it. Wake the chip up, and initialise. @@ -1057,7 +1056,6 @@ static struct platform_driver sa1111_device_driver = { .resume = sa1111_resume, .driver = { .name = "sa1111", - .owner = THIS_MODULE, }, }; diff --git a/arch/arm/configs/davinci_all_defconfig b/arch/arm/configs/davinci_all_defconfig index f95f72d62db7..759f9b0053e2 100644 --- a/arch/arm/configs/davinci_all_defconfig +++ b/arch/arm/configs/davinci_all_defconfig @@ -97,7 +97,6 @@ CONFIG_PPP_ASYNC=m CONFIG_PPP_SYNC_TTY=m CONFIG_PPP_DEFLATE=m CONFIG_NETCONSOLE=y -CONFIG_NETPOLL_TRAP=y # CONFIG_INPUT_MOUSEDEV is not set CONFIG_INPUT_EVDEV=m CONFIG_INPUT_EVBUG=m diff --git a/arch/arm/crypto/aes_glue.c b/arch/arm/crypto/aes_glue.c index 3003fa1f6fb4..0409b8f89782 100644 --- a/arch/arm/crypto/aes_glue.c +++ b/arch/arm/crypto/aes_glue.c @@ -93,6 +93,6 @@ module_exit(aes_fini); MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm (ASM)"); MODULE_LICENSE("GPL"); -MODULE_ALIAS("aes"); -MODULE_ALIAS("aes-asm"); +MODULE_ALIAS_CRYPTO("aes"); +MODULE_ALIAS_CRYPTO("aes-asm"); MODULE_AUTHOR("David McCullough <ucdevel@gmail.com>"); diff --git a/arch/arm/crypto/sha1_glue.c b/arch/arm/crypto/sha1_glue.c index 84f2a756588b..e31b0440c613 100644 --- a/arch/arm/crypto/sha1_glue.c +++ b/arch/arm/crypto/sha1_glue.c @@ -171,5 +171,5 @@ module_exit(sha1_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm (ARM)"); -MODULE_ALIAS("sha1"); +MODULE_ALIAS_CRYPTO("sha1"); MODULE_AUTHOR("David McCullough <ucdevel@gmail.com>"); diff --git a/arch/arm/crypto/sha1_neon_glue.c b/arch/arm/crypto/sha1_neon_glue.c index 6f1b411b1d55..0b0083757d47 100644 --- a/arch/arm/crypto/sha1_neon_glue.c +++ b/arch/arm/crypto/sha1_neon_glue.c @@ -194,4 +194,4 @@ module_exit(sha1_neon_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, NEON accelerated"); -MODULE_ALIAS("sha1"); +MODULE_ALIAS_CRYPTO("sha1"); diff --git a/arch/arm/crypto/sha512_neon_glue.c b/arch/arm/crypto/sha512_neon_glue.c index 0d2758ff5e12..b124dce838d6 100644 --- a/arch/arm/crypto/sha512_neon_glue.c +++ 
b/arch/arm/crypto/sha512_neon_glue.c @@ -241,7 +241,7 @@ static int sha384_neon_final(struct shash_desc *desc, u8 *hash) sha512_neon_final(desc, D); memcpy(hash, D, SHA384_DIGEST_SIZE); - memset(D, 0, SHA512_DIGEST_SIZE); + memzero_explicit(D, SHA512_DIGEST_SIZE); return 0; } @@ -301,5 +301,5 @@ module_exit(sha512_neon_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("SHA512 Secure Hash Algorithm, NEON accelerated"); -MODULE_ALIAS("sha512"); -MODULE_ALIAS("sha384"); +MODULE_ALIAS_CRYPTO("sha512"); +MODULE_ALIAS_CRYPTO("sha384"); diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild index 70cd84eb7fda..fe74c0d1e485 100644 --- a/arch/arm/include/asm/Kbuild +++ b/arch/arm/include/asm/Kbuild @@ -7,7 +7,6 @@ generic-y += current.h generic-y += emergency-restart.h generic-y += errno.h generic-y += exec.h -generic-y += hash.h generic-y += ioctl.h generic-y += ipcbuf.h generic-y += irq_regs.h diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h index c6a3e73a6e24..d2f81e6b8c1c 100644 --- a/arch/arm/include/asm/barrier.h +++ b/arch/arm/include/asm/barrier.h @@ -43,10 +43,14 @@ #define mb() do { dsb(); outer_sync(); } while (0) #define rmb() dsb() #define wmb() do { dsb(st); outer_sync(); } while (0) +#define dma_rmb() dmb(osh) +#define dma_wmb() dmb(oshst) #else #define mb() barrier() #define rmb() barrier() #define wmb() barrier() +#define dma_rmb() barrier() +#define dma_wmb() barrier() #endif #ifndef CONFIG_SMP diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h index 10e78d00a0bb..2d46862e7bef 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h @@ -487,6 +487,16 @@ int set_memory_rw(unsigned long addr, int numpages); int set_memory_x(unsigned long addr, int numpages); int set_memory_nx(unsigned long addr, int numpages); +#ifdef CONFIG_DEBUG_RODATA +void mark_rodata_ro(void); +void set_kernel_text_rw(void); +void set_kernel_text_ro(void); +#else +static inline void set_kernel_text_rw(void) { } +static inline void set_kernel_text_ro(void) { } +#endif + void flush_uprobe_xol_access(struct page *page, unsigned long uaddr, void *kaddr, unsigned long len); + #endif diff --git a/arch/arm/include/asm/cpuidle.h b/arch/arm/include/asm/cpuidle.h index 2fca60ab513a..af319ac4960c 100644 --- a/arch/arm/include/asm/cpuidle.h +++ b/arch/arm/include/asm/cpuidle.h @@ -15,7 +15,6 @@ static inline int arm_cpuidle_simple_enter(struct cpuidle_device *dev, .exit_latency = 1,\ .target_residency = 1,\ .power_usage = p,\ - .flags = CPUIDLE_FLAG_TIME_VALID,\ .name = "WFI",\ .desc = "ARM WFI",\ } diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h index dc662fca9230..4111592f0130 100644 --- a/arch/arm/include/asm/device.h +++ b/arch/arm/include/asm/device.h @@ -17,6 +17,7 @@ struct dev_archdata { #ifdef CONFIG_ARM_DMA_USE_IOMMU struct dma_iommu_mapping *mapping; #endif + bool dma_coherent; }; struct omap_device; diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index 85738b200023..e6e3446abdf6 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -123,11 +123,18 @@ static inline unsigned long dma_max_pfn(struct device *dev) static inline int set_arch_dma_coherent_ops(struct device *dev) { + dev->archdata.dma_coherent = true; set_dma_ops(dev, &arm_coherent_dma_ops); return 0; } #define set_arch_dma_coherent_ops(dev) set_arch_dma_coherent_ops(dev) +/* do not use this function in a driver */ +static inline bool 
is_device_dma_coherent(struct device *dev) +{ + return dev->archdata.dma_coherent; +} + static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) { unsigned int offset = paddr & ~PAGE_MASK; diff --git a/arch/arm/include/asm/fixmap.h b/arch/arm/include/asm/fixmap.h index 74124b0d0d79..0415eae1df27 100644 --- a/arch/arm/include/asm/fixmap.h +++ b/arch/arm/include/asm/fixmap.h @@ -2,27 +2,24 @@ #define _ASM_FIXMAP_H #define FIXADDR_START 0xffc00000UL -#define FIXADDR_TOP 0xffe00000UL -#define FIXADDR_SIZE (FIXADDR_TOP - FIXADDR_START) +#define FIXADDR_END 0xfff00000UL +#define FIXADDR_TOP (FIXADDR_END - PAGE_SIZE) -#define FIX_KMAP_NR_PTES (FIXADDR_SIZE >> PAGE_SHIFT) +#include <asm/kmap_types.h> -#define __fix_to_virt(x) (FIXADDR_START + ((x) << PAGE_SHIFT)) -#define __virt_to_fix(x) (((x) - FIXADDR_START) >> PAGE_SHIFT) +enum fixed_addresses { + FIX_KMAP_BEGIN, + FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1, -extern void __this_fixmap_does_not_exist(void); + /* Support writing RO kernel text via kprobes, jump labels, etc. */ + FIX_TEXT_POKE0, + FIX_TEXT_POKE1, -static inline unsigned long fix_to_virt(const unsigned int idx) -{ - if (idx >= FIX_KMAP_NR_PTES) - __this_fixmap_does_not_exist(); - return __fix_to_virt(idx); -} + __end_of_fixed_addresses +}; -static inline unsigned int virt_to_fix(const unsigned long vaddr) -{ - BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); - return __virt_to_fix(vaddr); -} +void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot); + +#include <asm-generic/fixmap.h> #endif diff --git a/arch/arm/include/asm/hardware/coresight.h b/arch/arm/include/asm/hardware/coresight.h deleted file mode 100644 index ad774f37c47c..000000000000 --- a/arch/arm/include/asm/hardware/coresight.h +++ /dev/null @@ -1,157 +0,0 @@ -/* - * linux/arch/arm/include/asm/hardware/coresight.h - * - * CoreSight components' registers - * - * Copyright (C) 2009 Nokia Corporation. - * Alexander Shishkin - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#ifndef __ASM_HARDWARE_CORESIGHT_H -#define __ASM_HARDWARE_CORESIGHT_H - -#define TRACER_ACCESSED_BIT 0 -#define TRACER_RUNNING_BIT 1 -#define TRACER_CYCLE_ACC_BIT 2 -#define TRACER_ACCESSED BIT(TRACER_ACCESSED_BIT) -#define TRACER_RUNNING BIT(TRACER_RUNNING_BIT) -#define TRACER_CYCLE_ACC BIT(TRACER_CYCLE_ACC_BIT) - -#define TRACER_TIMEOUT 10000 - -#define etm_writel(t, v, x) \ - (writel_relaxed((v), (t)->etm_regs + (x))) -#define etm_readl(t, x) (readl_relaxed((t)->etm_regs + (x))) - -/* CoreSight Management Registers */ -#define CSMR_LOCKACCESS 0xfb0 -#define CSMR_LOCKSTATUS 0xfb4 -#define CSMR_AUTHSTATUS 0xfb8 -#define CSMR_DEVID 0xfc8 -#define CSMR_DEVTYPE 0xfcc -/* CoreSight Component Registers */ -#define CSCR_CLASS 0xff4 - -#define CS_LAR_KEY 0xc5acce55 - -/* ETM control register, "ETM Architecture", 3.3.1 */ -#define ETMR_CTRL 0 -#define ETMCTRL_POWERDOWN 1 -#define ETMCTRL_PROGRAM (1 << 10) -#define ETMCTRL_PORTSEL (1 << 11) -#define ETMCTRL_DO_CONTEXTID (3 << 14) -#define ETMCTRL_PORTMASK1 (7 << 4) -#define ETMCTRL_PORTMASK2 (1 << 21) -#define ETMCTRL_PORTMASK (ETMCTRL_PORTMASK1 | ETMCTRL_PORTMASK2) -#define ETMCTRL_PORTSIZE(x) ((((x) & 7) << 4) | (!!((x) & 8)) << 21) -#define ETMCTRL_DO_CPRT (1 << 1) -#define ETMCTRL_DATAMASK (3 << 2) -#define ETMCTRL_DATA_DO_DATA (1 << 2) -#define ETMCTRL_DATA_DO_ADDR (1 << 3) -#define ETMCTRL_DATA_DO_BOTH (ETMCTRL_DATA_DO_DATA | ETMCTRL_DATA_DO_ADDR) -#define ETMCTRL_BRANCH_OUTPUT (1 << 8) -#define ETMCTRL_CYCLEACCURATE (1 << 12) - -/* ETM configuration code register */ -#define ETMR_CONFCODE (0x04) - -/* ETM trace start/stop resource control register */ -#define ETMR_TRACESSCTRL (0x18) - -/* ETM trigger event register */ -#define ETMR_TRIGEVT (0x08) - -/* address access type register bits, "ETM architecture", - * table 3-27 */ -/* - access type */ -#define ETMAAT_IFETCH 0 -#define ETMAAT_IEXEC 1 -#define ETMAAT_IEXECPASS 2 -#define ETMAAT_IEXECFAIL 3 -#define ETMAAT_DLOADSTORE 4 -#define ETMAAT_DLOAD 5 -#define ETMAAT_DSTORE 6 -/* - comparison access size */ -#define ETMAAT_JAVA (0 << 3) -#define ETMAAT_THUMB (1 << 3) -#define ETMAAT_ARM (3 << 3) -/* - data value comparison control */ -#define ETMAAT_NOVALCMP (0 << 5) -#define ETMAAT_VALMATCH (1 << 5) -#define ETMAAT_VALNOMATCH (3 << 5) -/* - exact match */ -#define ETMAAT_EXACTMATCH (1 << 7) -/* - context id comparator control */ -#define ETMAAT_IGNCONTEXTID (0 << 8) -#define ETMAAT_VALUE1 (1 << 8) -#define ETMAAT_VALUE2 (2 << 8) -#define ETMAAT_VALUE3 (3 << 8) -/* - security level control */ -#define ETMAAT_IGNSECURITY (0 << 10) -#define ETMAAT_NSONLY (1 << 10) -#define ETMAAT_SONLY (2 << 10) - -#define ETMR_COMP_VAL(x) (0x40 + (x) * 4) -#define ETMR_COMP_ACC_TYPE(x) (0x80 + (x) * 4) - -/* ETM status register, "ETM Architecture", 3.3.2 */ -#define ETMR_STATUS (0x10) -#define ETMST_OVERFLOW BIT(0) -#define ETMST_PROGBIT BIT(1) -#define ETMST_STARTSTOP BIT(2) -#define ETMST_TRIGGER BIT(3) - -#define etm_progbit(t) (etm_readl((t), ETMR_STATUS) & ETMST_PROGBIT) -#define etm_started(t) (etm_readl((t), ETMR_STATUS) & ETMST_STARTSTOP) -#define etm_triggered(t) (etm_readl((t), ETMR_STATUS) & ETMST_TRIGGER) - -#define ETMR_TRACEENCTRL2 0x1c -#define ETMR_TRACEENCTRL 0x24 -#define ETMTE_INCLEXCL BIT(24) -#define ETMR_TRACEENEVT 0x20 -#define ETMCTRL_OPTS (ETMCTRL_DO_CPRT | \ - ETMCTRL_DATA_DO_ADDR | \ - ETMCTRL_BRANCH_OUTPUT | \ - ETMCTRL_DO_CONTEXTID) - -/* ETM management registers, "ETM Architecture", 3.5.24 */ -#define ETMMR_OSLAR 0x300 -#define ETMMR_OSLSR 0x304 -#define ETMMR_OSSRR 0x308 
-#define ETMMR_PDSR 0x314 - -/* ETB registers, "CoreSight Components TRM", 9.3 */ -#define ETBR_DEPTH 0x04 -#define ETBR_STATUS 0x0c -#define ETBR_READMEM 0x10 -#define ETBR_READADDR 0x14 -#define ETBR_WRITEADDR 0x18 -#define ETBR_TRIGGERCOUNT 0x1c -#define ETBR_CTRL 0x20 -#define ETBR_FORMATTERCTRL 0x304 -#define ETBFF_ENFTC 1 -#define ETBFF_ENFCONT BIT(1) -#define ETBFF_FONFLIN BIT(4) -#define ETBFF_MANUAL_FLUSH BIT(6) -#define ETBFF_TRIGIN BIT(8) -#define ETBFF_TRIGEVT BIT(9) -#define ETBFF_TRIGFL BIT(10) - -#define etb_writel(t, v, x) \ - (writel_relaxed((v), (t)->etb_regs + (x))) -#define etb_readl(t, x) (readl_relaxed((t)->etb_regs + (x))) - -#define etm_lock(t) do { etm_writel((t), 0, CSMR_LOCKACCESS); } while (0) -#define etm_unlock(t) \ - do { etm_writel((t), CS_LAR_KEY, CSMR_LOCKACCESS); } while (0) - -#define etb_lock(t) do { etb_writel((t), 0, CSMR_LOCKACCESS); } while (0) -#define etb_unlock(t) \ - do { etb_writel((t), CS_LAR_KEY, CSMR_LOCKACCESS); } while (0) - -#endif /* __ASM_HARDWARE_CORESIGHT_H */ - diff --git a/arch/arm/include/asm/hardware/cp14.h b/arch/arm/include/asm/hardware/cp14.h new file mode 100644 index 000000000000..61576dc58ede --- /dev/null +++ b/arch/arm/include/asm/hardware/cp14.h @@ -0,0 +1,542 @@ +/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __ASM_HARDWARE_CP14_H +#define __ASM_HARDWARE_CP14_H + +#include <linux/types.h> + +/* Accessors for CP14 registers */ +#define dbg_read(reg) RCP14_##reg() +#define dbg_write(val, reg) WCP14_##reg(val) +#define etm_read(reg) RCP14_##reg() +#define etm_write(val, reg) WCP14_##reg(val) + +/* MRC14 and MCR14 */ +#define MRC14(op1, crn, crm, op2) \ +({ \ +u32 val; \ +asm volatile("mrc p14, "#op1", %0, "#crn", "#crm", "#op2 : "=r" (val)); \ +val; \ +}) + +#define MCR14(val, op1, crn, crm, op2) \ +({ \ +asm volatile("mcr p14, "#op1", %0, "#crn", "#crm", "#op2 : : "r" (val));\ +}) + +/* + * Debug Registers + * + * Available only in DBGv7 + * DBGECR, DBGDSCCR, DBGDSMCR, DBGDRCR + * + * Available only in DBGv7.1 + * DBGBXVRm, DBGOSDLR, DBGDEVID2, DBGDEVID1 + * + * Read only + * DBGDIDR, DBGDSCRint, DBGDTRRXint, DBGDRAR, DBGOSLSR, DBGOSSRR, DBGPRSR, + * DBGPRSR, DBGDSAR, DBGAUTHSTATUS, DBGDEVID2, DBGDEVID1, DBGDEVID + * + * Write only + * DBGDTRTXint, DBGOSLAR + */ +#define RCP14_DBGDIDR() MRC14(0, c0, c0, 0) +#define RCP14_DBGDSCRint() MRC14(0, c0, c1, 0) +#define RCP14_DBGDTRRXint() MRC14(0, c0, c5, 0) +#define RCP14_DBGWFAR() MRC14(0, c0, c6, 0) +#define RCP14_DBGVCR() MRC14(0, c0, c7, 0) +#define RCP14_DBGECR() MRC14(0, c0, c9, 0) +#define RCP14_DBGDSCCR() MRC14(0, c0, c10, 0) +#define RCP14_DBGDSMCR() MRC14(0, c0, c11, 0) +#define RCP14_DBGDTRRXext() MRC14(0, c0, c0, 2) +#define RCP14_DBGDSCRext() MRC14(0, c0, c2, 2) +#define RCP14_DBGDTRTXext() MRC14(0, c0, c3, 2) +#define RCP14_DBGDRCR() MRC14(0, c0, c4, 2) +#define RCP14_DBGBVR0() MRC14(0, c0, c0, 4) +#define RCP14_DBGBVR1() MRC14(0, c0, c1, 4) +#define RCP14_DBGBVR2() MRC14(0, c0, c2, 4) +#define RCP14_DBGBVR3() MRC14(0, c0, c3, 4) +#define RCP14_DBGBVR4() MRC14(0, c0, c4, 4) +#define RCP14_DBGBVR5() MRC14(0, c0, c5, 4) +#define RCP14_DBGBVR6() MRC14(0, c0, c6, 4) +#define RCP14_DBGBVR7() MRC14(0, c0, c7, 4) +#define RCP14_DBGBVR8() MRC14(0, c0, c8, 4) +#define RCP14_DBGBVR9() MRC14(0, c0, c9, 4) +#define RCP14_DBGBVR10() MRC14(0, c0, c10, 4) +#define RCP14_DBGBVR11() MRC14(0, c0, c11, 4) +#define RCP14_DBGBVR12() MRC14(0, c0, c12, 4) +#define RCP14_DBGBVR13() MRC14(0, c0, c13, 4) +#define RCP14_DBGBVR14() MRC14(0, c0, c14, 4) +#define RCP14_DBGBVR15() MRC14(0, c0, c15, 4) +#define RCP14_DBGBCR0() MRC14(0, c0, c0, 5) +#define RCP14_DBGBCR1() MRC14(0, c0, c1, 5) +#define RCP14_DBGBCR2() MRC14(0, c0, c2, 5) +#define RCP14_DBGBCR3() MRC14(0, c0, c3, 5) +#define RCP14_DBGBCR4() MRC14(0, c0, c4, 5) +#define RCP14_DBGBCR5() MRC14(0, c0, c5, 5) +#define RCP14_DBGBCR6() MRC14(0, c0, c6, 5) +#define RCP14_DBGBCR7() MRC14(0, c0, c7, 5) +#define RCP14_DBGBCR8() MRC14(0, c0, c8, 5) +#define RCP14_DBGBCR9() MRC14(0, c0, c9, 5) +#define RCP14_DBGBCR10() MRC14(0, c0, c10, 5) +#define RCP14_DBGBCR11() MRC14(0, c0, c11, 5) +#define RCP14_DBGBCR12() MRC14(0, c0, c12, 5) +#define RCP14_DBGBCR13() MRC14(0, c0, c13, 5) +#define RCP14_DBGBCR14() MRC14(0, c0, c14, 5) +#define RCP14_DBGBCR15() MRC14(0, c0, c15, 5) +#define RCP14_DBGWVR0() MRC14(0, c0, c0, 6) +#define RCP14_DBGWVR1() MRC14(0, c0, c1, 6) +#define RCP14_DBGWVR2() MRC14(0, c0, c2, 6) +#define RCP14_DBGWVR3() MRC14(0, c0, c3, 6) +#define RCP14_DBGWVR4() MRC14(0, c0, c4, 6) +#define RCP14_DBGWVR5() MRC14(0, c0, c5, 6) +#define RCP14_DBGWVR6() MRC14(0, c0, c6, 6) +#define RCP14_DBGWVR7() MRC14(0, c0, c7, 6) +#define RCP14_DBGWVR8() MRC14(0, c0, c8, 6) +#define RCP14_DBGWVR9() MRC14(0, c0, c9, 6) +#define RCP14_DBGWVR10() MRC14(0, c0, c10, 6) +#define RCP14_DBGWVR11() MRC14(0, c0, c11, 6) 
+#define RCP14_DBGWVR12() MRC14(0, c0, c12, 6) +#define RCP14_DBGWVR13() MRC14(0, c0, c13, 6) +#define RCP14_DBGWVR14() MRC14(0, c0, c14, 6) +#define RCP14_DBGWVR15() MRC14(0, c0, c15, 6) +#define RCP14_DBGWCR0() MRC14(0, c0, c0, 7) +#define RCP14_DBGWCR1() MRC14(0, c0, c1, 7) +#define RCP14_DBGWCR2() MRC14(0, c0, c2, 7) +#define RCP14_DBGWCR3() MRC14(0, c0, c3, 7) +#define RCP14_DBGWCR4() MRC14(0, c0, c4, 7) +#define RCP14_DBGWCR5() MRC14(0, c0, c5, 7) +#define RCP14_DBGWCR6() MRC14(0, c0, c6, 7) +#define RCP14_DBGWCR7() MRC14(0, c0, c7, 7) +#define RCP14_DBGWCR8() MRC14(0, c0, c8, 7) +#define RCP14_DBGWCR9() MRC14(0, c0, c9, 7) +#define RCP14_DBGWCR10() MRC14(0, c0, c10, 7) +#define RCP14_DBGWCR11() MRC14(0, c0, c11, 7) +#define RCP14_DBGWCR12() MRC14(0, c0, c12, 7) +#define RCP14_DBGWCR13() MRC14(0, c0, c13, 7) +#define RCP14_DBGWCR14() MRC14(0, c0, c14, 7) +#define RCP14_DBGWCR15() MRC14(0, c0, c15, 7) +#define RCP14_DBGDRAR() MRC14(0, c1, c0, 0) +#define RCP14_DBGBXVR0() MRC14(0, c1, c0, 1) +#define RCP14_DBGBXVR1() MRC14(0, c1, c1, 1) +#define RCP14_DBGBXVR2() MRC14(0, c1, c2, 1) +#define RCP14_DBGBXVR3() MRC14(0, c1, c3, 1) +#define RCP14_DBGBXVR4() MRC14(0, c1, c4, 1) +#define RCP14_DBGBXVR5() MRC14(0, c1, c5, 1) +#define RCP14_DBGBXVR6() MRC14(0, c1, c6, 1) +#define RCP14_DBGBXVR7() MRC14(0, c1, c7, 1) +#define RCP14_DBGBXVR8() MRC14(0, c1, c8, 1) +#define RCP14_DBGBXVR9() MRC14(0, c1, c9, 1) +#define RCP14_DBGBXVR10() MRC14(0, c1, c10, 1) +#define RCP14_DBGBXVR11() MRC14(0, c1, c11, 1) +#define RCP14_DBGBXVR12() MRC14(0, c1, c12, 1) +#define RCP14_DBGBXVR13() MRC14(0, c1, c13, 1) +#define RCP14_DBGBXVR14() MRC14(0, c1, c14, 1) +#define RCP14_DBGBXVR15() MRC14(0, c1, c15, 1) +#define RCP14_DBGOSLSR() MRC14(0, c1, c1, 4) +#define RCP14_DBGOSSRR() MRC14(0, c1, c2, 4) +#define RCP14_DBGOSDLR() MRC14(0, c1, c3, 4) +#define RCP14_DBGPRCR() MRC14(0, c1, c4, 4) +#define RCP14_DBGPRSR() MRC14(0, c1, c5, 4) +#define RCP14_DBGDSAR() MRC14(0, c2, c0, 0) +#define RCP14_DBGITCTRL() MRC14(0, c7, c0, 4) +#define RCP14_DBGCLAIMSET() MRC14(0, c7, c8, 6) +#define RCP14_DBGCLAIMCLR() MRC14(0, c7, c9, 6) +#define RCP14_DBGAUTHSTATUS() MRC14(0, c7, c14, 6) +#define RCP14_DBGDEVID2() MRC14(0, c7, c0, 7) +#define RCP14_DBGDEVID1() MRC14(0, c7, c1, 7) +#define RCP14_DBGDEVID() MRC14(0, c7, c2, 7) + +#define WCP14_DBGDTRTXint(val) MCR14(val, 0, c0, c5, 0) +#define WCP14_DBGWFAR(val) MCR14(val, 0, c0, c6, 0) +#define WCP14_DBGVCR(val) MCR14(val, 0, c0, c7, 0) +#define WCP14_DBGECR(val) MCR14(val, 0, c0, c9, 0) +#define WCP14_DBGDSCCR(val) MCR14(val, 0, c0, c10, 0) +#define WCP14_DBGDSMCR(val) MCR14(val, 0, c0, c11, 0) +#define WCP14_DBGDTRRXext(val) MCR14(val, 0, c0, c0, 2) +#define WCP14_DBGDSCRext(val) MCR14(val, 0, c0, c2, 2) +#define WCP14_DBGDTRTXext(val) MCR14(val, 0, c0, c3, 2) +#define WCP14_DBGDRCR(val) MCR14(val, 0, c0, c4, 2) +#define WCP14_DBGBVR0(val) MCR14(val, 0, c0, c0, 4) +#define WCP14_DBGBVR1(val) MCR14(val, 0, c0, c1, 4) +#define WCP14_DBGBVR2(val) MCR14(val, 0, c0, c2, 4) +#define WCP14_DBGBVR3(val) MCR14(val, 0, c0, c3, 4) +#define WCP14_DBGBVR4(val) MCR14(val, 0, c0, c4, 4) +#define WCP14_DBGBVR5(val) MCR14(val, 0, c0, c5, 4) +#define WCP14_DBGBVR6(val) MCR14(val, 0, c0, c6, 4) +#define WCP14_DBGBVR7(val) MCR14(val, 0, c0, c7, 4) +#define WCP14_DBGBVR8(val) MCR14(val, 0, c0, c8, 4) +#define WCP14_DBGBVR9(val) MCR14(val, 0, c0, c9, 4) +#define WCP14_DBGBVR10(val) MCR14(val, 0, c0, c10, 4) +#define WCP14_DBGBVR11(val) MCR14(val, 0, c0, c11, 4) +#define WCP14_DBGBVR12(val) MCR14(val, 0, 
c0, c12, 4) +#define WCP14_DBGBVR13(val) MCR14(val, 0, c0, c13, 4) +#define WCP14_DBGBVR14(val) MCR14(val, 0, c0, c14, 4) +#define WCP14_DBGBVR15(val) MCR14(val, 0, c0, c15, 4) +#define WCP14_DBGBCR0(val) MCR14(val, 0, c0, c0, 5) +#define WCP14_DBGBCR1(val) MCR14(val, 0, c0, c1, 5) +#define WCP14_DBGBCR2(val) MCR14(val, 0, c0, c2, 5) +#define WCP14_DBGBCR3(val) MCR14(val, 0, c0, c3, 5) +#define WCP14_DBGBCR4(val) MCR14(val, 0, c0, c4, 5) +#define WCP14_DBGBCR5(val) MCR14(val, 0, c0, c5, 5) +#define WCP14_DBGBCR6(val) MCR14(val, 0, c0, c6, 5) +#define WCP14_DBGBCR7(val) MCR14(val, 0, c0, c7, 5) +#define WCP14_DBGBCR8(val) MCR14(val, 0, c0, c8, 5) +#define WCP14_DBGBCR9(val) MCR14(val, 0, c0, c9, 5) +#define WCP14_DBGBCR10(val) MCR14(val, 0, c0, c10, 5) +#define WCP14_DBGBCR11(val) MCR14(val, 0, c0, c11, 5) +#define WCP14_DBGBCR12(val) MCR14(val, 0, c0, c12, 5) +#define WCP14_DBGBCR13(val) MCR14(val, 0, c0, c13, 5) +#define WCP14_DBGBCR14(val) MCR14(val, 0, c0, c14, 5) +#define WCP14_DBGBCR15(val) MCR14(val, 0, c0, c15, 5) +#define WCP14_DBGWVR0(val) MCR14(val, 0, c0, c0, 6) +#define WCP14_DBGWVR1(val) MCR14(val, 0, c0, c1, 6) +#define WCP14_DBGWVR2(val) MCR14(val, 0, c0, c2, 6) +#define WCP14_DBGWVR3(val) MCR14(val, 0, c0, c3, 6) +#define WCP14_DBGWVR4(val) MCR14(val, 0, c0, c4, 6) +#define WCP14_DBGWVR5(val) MCR14(val, 0, c0, c5, 6) +#define WCP14_DBGWVR6(val) MCR14(val, 0, c0, c6, 6) +#define WCP14_DBGWVR7(val) MCR14(val, 0, c0, c7, 6) +#define WCP14_DBGWVR8(val) MCR14(val, 0, c0, c8, 6) +#define WCP14_DBGWVR9(val) MCR14(val, 0, c0, c9, 6) +#define WCP14_DBGWVR10(val) MCR14(val, 0, c0, c10, 6) +#define WCP14_DBGWVR11(val) MCR14(val, 0, c0, c11, 6) +#define WCP14_DBGWVR12(val) MCR14(val, 0, c0, c12, 6) +#define WCP14_DBGWVR13(val) MCR14(val, 0, c0, c13, 6) +#define WCP14_DBGWVR14(val) MCR14(val, 0, c0, c14, 6) +#define WCP14_DBGWVR15(val) MCR14(val, 0, c0, c15, 6) +#define WCP14_DBGWCR0(val) MCR14(val, 0, c0, c0, 7) +#define WCP14_DBGWCR1(val) MCR14(val, 0, c0, c1, 7) +#define WCP14_DBGWCR2(val) MCR14(val, 0, c0, c2, 7) +#define WCP14_DBGWCR3(val) MCR14(val, 0, c0, c3, 7) +#define WCP14_DBGWCR4(val) MCR14(val, 0, c0, c4, 7) +#define WCP14_DBGWCR5(val) MCR14(val, 0, c0, c5, 7) +#define WCP14_DBGWCR6(val) MCR14(val, 0, c0, c6, 7) +#define WCP14_DBGWCR7(val) MCR14(val, 0, c0, c7, 7) +#define WCP14_DBGWCR8(val) MCR14(val, 0, c0, c8, 7) +#define WCP14_DBGWCR9(val) MCR14(val, 0, c0, c9, 7) +#define WCP14_DBGWCR10(val) MCR14(val, 0, c0, c10, 7) +#define WCP14_DBGWCR11(val) MCR14(val, 0, c0, c11, 7) +#define WCP14_DBGWCR12(val) MCR14(val, 0, c0, c12, 7) +#define WCP14_DBGWCR13(val) MCR14(val, 0, c0, c13, 7) +#define WCP14_DBGWCR14(val) MCR14(val, 0, c0, c14, 7) +#define WCP14_DBGWCR15(val) MCR14(val, 0, c0, c15, 7) +#define WCP14_DBGBXVR0(val) MCR14(val, 0, c1, c0, 1) +#define WCP14_DBGBXVR1(val) MCR14(val, 0, c1, c1, 1) +#define WCP14_DBGBXVR2(val) MCR14(val, 0, c1, c2, 1) +#define WCP14_DBGBXVR3(val) MCR14(val, 0, c1, c3, 1) +#define WCP14_DBGBXVR4(val) MCR14(val, 0, c1, c4, 1) +#define WCP14_DBGBXVR5(val) MCR14(val, 0, c1, c5, 1) +#define WCP14_DBGBXVR6(val) MCR14(val, 0, c1, c6, 1) +#define WCP14_DBGBXVR7(val) MCR14(val, 0, c1, c7, 1) +#define WCP14_DBGBXVR8(val) MCR14(val, 0, c1, c8, 1) +#define WCP14_DBGBXVR9(val) MCR14(val, 0, c1, c9, 1) +#define WCP14_DBGBXVR10(val) MCR14(val, 0, c1, c10, 1) +#define WCP14_DBGBXVR11(val) MCR14(val, 0, c1, c11, 1) +#define WCP14_DBGBXVR12(val) MCR14(val, 0, c1, c12, 1) +#define WCP14_DBGBXVR13(val) MCR14(val, 0, c1, c13, 1) +#define WCP14_DBGBXVR14(val) 
MCR14(val, 0, c1, c14, 1) +#define WCP14_DBGBXVR15(val) MCR14(val, 0, c1, c15, 1) +#define WCP14_DBGOSLAR(val) MCR14(val, 0, c1, c0, 4) +#define WCP14_DBGOSSRR(val) MCR14(val, 0, c1, c2, 4) +#define WCP14_DBGOSDLR(val) MCR14(val, 0, c1, c3, 4) +#define WCP14_DBGPRCR(val) MCR14(val, 0, c1, c4, 4) +#define WCP14_DBGITCTRL(val) MCR14(val, 0, c7, c0, 4) +#define WCP14_DBGCLAIMSET(val) MCR14(val, 0, c7, c8, 6) +#define WCP14_DBGCLAIMCLR(val) MCR14(val, 0, c7, c9, 6) + +/* + * ETM Registers + * + * Available only in ETMv3.3, 3.4, 3.5 + * ETMASICCR, ETMTECR2, ETMFFRR, ETMVDEVR, ETMVDCR1, ETMVDCR2, ETMVDCR3, + * ETMDCVRn, ETMDCMRn + * + * Available only in ETMv3.5 as read only + * ETMIDR2 + * + * Available only in ETMv3.5, PFTv1.0, 1.1 + * ETMTSEVR, ETMVMIDCVR, ETMPDCR + * + * Read only + * ETMCCR, ETMSCR, ETMIDR, ETMCCER, ETMOSLSR + * ETMLSR, ETMAUTHSTATUS, ETMDEVID, ETMDEVTYPE, ETMPIDR4, ETMPIDR5, ETMPIDR6, + * ETMPIDR7, ETMPIDR0, ETMPIDR1, ETMPIDR2, ETMPIDR2, ETMPIDR3, ETMCIDR0, + * ETMCIDR1, ETMCIDR2, ETMCIDR3 + * + * Write only + * ETMOSLAR, ETMLAR + * Note: ETMCCER[11] controls WO nature of certain regs. Refer ETM arch spec. + */ +#define RCP14_ETMCR() MRC14(1, c0, c0, 0) +#define RCP14_ETMCCR() MRC14(1, c0, c1, 0) +#define RCP14_ETMTRIGGER() MRC14(1, c0, c2, 0) +#define RCP14_ETMASICCR() MRC14(1, c0, c3, 0) +#define RCP14_ETMSR() MRC14(1, c0, c4, 0) +#define RCP14_ETMSCR() MRC14(1, c0, c5, 0) +#define RCP14_ETMTSSCR() MRC14(1, c0, c6, 0) +#define RCP14_ETMTECR2() MRC14(1, c0, c7, 0) +#define RCP14_ETMTEEVR() MRC14(1, c0, c8, 0) +#define RCP14_ETMTECR1() MRC14(1, c0, c9, 0) +#define RCP14_ETMFFRR() MRC14(1, c0, c10, 0) +#define RCP14_ETMFFLR() MRC14(1, c0, c11, 0) +#define RCP14_ETMVDEVR() MRC14(1, c0, c12, 0) +#define RCP14_ETMVDCR1() MRC14(1, c0, c13, 0) +#define RCP14_ETMVDCR2() MRC14(1, c0, c14, 0) +#define RCP14_ETMVDCR3() MRC14(1, c0, c15, 0) +#define RCP14_ETMACVR0() MRC14(1, c0, c0, 1) +#define RCP14_ETMACVR1() MRC14(1, c0, c1, 1) +#define RCP14_ETMACVR2() MRC14(1, c0, c2, 1) +#define RCP14_ETMACVR3() MRC14(1, c0, c3, 1) +#define RCP14_ETMACVR4() MRC14(1, c0, c4, 1) +#define RCP14_ETMACVR5() MRC14(1, c0, c5, 1) +#define RCP14_ETMACVR6() MRC14(1, c0, c6, 1) +#define RCP14_ETMACVR7() MRC14(1, c0, c7, 1) +#define RCP14_ETMACVR8() MRC14(1, c0, c8, 1) +#define RCP14_ETMACVR9() MRC14(1, c0, c9, 1) +#define RCP14_ETMACVR10() MRC14(1, c0, c10, 1) +#define RCP14_ETMACVR11() MRC14(1, c0, c11, 1) +#define RCP14_ETMACVR12() MRC14(1, c0, c12, 1) +#define RCP14_ETMACVR13() MRC14(1, c0, c13, 1) +#define RCP14_ETMACVR14() MRC14(1, c0, c14, 1) +#define RCP14_ETMACVR15() MRC14(1, c0, c15, 1) +#define RCP14_ETMACTR0() MRC14(1, c0, c0, 2) +#define RCP14_ETMACTR1() MRC14(1, c0, c1, 2) +#define RCP14_ETMACTR2() MRC14(1, c0, c2, 2) +#define RCP14_ETMACTR3() MRC14(1, c0, c3, 2) +#define RCP14_ETMACTR4() MRC14(1, c0, c4, 2) +#define RCP14_ETMACTR5() MRC14(1, c0, c5, 2) +#define RCP14_ETMACTR6() MRC14(1, c0, c6, 2) +#define RCP14_ETMACTR7() MRC14(1, c0, c7, 2) +#define RCP14_ETMACTR8() MRC14(1, c0, c8, 2) +#define RCP14_ETMACTR9() MRC14(1, c0, c9, 2) +#define RCP14_ETMACTR10() MRC14(1, c0, c10, 2) +#define RCP14_ETMACTR11() MRC14(1, c0, c11, 2) +#define RCP14_ETMACTR12() MRC14(1, c0, c12, 2) +#define RCP14_ETMACTR13() MRC14(1, c0, c13, 2) +#define RCP14_ETMACTR14() MRC14(1, c0, c14, 2) +#define RCP14_ETMACTR15() MRC14(1, c0, c15, 2) +#define RCP14_ETMDCVR0() MRC14(1, c0, c0, 3) +#define RCP14_ETMDCVR2() MRC14(1, c0, c2, 3) +#define RCP14_ETMDCVR4() MRC14(1, c0, c4, 3) +#define RCP14_ETMDCVR6() MRC14(1, c0, 
c6, 3) +#define RCP14_ETMDCVR8() MRC14(1, c0, c8, 3) +#define RCP14_ETMDCVR10() MRC14(1, c0, c10, 3) +#define RCP14_ETMDCVR12() MRC14(1, c0, c12, 3) +#define RCP14_ETMDCVR14() MRC14(1, c0, c14, 3) +#define RCP14_ETMDCMR0() MRC14(1, c0, c0, 4) +#define RCP14_ETMDCMR2() MRC14(1, c0, c2, 4) +#define RCP14_ETMDCMR4() MRC14(1, c0, c4, 4) +#define RCP14_ETMDCMR6() MRC14(1, c0, c6, 4) +#define RCP14_ETMDCMR8() MRC14(1, c0, c8, 4) +#define RCP14_ETMDCMR10() MRC14(1, c0, c10, 4) +#define RCP14_ETMDCMR12() MRC14(1, c0, c12, 4) +#define RCP14_ETMDCMR14() MRC14(1, c0, c14, 4) +#define RCP14_ETMCNTRLDVR0() MRC14(1, c0, c0, 5) +#define RCP14_ETMCNTRLDVR1() MRC14(1, c0, c1, 5) +#define RCP14_ETMCNTRLDVR2() MRC14(1, c0, c2, 5) +#define RCP14_ETMCNTRLDVR3() MRC14(1, c0, c3, 5) +#define RCP14_ETMCNTENR0() MRC14(1, c0, c4, 5) +#define RCP14_ETMCNTENR1() MRC14(1, c0, c5, 5) +#define RCP14_ETMCNTENR2() MRC14(1, c0, c6, 5) +#define RCP14_ETMCNTENR3() MRC14(1, c0, c7, 5) +#define RCP14_ETMCNTRLDEVR0() MRC14(1, c0, c8, 5) +#define RCP14_ETMCNTRLDEVR1() MRC14(1, c0, c9, 5) +#define RCP14_ETMCNTRLDEVR2() MRC14(1, c0, c10, 5) +#define RCP14_ETMCNTRLDEVR3() MRC14(1, c0, c11, 5) +#define RCP14_ETMCNTVR0() MRC14(1, c0, c12, 5) +#define RCP14_ETMCNTVR1() MRC14(1, c0, c13, 5) +#define RCP14_ETMCNTVR2() MRC14(1, c0, c14, 5) +#define RCP14_ETMCNTVR3() MRC14(1, c0, c15, 5) +#define RCP14_ETMSQ12EVR() MRC14(1, c0, c0, 6) +#define RCP14_ETMSQ21EVR() MRC14(1, c0, c1, 6) +#define RCP14_ETMSQ23EVR() MRC14(1, c0, c2, 6) +#define RCP14_ETMSQ31EVR() MRC14(1, c0, c3, 6) +#define RCP14_ETMSQ32EVR() MRC14(1, c0, c4, 6) +#define RCP14_ETMSQ13EVR() MRC14(1, c0, c5, 6) +#define RCP14_ETMSQR() MRC14(1, c0, c7, 6) +#define RCP14_ETMEXTOUTEVR0() MRC14(1, c0, c8, 6) +#define RCP14_ETMEXTOUTEVR1() MRC14(1, c0, c9, 6) +#define RCP14_ETMEXTOUTEVR2() MRC14(1, c0, c10, 6) +#define RCP14_ETMEXTOUTEVR3() MRC14(1, c0, c11, 6) +#define RCP14_ETMCIDCVR0() MRC14(1, c0, c12, 6) +#define RCP14_ETMCIDCVR1() MRC14(1, c0, c13, 6) +#define RCP14_ETMCIDCVR2() MRC14(1, c0, c14, 6) +#define RCP14_ETMCIDCMR() MRC14(1, c0, c15, 6) +#define RCP14_ETMIMPSPEC0() MRC14(1, c0, c0, 7) +#define RCP14_ETMIMPSPEC1() MRC14(1, c0, c1, 7) +#define RCP14_ETMIMPSPEC2() MRC14(1, c0, c2, 7) +#define RCP14_ETMIMPSPEC3() MRC14(1, c0, c3, 7) +#define RCP14_ETMIMPSPEC4() MRC14(1, c0, c4, 7) +#define RCP14_ETMIMPSPEC5() MRC14(1, c0, c5, 7) +#define RCP14_ETMIMPSPEC6() MRC14(1, c0, c6, 7) +#define RCP14_ETMIMPSPEC7() MRC14(1, c0, c7, 7) +#define RCP14_ETMSYNCFR() MRC14(1, c0, c8, 7) +#define RCP14_ETMIDR() MRC14(1, c0, c9, 7) +#define RCP14_ETMCCER() MRC14(1, c0, c10, 7) +#define RCP14_ETMEXTINSELR() MRC14(1, c0, c11, 7) +#define RCP14_ETMTESSEICR() MRC14(1, c0, c12, 7) +#define RCP14_ETMEIBCR() MRC14(1, c0, c13, 7) +#define RCP14_ETMTSEVR() MRC14(1, c0, c14, 7) +#define RCP14_ETMAUXCR() MRC14(1, c0, c15, 7) +#define RCP14_ETMTRACEIDR() MRC14(1, c1, c0, 0) +#define RCP14_ETMIDR2() MRC14(1, c1, c2, 0) +#define RCP14_ETMVMIDCVR() MRC14(1, c1, c0, 1) +#define RCP14_ETMOSLSR() MRC14(1, c1, c1, 4) +/* Not available in PFTv1.1 */ +#define RCP14_ETMOSSRR() MRC14(1, c1, c2, 4) +#define RCP14_ETMPDCR() MRC14(1, c1, c4, 4) +#define RCP14_ETMPDSR() MRC14(1, c1, c5, 4) +#define RCP14_ETMITCTRL() MRC14(1, c7, c0, 4) +#define RCP14_ETMCLAIMSET() MRC14(1, c7, c8, 6) +#define RCP14_ETMCLAIMCLR() MRC14(1, c7, c9, 6) +#define RCP14_ETMLSR() MRC14(1, c7, c13, 6) +#define RCP14_ETMAUTHSTATUS() MRC14(1, c7, c14, 6) +#define RCP14_ETMDEVID() MRC14(1, c7, c2, 7) +#define RCP14_ETMDEVTYPE() MRC14(1, c7, c3, 
7) +#define RCP14_ETMPIDR4() MRC14(1, c7, c4, 7) +#define RCP14_ETMPIDR5() MRC14(1, c7, c5, 7) +#define RCP14_ETMPIDR6() MRC14(1, c7, c6, 7) +#define RCP14_ETMPIDR7() MRC14(1, c7, c7, 7) +#define RCP14_ETMPIDR0() MRC14(1, c7, c8, 7) +#define RCP14_ETMPIDR1() MRC14(1, c7, c9, 7) +#define RCP14_ETMPIDR2() MRC14(1, c7, c10, 7) +#define RCP14_ETMPIDR3() MRC14(1, c7, c11, 7) +#define RCP14_ETMCIDR0() MRC14(1, c7, c12, 7) +#define RCP14_ETMCIDR1() MRC14(1, c7, c13, 7) +#define RCP14_ETMCIDR2() MRC14(1, c7, c14, 7) +#define RCP14_ETMCIDR3() MRC14(1, c7, c15, 7) + +#define WCP14_ETMCR(val) MCR14(val, 1, c0, c0, 0) +#define WCP14_ETMTRIGGER(val) MCR14(val, 1, c0, c2, 0) +#define WCP14_ETMASICCR(val) MCR14(val, 1, c0, c3, 0) +#define WCP14_ETMSR(val) MCR14(val, 1, c0, c4, 0) +#define WCP14_ETMTSSCR(val) MCR14(val, 1, c0, c6, 0) +#define WCP14_ETMTECR2(val) MCR14(val, 1, c0, c7, 0) +#define WCP14_ETMTEEVR(val) MCR14(val, 1, c0, c8, 0) +#define WCP14_ETMTECR1(val) MCR14(val, 1, c0, c9, 0) +#define WCP14_ETMFFRR(val) MCR14(val, 1, c0, c10, 0) +#define WCP14_ETMFFLR(val) MCR14(val, 1, c0, c11, 0) +#define WCP14_ETMVDEVR(val) MCR14(val, 1, c0, c12, 0) +#define WCP14_ETMVDCR1(val) MCR14(val, 1, c0, c13, 0) +#define WCP14_ETMVDCR2(val) MCR14(val, 1, c0, c14, 0) +#define WCP14_ETMVDCR3(val) MCR14(val, 1, c0, c15, 0) +#define WCP14_ETMACVR0(val) MCR14(val, 1, c0, c0, 1) +#define WCP14_ETMACVR1(val) MCR14(val, 1, c0, c1, 1) +#define WCP14_ETMACVR2(val) MCR14(val, 1, c0, c2, 1) +#define WCP14_ETMACVR3(val) MCR14(val, 1, c0, c3, 1) +#define WCP14_ETMACVR4(val) MCR14(val, 1, c0, c4, 1) +#define WCP14_ETMACVR5(val) MCR14(val, 1, c0, c5, 1) +#define WCP14_ETMACVR6(val) MCR14(val, 1, c0, c6, 1) +#define WCP14_ETMACVR7(val) MCR14(val, 1, c0, c7, 1) +#define WCP14_ETMACVR8(val) MCR14(val, 1, c0, c8, 1) +#define WCP14_ETMACVR9(val) MCR14(val, 1, c0, c9, 1) +#define WCP14_ETMACVR10(val) MCR14(val, 1, c0, c10, 1) +#define WCP14_ETMACVR11(val) MCR14(val, 1, c0, c11, 1) +#define WCP14_ETMACVR12(val) MCR14(val, 1, c0, c12, 1) +#define WCP14_ETMACVR13(val) MCR14(val, 1, c0, c13, 1) +#define WCP14_ETMACVR14(val) MCR14(val, 1, c0, c14, 1) +#define WCP14_ETMACVR15(val) MCR14(val, 1, c0, c15, 1) +#define WCP14_ETMACTR0(val) MCR14(val, 1, c0, c0, 2) +#define WCP14_ETMACTR1(val) MCR14(val, 1, c0, c1, 2) +#define WCP14_ETMACTR2(val) MCR14(val, 1, c0, c2, 2) +#define WCP14_ETMACTR3(val) MCR14(val, 1, c0, c3, 2) +#define WCP14_ETMACTR4(val) MCR14(val, 1, c0, c4, 2) +#define WCP14_ETMACTR5(val) MCR14(val, 1, c0, c5, 2) +#define WCP14_ETMACTR6(val) MCR14(val, 1, c0, c6, 2) +#define WCP14_ETMACTR7(val) MCR14(val, 1, c0, c7, 2) +#define WCP14_ETMACTR8(val) MCR14(val, 1, c0, c8, 2) +#define WCP14_ETMACTR9(val) MCR14(val, 1, c0, c9, 2) +#define WCP14_ETMACTR10(val) MCR14(val, 1, c0, c10, 2) +#define WCP14_ETMACTR11(val) MCR14(val, 1, c0, c11, 2) +#define WCP14_ETMACTR12(val) MCR14(val, 1, c0, c12, 2) +#define WCP14_ETMACTR13(val) MCR14(val, 1, c0, c13, 2) +#define WCP14_ETMACTR14(val) MCR14(val, 1, c0, c14, 2) +#define WCP14_ETMACTR15(val) MCR14(val, 1, c0, c15, 2) +#define WCP14_ETMDCVR0(val) MCR14(val, 1, c0, c0, 3) +#define WCP14_ETMDCVR2(val) MCR14(val, 1, c0, c2, 3) +#define WCP14_ETMDCVR4(val) MCR14(val, 1, c0, c4, 3) +#define WCP14_ETMDCVR6(val) MCR14(val, 1, c0, c6, 3) +#define WCP14_ETMDCVR8(val) MCR14(val, 1, c0, c8, 3) +#define WCP14_ETMDCVR10(val) MCR14(val, 1, c0, c10, 3) +#define WCP14_ETMDCVR12(val) MCR14(val, 1, c0, c12, 3) +#define WCP14_ETMDCVR14(val) MCR14(val, 1, c0, c14, 3) +#define WCP14_ETMDCMR0(val) MCR14(val, 1, 
c0, c0, 4) +#define WCP14_ETMDCMR2(val) MCR14(val, 1, c0, c2, 4) +#define WCP14_ETMDCMR4(val) MCR14(val, 1, c0, c4, 4) +#define WCP14_ETMDCMR6(val) MCR14(val, 1, c0, c6, 4) +#define WCP14_ETMDCMR8(val) MCR14(val, 1, c0, c8, 4) +#define WCP14_ETMDCMR10(val) MCR14(val, 1, c0, c10, 4) +#define WCP14_ETMDCMR12(val) MCR14(val, 1, c0, c12, 4) +#define WCP14_ETMDCMR14(val) MCR14(val, 1, c0, c14, 4) +#define WCP14_ETMCNTRLDVR0(val) MCR14(val, 1, c0, c0, 5) +#define WCP14_ETMCNTRLDVR1(val) MCR14(val, 1, c0, c1, 5) +#define WCP14_ETMCNTRLDVR2(val) MCR14(val, 1, c0, c2, 5) +#define WCP14_ETMCNTRLDVR3(val) MCR14(val, 1, c0, c3, 5) +#define WCP14_ETMCNTENR0(val) MCR14(val, 1, c0, c4, 5) +#define WCP14_ETMCNTENR1(val) MCR14(val, 1, c0, c5, 5) +#define WCP14_ETMCNTENR2(val) MCR14(val, 1, c0, c6, 5) +#define WCP14_ETMCNTENR3(val) MCR14(val, 1, c0, c7, 5) +#define WCP14_ETMCNTRLDEVR0(val) MCR14(val, 1, c0, c8, 5) +#define WCP14_ETMCNTRLDEVR1(val) MCR14(val, 1, c0, c9, 5) +#define WCP14_ETMCNTRLDEVR2(val) MCR14(val, 1, c0, c10, 5) +#define WCP14_ETMCNTRLDEVR3(val) MCR14(val, 1, c0, c11, 5) +#define WCP14_ETMCNTVR0(val) MCR14(val, 1, c0, c12, 5) +#define WCP14_ETMCNTVR1(val) MCR14(val, 1, c0, c13, 5) +#define WCP14_ETMCNTVR2(val) MCR14(val, 1, c0, c14, 5) +#define WCP14_ETMCNTVR3(val) MCR14(val, 1, c0, c15, 5) +#define WCP14_ETMSQ12EVR(val) MCR14(val, 1, c0, c0, 6) +#define WCP14_ETMSQ21EVR(val) MCR14(val, 1, c0, c1, 6) +#define WCP14_ETMSQ23EVR(val) MCR14(val, 1, c0, c2, 6) +#define WCP14_ETMSQ31EVR(val) MCR14(val, 1, c0, c3, 6) +#define WCP14_ETMSQ32EVR(val) MCR14(val, 1, c0, c4, 6) +#define WCP14_ETMSQ13EVR(val) MCR14(val, 1, c0, c5, 6) +#define WCP14_ETMSQR(val) MCR14(val, 1, c0, c7, 6) +#define WCP14_ETMEXTOUTEVR0(val) MCR14(val, 1, c0, c8, 6) +#define WCP14_ETMEXTOUTEVR1(val) MCR14(val, 1, c0, c9, 6) +#define WCP14_ETMEXTOUTEVR2(val) MCR14(val, 1, c0, c10, 6) +#define WCP14_ETMEXTOUTEVR3(val) MCR14(val, 1, c0, c11, 6) +#define WCP14_ETMCIDCVR0(val) MCR14(val, 1, c0, c12, 6) +#define WCP14_ETMCIDCVR1(val) MCR14(val, 1, c0, c13, 6) +#define WCP14_ETMCIDCVR2(val) MCR14(val, 1, c0, c14, 6) +#define WCP14_ETMCIDCMR(val) MCR14(val, 1, c0, c15, 6) +#define WCP14_ETMIMPSPEC0(val) MCR14(val, 1, c0, c0, 7) +#define WCP14_ETMIMPSPEC1(val) MCR14(val, 1, c0, c1, 7) +#define WCP14_ETMIMPSPEC2(val) MCR14(val, 1, c0, c2, 7) +#define WCP14_ETMIMPSPEC3(val) MCR14(val, 1, c0, c3, 7) +#define WCP14_ETMIMPSPEC4(val) MCR14(val, 1, c0, c4, 7) +#define WCP14_ETMIMPSPEC5(val) MCR14(val, 1, c0, c5, 7) +#define WCP14_ETMIMPSPEC6(val) MCR14(val, 1, c0, c6, 7) +#define WCP14_ETMIMPSPEC7(val) MCR14(val, 1, c0, c7, 7) +/* Can be read only in ETMv3.4, ETMv3.5 */ +#define WCP14_ETMSYNCFR(val) MCR14(val, 1, c0, c8, 7) +#define WCP14_ETMEXTINSELR(val) MCR14(val, 1, c0, c11, 7) +#define WCP14_ETMTESSEICR(val) MCR14(val, 1, c0, c12, 7) +#define WCP14_ETMEIBCR(val) MCR14(val, 1, c0, c13, 7) +#define WCP14_ETMTSEVR(val) MCR14(val, 1, c0, c14, 7) +#define WCP14_ETMAUXCR(val) MCR14(val, 1, c0, c15, 7) +#define WCP14_ETMTRACEIDR(val) MCR14(val, 1, c1, c0, 0) +#define WCP14_ETMIDR2(val) MCR14(val, 1, c1, c2, 0) +#define WCP14_ETMVMIDCVR(val) MCR14(val, 1, c1, c0, 1) +#define WCP14_ETMOSLAR(val) MCR14(val, 1, c1, c0, 4) +/* Not available in PFTv1.1 */ +#define WCP14_ETMOSSRR(val) MCR14(val, 1, c1, c2, 4) +#define WCP14_ETMPDCR(val) MCR14(val, 1, c1, c4, 4) +#define WCP14_ETMPDSR(val) MCR14(val, 1, c1, c5, 4) +#define WCP14_ETMITCTRL(val) MCR14(val, 1, c7, c0, 4) +#define WCP14_ETMCLAIMSET(val) MCR14(val, 1, c7, c8, 6) +#define 
WCP14_ETMCLAIMCLR(val) MCR14(val, 1, c7, c9, 6) +/* Writes to this from CP14 interface are ignored */ +#define WCP14_ETMLAR(val) MCR14(val, 1, c7, c12, 6) + +#endif diff --git a/arch/arm/include/asm/hw_irq.h b/arch/arm/include/asm/hw_irq.h index a71b417b1856..af79da40af2a 100644 --- a/arch/arm/include/asm/hw_irq.h +++ b/arch/arm/include/asm/hw_irq.h @@ -8,6 +8,7 @@ static inline void ack_bad_irq(int irq) { extern unsigned long irq_err_count; irq_err_count++; + pr_crit("unexpected IRQ trap at vector %02x\n", irq); } void set_irq_flags(unsigned int irq, unsigned int flags); diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h index 7fc42784becb..8292b5f81e23 100644 --- a/arch/arm/include/asm/mach/pci.h +++ b/arch/arm/include/asm/mach/pci.h @@ -22,6 +22,9 @@ struct hw_pci { #ifdef CONFIG_PCI_DOMAINS int domain; #endif +#ifdef CONFIG_PCI_MSI + struct msi_controller *msi_ctrl; +#endif struct pci_ops *ops; int nr_controllers; void **private_data; @@ -36,8 +39,6 @@ struct hw_pci { resource_size_t start, resource_size_t size, resource_size_t align); - void (*add_bus)(struct pci_bus *bus); - void (*remove_bus)(struct pci_bus *bus); }; /* @@ -47,6 +48,9 @@ struct pci_sys_data { #ifdef CONFIG_PCI_DOMAINS int domain; #endif +#ifdef CONFIG_PCI_MSI + struct msi_controller *msi_ctrl; +#endif struct list_head node; int busnr; /* primary bus number */ u64 mem_offset; /* bus->cpu memory mapping offset */ @@ -65,8 +69,6 @@ struct pci_sys_data { resource_size_t start, resource_size_t size, resource_size_t align); - void (*add_bus)(struct pci_bus *bus); - void (*remove_bus)(struct pci_bus *bus); void *private_data; /* platform controller private data */ }; diff --git a/arch/arm/include/asm/mcpm.h b/arch/arm/include/asm/mcpm.h index d428e386c88e..3446f6a1d9fa 100644 --- a/arch/arm/include/asm/mcpm.h +++ b/arch/arm/include/asm/mcpm.h @@ -219,6 +219,23 @@ void __mcpm_outbound_leave_critical(unsigned int cluster, int state); bool __mcpm_outbound_enter_critical(unsigned int this_cpu, unsigned int cluster); int __mcpm_cluster_state(unsigned int cluster); +/** + * mcpm_sync_init - Initialize the cluster synchronization support + * + * @power_up_setup: platform specific function invoked during very + * early CPU/cluster bringup stage. + * + * This prepares memory used by vlocks and the MCPM state machine used + * across CPUs that may have their caches active or inactive. Must be + * called only after a successful call to mcpm_platform_register(). + * + * The power_up_setup argument is a pointer to assembly code called when + * the MMU and caches are still disabled during boot and no stack space is + * available. The affinity level passed to that code corresponds to the + * resource that needs to be initialized (e.g. 1 for cluster level, 0 for + * CPU level). Proper exclusion mechanisms are already activated at that + * point. + */ int __init mcpm_sync_init( void (*power_up_setup)(unsigned int affinity_level)); diff --git a/arch/arm/include/asm/percpu.h b/arch/arm/include/asm/percpu.h index 209e6504922e..a89b4076cde4 100644 --- a/arch/arm/include/asm/percpu.h +++ b/arch/arm/include/asm/percpu.h @@ -30,14 +30,14 @@ static inline void set_my_cpu_offset(unsigned long off) static inline unsigned long __my_cpu_offset(void) { unsigned long off; - register unsigned long *sp asm ("sp"); /* * Read TPIDRPRW. * We want to allow caching the value, so avoid using volatile and * instead use a fake stack read to hazard against barrier(). 
*/ - asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off) : "Q" (*sp)); + asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off) + : "Q" (*(const unsigned long *)current_stack_pointer)); return off; } diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h index 78a779361682..19cfab526d13 100644 --- a/arch/arm/include/asm/pgalloc.h +++ b/arch/arm/include/asm/pgalloc.h @@ -157,7 +157,15 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep) static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep) { - __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE); + extern pmdval_t user_pmd_table; + pmdval_t prot; + + if (__LINUX_ARM_ARCH__ >= 6 && !IS_ENABLED(CONFIG_ARM_LPAE)) + prot = user_pmd_table; + else + prot = _PAGE_USER_TABLE; + + __pmd_populate(pmdp, page_to_phys(ptep), prot); } #define pmd_pgtable(pmd) pmd_page(pmd) diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h index 5cfba15cb401..5e68278e953e 100644 --- a/arch/arm/include/asm/pgtable-2level-hwdef.h +++ b/arch/arm/include/asm/pgtable-2level-hwdef.h @@ -20,12 +20,14 @@ #define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0) #define PMD_TYPE_TABLE (_AT(pmdval_t, 1) << 0) #define PMD_TYPE_SECT (_AT(pmdval_t, 2) << 0) +#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */ #define PMD_BIT4 (_AT(pmdval_t, 1) << 4) #define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5) #define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */ /* * - section */ +#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */ #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2) #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3) #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */ diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h index 9fd61c72a33a..f8f1cff62065 100644 --- a/arch/arm/include/asm/pgtable-3level-hwdef.h +++ b/arch/arm/include/asm/pgtable-3level-hwdef.h @@ -76,6 +76,7 @@ #define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */ #define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */ #define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */ +#define PTE_EXT_PXN (_AT(pteval_t, 1) << 53) /* PXN */ #define PTE_EXT_XN (_AT(pteval_t, 1) << 54) /* XN */ /* diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index 3b30062975b2..d5cac545ba33 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -252,17 +252,57 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, set_pte_ext(ptep, pteval, ext); } -#define PTE_BIT_FUNC(fn,op) \ -static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; } - -PTE_BIT_FUNC(wrprotect, |= L_PTE_RDONLY); -PTE_BIT_FUNC(mkwrite, &= ~L_PTE_RDONLY); -PTE_BIT_FUNC(mkclean, &= ~L_PTE_DIRTY); -PTE_BIT_FUNC(mkdirty, |= L_PTE_DIRTY); -PTE_BIT_FUNC(mkold, &= ~L_PTE_YOUNG); -PTE_BIT_FUNC(mkyoung, |= L_PTE_YOUNG); -PTE_BIT_FUNC(mkexec, &= ~L_PTE_XN); -PTE_BIT_FUNC(mknexec, |= L_PTE_XN); +static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot) +{ + pte_val(pte) &= ~pgprot_val(prot); + return pte; +} + +static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot) +{ + pte_val(pte) |= pgprot_val(prot); + return pte; +} + +static inline pte_t pte_wrprotect(pte_t pte) +{ + return set_pte_bit(pte, __pgprot(L_PTE_RDONLY)); +} + +static inline pte_t pte_mkwrite(pte_t pte) +{ + return clear_pte_bit(pte, __pgprot(L_PTE_RDONLY)); +} + +static inline pte_t pte_mkclean(pte_t pte) +{ + return clear_pte_bit(pte, 
__pgprot(L_PTE_DIRTY)); +} + +static inline pte_t pte_mkdirty(pte_t pte) +{ + return set_pte_bit(pte, __pgprot(L_PTE_DIRTY)); +} + +static inline pte_t pte_mkold(pte_t pte) +{ + return clear_pte_bit(pte, __pgprot(L_PTE_YOUNG)); +} + +static inline pte_t pte_mkyoung(pte_t pte) +{ + return set_pte_bit(pte, __pgprot(L_PTE_YOUNG)); +} + +static inline pte_t pte_mkexec(pte_t pte) +{ + return clear_pte_bit(pte, __pgprot(L_PTE_XN)); +} + +static inline pte_t pte_mknexec(pte_t pte) +{ + return set_pte_bit(pte, __pgprot(L_PTE_XN)); +} static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h index 601264d983fa..51622ba7c4a6 100644 --- a/arch/arm/include/asm/ptrace.h +++ b/arch/arm/include/asm/ptrace.h @@ -154,9 +154,8 @@ static inline unsigned long user_stack_pointer(struct pt_regs *regs) return regs->ARM_sp; } -#define current_pt_regs(void) ({ \ - register unsigned long sp asm ("sp"); \ - (struct pt_regs *)((sp | (THREAD_SIZE - 1)) - 7) - 1; \ +#define current_pt_regs(void) ({ (struct pt_regs *) \ + ((current_stack_pointer | (THREAD_SIZE - 1)) - 7) - 1; \ }) #endif /* __ASSEMBLY__ */ diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index ce73ab635414..d890e41f5520 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h @@ -90,14 +90,19 @@ struct thread_info { #define init_stack (init_thread_union.stack) /* + * how to get the current stack pointer in C + */ +register unsigned long current_stack_pointer asm ("sp"); + +/* * how to get the thread information struct from C */ static inline struct thread_info *current_thread_info(void) __attribute_const__; static inline struct thread_info *current_thread_info(void) { - register unsigned long sp asm ("sp"); - return (struct thread_info *)(sp & ~(THREAD_SIZE - 1)); + return (struct thread_info *) + (current_stack_pointer & ~(THREAD_SIZE - 1)); } #define thread_saved_pc(tsk) \ diff --git a/arch/arm/include/asm/vfp.h b/arch/arm/include/asm/vfp.h index f4ab34fd4f72..ee5f3084243c 100644 --- a/arch/arm/include/asm/vfp.h +++ b/arch/arm/include/asm/vfp.h @@ -22,6 +22,7 @@ #define FPSID_NODOUBLE (1<<20) #define FPSID_ARCH_BIT (16) #define FPSID_ARCH_MASK (0xF << FPSID_ARCH_BIT) +#define FPSID_CPUID_ARCH_MASK (0x7F << FPSID_ARCH_BIT) #define FPSID_PART_BIT (8) #define FPSID_PART_MASK (0xFF << FPSID_PART_BIT) #define FPSID_VARIANT_BIT (4) @@ -75,6 +76,10 @@ /* MVFR0 bits */ #define MVFR0_A_SIMD_BIT (0) #define MVFR0_A_SIMD_MASK (0xf << MVFR0_A_SIMD_BIT) +#define MVFR0_SP_BIT (4) +#define MVFR0_SP_MASK (0xf << MVFR0_SP_BIT) +#define MVFR0_DP_BIT (8) +#define MVFR0_DP_MASK (0xf << MVFR0_DP_BIT) /* Bit patterns for decoding the packaged operation descriptors */ #define VFPOPDESC_LENGTH_BIT (9) diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h index e8275ea88e88..efd562412850 100644 --- a/arch/arm/include/asm/xen/page-coherent.h +++ b/arch/arm/include/asm/xen/page-coherent.h @@ -5,6 +5,18 @@ #include <linux/dma-attrs.h> #include <linux/dma-mapping.h> +void __xen_dma_map_page(struct device *hwdev, struct page *page, + dma_addr_t dev_addr, unsigned long offset, size_t size, + enum dma_data_direction dir, struct dma_attrs *attrs); +void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, + size_t size, enum dma_data_direction dir, + struct dma_attrs *attrs); +void __xen_dma_sync_single_for_cpu(struct device *hwdev, + dma_addr_t handle, size_t size, enum 
dma_data_direction dir); + +void __xen_dma_sync_single_for_device(struct device *hwdev, + dma_addr_t handle, size_t size, enum dma_data_direction dir); + static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags, struct dma_attrs *attrs) @@ -20,20 +32,56 @@ static inline void xen_free_coherent_pages(struct device *hwdev, size_t size, } static inline void xen_dma_map_page(struct device *hwdev, struct page *page, - unsigned long offset, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + dma_addr_t dev_addr, unsigned long offset, size_t size, + enum dma_data_direction dir, struct dma_attrs *attrs) { - __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs); + bool local = PFN_DOWN(dev_addr) == page_to_pfn(page); + /* Dom0 is mapped 1:1, so if pfn == mfn the page is local otherwise + * is a foreign page grant-mapped in dom0. If the page is local we + * can safely call the native dma_ops function, otherwise we call + * the xen specific function. */ + if (local) + __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs); + else + __xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs); } -void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, +static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs); + struct dma_attrs *attrs) +{ + unsigned long pfn = PFN_DOWN(handle); + /* Dom0 is mapped 1:1, so calling pfn_valid on a foreign mfn will + * always return false. If the page is local we can safely call the + * native dma_ops function, otherwise we call the xen specific + * function. */ + if (pfn_valid(pfn)) { + if (__generic_dma_ops(hwdev)->unmap_page) + __generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs); + } else + __xen_dma_unmap_page(hwdev, handle, size, dir, attrs); +} -void xen_dma_sync_single_for_cpu(struct device *hwdev, - dma_addr_t handle, size_t size, enum dma_data_direction dir); +static inline void xen_dma_sync_single_for_cpu(struct device *hwdev, + dma_addr_t handle, size_t size, enum dma_data_direction dir) +{ + unsigned long pfn = PFN_DOWN(handle); + if (pfn_valid(pfn)) { + if (__generic_dma_ops(hwdev)->sync_single_for_cpu) + __generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir); + } else + __xen_dma_sync_single_for_cpu(hwdev, handle, size, dir); +} -void xen_dma_sync_single_for_device(struct device *hwdev, - dma_addr_t handle, size_t size, enum dma_data_direction dir); +static inline void xen_dma_sync_single_for_device(struct device *hwdev, + dma_addr_t handle, size_t size, enum dma_data_direction dir) +{ + unsigned long pfn = PFN_DOWN(handle); + if (pfn_valid(pfn)) { + if (__generic_dma_ops(hwdev)->sync_single_for_device) + __generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir); + } else + __xen_dma_sync_single_for_device(hwdev, handle, size, dir); +} #endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */ diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h index 135c24a5ba26..68c739b3fdf4 100644 --- a/arch/arm/include/asm/xen/page.h +++ b/arch/arm/include/asm/xen/page.h @@ -107,4 +107,8 @@ static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn) #define xen_remap(cookie, size) ioremap_cache((cookie), (size)) #define xen_unmap(cookie) iounmap((cookie)) +bool xen_arch_need_swiotlb(struct device *dev, + unsigned long pfn, + unsigned long mfn); + #endif /* 
_ASM_ARM_XEN_PAGE_H */ diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile index 8dcbed5016ac..fb2b71ebe3f2 100644 --- a/arch/arm/kernel/Makefile +++ b/arch/arm/kernel/Makefile @@ -30,7 +30,6 @@ else obj-y += entry-armv.o endif -obj-$(CONFIG_OC_ETM) += etm.o obj-$(CONFIG_CPU_IDLE) += cpuidle.o obj-$(CONFIG_ISA_DMA_API) += dma.o obj-$(CONFIG_FIQ) += fiq.o fiqasm.o @@ -47,6 +46,7 @@ endif obj-$(CONFIG_HAVE_ARM_SCU) += smp_scu.o obj-$(CONFIG_HAVE_ARM_TWD) += smp_twd.o obj-$(CONFIG_ARM_ARCH_TIMER) += arch_timer.o +obj-$(CONFIG_FUNCTION_TRACER) += entry-ftrace.o obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o insn.o obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o insn.o obj-$(CONFIG_JUMP_LABEL) += jump_label.o insn.o patch.o @@ -67,7 +67,7 @@ test-kprobes-objs += kprobes-test-arm.o endif obj-$(CONFIG_OABI_COMPAT) += sys_oabi-compat.o obj-$(CONFIG_ARM_THUMBEE) += thumbee.o -obj-$(CONFIG_KGDB) += kgdb.o +obj-$(CONFIG_KGDB) += kgdb.o patch.o obj-$(CONFIG_ARM_UNWIND) += unwind.o obj-$(CONFIG_HAVE_TCM) += tcm.o obj-$(CONFIG_OF) += devtree.o @@ -84,6 +84,7 @@ obj-$(CONFIG_CPU_PJ4B) += pj4-cp0.o obj-$(CONFIG_IWMMXT) += iwmmxt.o obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o perf_event_cpu.o +CFLAGS_pj4-cp0.o := -marm AFLAGS_iwmmxt.o := -Wa,-mcpu=iwmmxt obj-$(CONFIG_ARM_CPU_TOPOLOGY) += topology.o diff --git a/arch/arm/kernel/atags_compat.c b/arch/arm/kernel/atags_compat.c index 5236ad38f417..05c28b12353c 100644 --- a/arch/arm/kernel/atags_compat.c +++ b/arch/arm/kernel/atags_compat.c @@ -97,8 +97,7 @@ static void __init build_tag_list(struct param_struct *params, void *taglist) struct tag *tag = taglist; if (params->u1.s.page_size != PAGE_SIZE) { - printk(KERN_WARNING "Warning: bad configuration page, " - "trying to continue\n"); + pr_warn("Warning: bad configuration page, trying to continue\n"); return; } @@ -109,8 +108,7 @@ static void __init build_tag_list(struct param_struct *params, void *taglist) params->u1.s.nr_pages != 0x04000 && params->u1.s.nr_pages != 0x08000 && params->u1.s.nr_pages != 0x10000) { - printk(KERN_WARNING "Warning: bad NeTTrom parameters " - "detected, using defaults\n"); + pr_warn("Warning: bad NeTTrom parameters detected, using defaults\n"); params->u1.s.nr_pages = 0x1000; /* 16MB */ params->u1.s.ramdisk_size = 0; diff --git a/arch/arm/kernel/atags_parse.c b/arch/arm/kernel/atags_parse.c index 528f8af2addb..68c6ae0b9e4c 100644 --- a/arch/arm/kernel/atags_parse.c +++ b/arch/arm/kernel/atags_parse.c @@ -167,8 +167,7 @@ static void __init parse_tags(const struct tag *t) { for (; t->hdr.size; t = tag_next(t)) if (!parse_tag(t)) - printk(KERN_WARNING - "Ignoring unrecognised tag 0x%08x\n", + pr_warn("Ignoring unrecognised tag 0x%08x\n", t->hdr.tag); } @@ -193,7 +192,7 @@ setup_machine_tags(phys_addr_t __atags_pointer, unsigned int machine_nr) */ for_each_machine_desc(p) if (machine_nr == p->nr) { - printk("Machine: %s\n", p->name); + pr_info("Machine: %s\n", p->name); mdesc = p; break; } diff --git a/arch/arm/kernel/atags_proc.c b/arch/arm/kernel/atags_proc.c index c7ff8073416f..5a3379055f55 100644 --- a/arch/arm/kernel/atags_proc.c +++ b/arch/arm/kernel/atags_proc.c @@ -41,7 +41,7 @@ static int __init init_atags_procfs(void) size_t size; if (tag->hdr.tag != ATAG_CORE) { - printk(KERN_INFO "No ATAGs?"); + pr_info("No ATAGs?"); return -EINVAL; } @@ -68,7 +68,7 @@ static int __init init_atags_procfs(void) nomem: kfree(b); - printk(KERN_ERR "Exporting ATAGs: not enough memory\n"); + pr_err("Exporting ATAGs: not enough 
memory\n"); return -ENOMEM; } diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c index 17a26c17f7f5..a4effd6d8f2f 100644 --- a/arch/arm/kernel/bios32.c +++ b/arch/arm/kernel/bios32.c @@ -18,6 +18,15 @@ static int debug_pci; +#ifdef CONFIG_PCI_MSI +struct msi_controller *pcibios_msi_controller(struct pci_dev *dev) +{ + struct pci_sys_data *sysdata = dev->bus->sysdata; + + return sysdata->msi_ctrl; +} +#endif + /* * We can't use pci_get_device() here since we are * called from interrupt context. @@ -355,25 +364,11 @@ void pcibios_fixup_bus(struct pci_bus *bus) /* * Report what we did for this bus */ - printk(KERN_INFO "PCI: bus%d: Fast back to back transfers %sabled\n", + pr_info("PCI: bus%d: Fast back to back transfers %sabled\n", bus->number, (features & PCI_COMMAND_FAST_BACK) ? "en" : "dis"); } EXPORT_SYMBOL(pcibios_fixup_bus); -void pcibios_add_bus(struct pci_bus *bus) -{ - struct pci_sys_data *sys = bus->sysdata; - if (sys->add_bus) - sys->add_bus(bus); -} - -void pcibios_remove_bus(struct pci_bus *bus) -{ - struct pci_sys_data *sys = bus->sysdata; - if (sys->remove_bus) - sys->remove_bus(bus); -} - /* * Swizzle the device pin each time we cross a bridge. If a platform does * not provide a swizzle function, we perform the standard PCI swizzling. @@ -471,12 +466,13 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw, #ifdef CONFIG_PCI_DOMAINS sys->domain = hw->domain; #endif +#ifdef CONFIG_PCI_MSI + sys->msi_ctrl = hw->msi_ctrl; +#endif sys->busnr = busnr; sys->swizzle = hw->swizzle; sys->map_irq = hw->map_irq; sys->align_resource = hw->align_resource; - sys->add_bus = hw->add_bus; - sys->remove_bus = hw->remove_bus; INIT_LIST_HEAD(&sys->resources); if (hw->private_data) diff --git a/arch/arm/kernel/dma-isa.c b/arch/arm/kernel/dma-isa.c index 360bb6d701f5..84363fe7bad2 100644 --- a/arch/arm/kernel/dma-isa.c +++ b/arch/arm/kernel/dma-isa.c @@ -213,8 +213,8 @@ void __init isa_init_dma(void) for (chan = 0; chan < 8; chan++) { int ret = isa_dma_add(chan, &isa_dma[chan]); if (ret) - printk(KERN_ERR "ISADMA%u: unable to register: %d\n", - chan, ret); + pr_err("ISADMA%u: unable to register: %d\n", + chan, ret); } request_dma(DMA_ISA_CASCADE, "cascade"); diff --git a/arch/arm/kernel/dma.c b/arch/arm/kernel/dma.c index 7b829d9663b1..e651c4d0a0d9 100644 --- a/arch/arm/kernel/dma.c +++ b/arch/arm/kernel/dma.c @@ -79,7 +79,7 @@ int request_dma(unsigned int chan, const char *device_id) return ret; bad_dma: - printk(KERN_ERR "dma: trying to allocate DMA%d\n", chan); + pr_err("dma: trying to allocate DMA%d\n", chan); return -EINVAL; busy: @@ -100,7 +100,7 @@ void free_dma(unsigned int chan) goto bad_dma; if (dma->active) { - printk(KERN_ERR "dma%d: freeing active DMA\n", chan); + pr_err("dma%d: freeing active DMA\n", chan); dma->d_ops->disable(chan, dma); dma->active = 0; } @@ -111,11 +111,11 @@ void free_dma(unsigned int chan) return; } - printk(KERN_ERR "dma%d: trying to free free DMA\n", chan); + pr_err("dma%d: trying to free free DMA\n", chan); return; bad_dma: - printk(KERN_ERR "dma: trying to free DMA%d\n", chan); + pr_err("dma: trying to free DMA%d\n", chan); } EXPORT_SYMBOL(free_dma); @@ -126,8 +126,7 @@ void set_dma_sg (unsigned int chan, struct scatterlist *sg, int nr_sg) dma_t *dma = dma_channel(chan); if (dma->active) - printk(KERN_ERR "dma%d: altering DMA SG while " - "DMA active\n", chan); + pr_err("dma%d: altering DMA SG while DMA active\n", chan); dma->sg = sg; dma->sgcount = nr_sg; @@ -144,8 +143,7 @@ void __set_dma_addr (unsigned int chan, void *addr) 
dma_t *dma = dma_channel(chan); if (dma->active) - printk(KERN_ERR "dma%d: altering DMA address while " - "DMA active\n", chan); + pr_err("dma%d: altering DMA address while DMA active\n", chan); dma->sg = NULL; dma->addr = addr; @@ -162,8 +160,7 @@ void set_dma_count (unsigned int chan, unsigned long count) dma_t *dma = dma_channel(chan); if (dma->active) - printk(KERN_ERR "dma%d: altering DMA count while " - "DMA active\n", chan); + pr_err("dma%d: altering DMA count while DMA active\n", chan); dma->sg = NULL; dma->count = count; @@ -178,8 +175,7 @@ void set_dma_mode (unsigned int chan, unsigned int mode) dma_t *dma = dma_channel(chan); if (dma->active) - printk(KERN_ERR "dma%d: altering DMA mode while " - "DMA active\n", chan); + pr_err("dma%d: altering DMA mode while DMA active\n", chan); dma->dma_mode = mode; dma->invalid = 1; @@ -202,7 +198,7 @@ void enable_dma (unsigned int chan) return; free_dma: - printk(KERN_ERR "dma%d: trying to enable free DMA\n", chan); + pr_err("dma%d: trying to enable free DMA\n", chan); BUG(); } EXPORT_SYMBOL(enable_dma); @@ -223,7 +219,7 @@ void disable_dma (unsigned int chan) return; free_dma: - printk(KERN_ERR "dma%d: trying to disable free DMA\n", chan); + pr_err("dma%d: trying to disable free DMA\n", chan); BUG(); } EXPORT_SYMBOL(disable_dma); @@ -240,7 +236,7 @@ EXPORT_SYMBOL(dma_channel_active); void set_dma_page(unsigned int chan, char pagenr) { - printk(KERN_ERR "dma%d: trying to set_dma_page\n", chan); + pr_err("dma%d: trying to set_dma_page\n", chan); } EXPORT_SYMBOL(set_dma_page); diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index 6bb09d4abdea..f8ccc21fa032 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -109,241 +109,6 @@ ENDPROC(ret_from_fork) #undef CALL #define CALL(x) .long x -#ifdef CONFIG_FUNCTION_TRACER -/* - * When compiling with -pg, gcc inserts a call to the mcount routine at the - * start of every function. In mcount, apart from the function's address (in - * lr), we need to get hold of the function's caller's address. - * - * Older GCCs (pre-4.4) inserted a call to a routine called mcount like this: - * - * bl mcount - * - * These versions have the limitation that in order for the mcount routine to - * be able to determine the function's caller's address, an APCS-style frame - * pointer (which is set up with something like the code below) is required. - * - * mov ip, sp - * push {fp, ip, lr, pc} - * sub fp, ip, #4 - * - * With EABI, these frame pointers are not available unless -mapcs-frame is - * specified, and if building as Thumb-2, not even then. - * - * Newer GCCs (4.4+) solve this problem by introducing a new version of mcount, - * with call sites like: - * - * push {lr} - * bl __gnu_mcount_nc - * - * With these compilers, frame pointers are not necessary. - * - * mcount can be thought of as a function called in the middle of a subroutine - * call. As such, it needs to be transparent for both the caller and the - * callee: the original lr needs to be restored when leaving mcount, and no - * registers should be clobbered. (In the __gnu_mcount_nc implementation, we - * clobber the ip register. This is OK because the ARM calling convention - * allows it to be clobbered in subroutines and doesn't use it to hold - * parameters.) - * - * When using dynamic ftrace, we patch out the mcount call by a "mov r0, r0" - * for the mcount case, and a "pop {lr}" for the __gnu_mcount_nc case (see - * arch/arm/kernel/ftrace.c). 
- */ - -#ifndef CONFIG_OLD_MCOUNT -#if (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 4)) -#error Ftrace requires CONFIG_FRAME_POINTER=y with GCC older than 4.4.0. -#endif -#endif - -.macro mcount_adjust_addr rd, rn - bic \rd, \rn, #1 @ clear the Thumb bit if present - sub \rd, \rd, #MCOUNT_INSN_SIZE -.endm - -.macro __mcount suffix - mcount_enter - ldr r0, =ftrace_trace_function - ldr r2, [r0] - adr r0, .Lftrace_stub - cmp r0, r2 - bne 1f - -#ifdef CONFIG_FUNCTION_GRAPH_TRACER - ldr r1, =ftrace_graph_return - ldr r2, [r1] - cmp r0, r2 - bne ftrace_graph_caller\suffix - - ldr r1, =ftrace_graph_entry - ldr r2, [r1] - ldr r0, =ftrace_graph_entry_stub - cmp r0, r2 - bne ftrace_graph_caller\suffix -#endif - - mcount_exit - -1: mcount_get_lr r1 @ lr of instrumented func - mcount_adjust_addr r0, lr @ instrumented function - adr lr, BSYM(2f) - mov pc, r2 -2: mcount_exit -.endm - -.macro __ftrace_caller suffix - mcount_enter - - mcount_get_lr r1 @ lr of instrumented func - mcount_adjust_addr r0, lr @ instrumented function - - .globl ftrace_call\suffix -ftrace_call\suffix: - bl ftrace_stub - -#ifdef CONFIG_FUNCTION_GRAPH_TRACER - .globl ftrace_graph_call\suffix -ftrace_graph_call\suffix: - mov r0, r0 -#endif - - mcount_exit -.endm - -.macro __ftrace_graph_caller - sub r0, fp, #4 @ &lr of instrumented routine (&parent) -#ifdef CONFIG_DYNAMIC_FTRACE - @ called from __ftrace_caller, saved in mcount_enter - ldr r1, [sp, #16] @ instrumented routine (func) - mcount_adjust_addr r1, r1 -#else - @ called from __mcount, untouched in lr - mcount_adjust_addr r1, lr @ instrumented routine (func) -#endif - mov r2, fp @ frame pointer - bl prepare_ftrace_return - mcount_exit -.endm - -#ifdef CONFIG_OLD_MCOUNT -/* - * mcount - */ - -.macro mcount_enter - stmdb sp!, {r0-r3, lr} -.endm - -.macro mcount_get_lr reg - ldr \reg, [fp, #-4] -.endm - -.macro mcount_exit - ldr lr, [fp, #-4] - ldmia sp!, {r0-r3, pc} -.endm - -ENTRY(mcount) -#ifdef CONFIG_DYNAMIC_FTRACE - stmdb sp!, {lr} - ldr lr, [fp, #-4] - ldmia sp!, {pc} -#else - __mcount _old -#endif -ENDPROC(mcount) - -#ifdef CONFIG_DYNAMIC_FTRACE -ENTRY(ftrace_caller_old) - __ftrace_caller _old -ENDPROC(ftrace_caller_old) -#endif - -#ifdef CONFIG_FUNCTION_GRAPH_TRACER -ENTRY(ftrace_graph_caller_old) - __ftrace_graph_caller -ENDPROC(ftrace_graph_caller_old) -#endif - -.purgem mcount_enter -.purgem mcount_get_lr -.purgem mcount_exit -#endif - -/* - * __gnu_mcount_nc - */ - -.macro mcount_enter -/* - * This pad compensates for the push {lr} at the call site. Note that we are - * unable to unwind through a function which does not otherwise save its lr. 
- */ - UNWIND(.pad #4) - stmdb sp!, {r0-r3, lr} - UNWIND(.save {r0-r3, lr}) -.endm - -.macro mcount_get_lr reg - ldr \reg, [sp, #20] -.endm - -.macro mcount_exit - ldmia sp!, {r0-r3, ip, lr} - ret ip -.endm - -ENTRY(__gnu_mcount_nc) -UNWIND(.fnstart) -#ifdef CONFIG_DYNAMIC_FTRACE - mov ip, lr - ldmia sp!, {lr} - ret ip -#else - __mcount -#endif -UNWIND(.fnend) -ENDPROC(__gnu_mcount_nc) - -#ifdef CONFIG_DYNAMIC_FTRACE -ENTRY(ftrace_caller) -UNWIND(.fnstart) - __ftrace_caller -UNWIND(.fnend) -ENDPROC(ftrace_caller) -#endif - -#ifdef CONFIG_FUNCTION_GRAPH_TRACER -ENTRY(ftrace_graph_caller) -UNWIND(.fnstart) - __ftrace_graph_caller -UNWIND(.fnend) -ENDPROC(ftrace_graph_caller) -#endif - -.purgem mcount_enter -.purgem mcount_get_lr -.purgem mcount_exit - -#ifdef CONFIG_FUNCTION_GRAPH_TRACER - .globl return_to_handler -return_to_handler: - stmdb sp!, {r0-r3} - mov r0, fp @ frame pointer - bl ftrace_return_to_handler - mov lr, r0 @ r0 has real ret addr - ldmia sp!, {r0-r3} - ret lr -#endif - -ENTRY(ftrace_stub) -.Lftrace_stub: - ret lr -ENDPROC(ftrace_stub) - -#endif /* CONFIG_FUNCTION_TRACER */ - /*============================================================================= * SWI handler *----------------------------------------------------------------------------- diff --git a/arch/arm/kernel/entry-ftrace.S b/arch/arm/kernel/entry-ftrace.S new file mode 100644 index 000000000000..fe57c73e70a4 --- /dev/null +++ b/arch/arm/kernel/entry-ftrace.S @@ -0,0 +1,243 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <asm/assembler.h> +#include <asm/ftrace.h> +#include <asm/unwind.h> + +#include "entry-header.S" + +/* + * When compiling with -pg, gcc inserts a call to the mcount routine at the + * start of every function. In mcount, apart from the function's address (in + * lr), we need to get hold of the function's caller's address. + * + * Older GCCs (pre-4.4) inserted a call to a routine called mcount like this: + * + * bl mcount + * + * These versions have the limitation that in order for the mcount routine to + * be able to determine the function's caller's address, an APCS-style frame + * pointer (which is set up with something like the code below) is required. + * + * mov ip, sp + * push {fp, ip, lr, pc} + * sub fp, ip, #4 + * + * With EABI, these frame pointers are not available unless -mapcs-frame is + * specified, and if building as Thumb-2, not even then. + * + * Newer GCCs (4.4+) solve this problem by introducing a new version of mcount, + * with call sites like: + * + * push {lr} + * bl __gnu_mcount_nc + * + * With these compilers, frame pointers are not necessary. + * + * mcount can be thought of as a function called in the middle of a subroutine + * call. As such, it needs to be transparent for both the caller and the + * callee: the original lr needs to be restored when leaving mcount, and no + * registers should be clobbered. (In the __gnu_mcount_nc implementation, we + * clobber the ip register. This is OK because the ARM calling convention + * allows it to be clobbered in subroutines and doesn't use it to hold + * parameters.) + * + * When using dynamic ftrace, we patch out the mcount call by a "mov r0, r0" + * for the mcount case, and a "pop {lr}" for the __gnu_mcount_nc case (see + * arch/arm/kernel/ftrace.c). 
+ */ + +#ifndef CONFIG_OLD_MCOUNT +#if (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 4)) +#error Ftrace requires CONFIG_FRAME_POINTER=y with GCC older than 4.4.0. +#endif +#endif + +.macro mcount_adjust_addr rd, rn + bic \rd, \rn, #1 @ clear the Thumb bit if present + sub \rd, \rd, #MCOUNT_INSN_SIZE +.endm + +.macro __mcount suffix + mcount_enter + ldr r0, =ftrace_trace_function + ldr r2, [r0] + adr r0, .Lftrace_stub + cmp r0, r2 + bne 1f + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + ldr r1, =ftrace_graph_return + ldr r2, [r1] + cmp r0, r2 + bne ftrace_graph_caller\suffix + + ldr r1, =ftrace_graph_entry + ldr r2, [r1] + ldr r0, =ftrace_graph_entry_stub + cmp r0, r2 + bne ftrace_graph_caller\suffix +#endif + + mcount_exit + +1: mcount_get_lr r1 @ lr of instrumented func + mcount_adjust_addr r0, lr @ instrumented function + adr lr, BSYM(2f) + mov pc, r2 +2: mcount_exit +.endm + +.macro __ftrace_caller suffix + mcount_enter + + mcount_get_lr r1 @ lr of instrumented func + mcount_adjust_addr r0, lr @ instrumented function + + .globl ftrace_call\suffix +ftrace_call\suffix: + bl ftrace_stub + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + .globl ftrace_graph_call\suffix +ftrace_graph_call\suffix: + mov r0, r0 +#endif + + mcount_exit +.endm + +.macro __ftrace_graph_caller + sub r0, fp, #4 @ &lr of instrumented routine (&parent) +#ifdef CONFIG_DYNAMIC_FTRACE + @ called from __ftrace_caller, saved in mcount_enter + ldr r1, [sp, #16] @ instrumented routine (func) + mcount_adjust_addr r1, r1 +#else + @ called from __mcount, untouched in lr + mcount_adjust_addr r1, lr @ instrumented routine (func) +#endif + mov r2, fp @ frame pointer + bl prepare_ftrace_return + mcount_exit +.endm + +#ifdef CONFIG_OLD_MCOUNT +/* + * mcount + */ + +.macro mcount_enter + stmdb sp!, {r0-r3, lr} +.endm + +.macro mcount_get_lr reg + ldr \reg, [fp, #-4] +.endm + +.macro mcount_exit + ldr lr, [fp, #-4] + ldmia sp!, {r0-r3, pc} +.endm + +ENTRY(mcount) +#ifdef CONFIG_DYNAMIC_FTRACE + stmdb sp!, {lr} + ldr lr, [fp, #-4] + ldmia sp!, {pc} +#else + __mcount _old +#endif +ENDPROC(mcount) + +#ifdef CONFIG_DYNAMIC_FTRACE +ENTRY(ftrace_caller_old) + __ftrace_caller _old +ENDPROC(ftrace_caller_old) +#endif + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +ENTRY(ftrace_graph_caller_old) + __ftrace_graph_caller +ENDPROC(ftrace_graph_caller_old) +#endif + +.purgem mcount_enter +.purgem mcount_get_lr +.purgem mcount_exit +#endif + +/* + * __gnu_mcount_nc + */ + +.macro mcount_enter +/* + * This pad compensates for the push {lr} at the call site. Note that we are + * unable to unwind through a function which does not otherwise save its lr. 
+ */ + UNWIND(.pad #4) + stmdb sp!, {r0-r3, lr} + UNWIND(.save {r0-r3, lr}) +.endm + +.macro mcount_get_lr reg + ldr \reg, [sp, #20] +.endm + +.macro mcount_exit + ldmia sp!, {r0-r3, ip, lr} + ret ip +.endm + +ENTRY(__gnu_mcount_nc) +UNWIND(.fnstart) +#ifdef CONFIG_DYNAMIC_FTRACE + mov ip, lr + ldmia sp!, {lr} + ret ip +#else + __mcount +#endif +UNWIND(.fnend) +ENDPROC(__gnu_mcount_nc) + +#ifdef CONFIG_DYNAMIC_FTRACE +ENTRY(ftrace_caller) +UNWIND(.fnstart) + __ftrace_caller +UNWIND(.fnend) +ENDPROC(ftrace_caller) +#endif + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +ENTRY(ftrace_graph_caller) +UNWIND(.fnstart) + __ftrace_graph_caller +UNWIND(.fnend) +ENDPROC(ftrace_graph_caller) +#endif + +.purgem mcount_enter +.purgem mcount_get_lr +.purgem mcount_exit + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + .globl return_to_handler +return_to_handler: + stmdb sp!, {r0-r3} + mov r0, fp @ frame pointer + bl ftrace_return_to_handler + mov lr, r0 @ r0 has real ret addr + ldmia sp!, {r0-r3} + ret lr +#endif + +ENTRY(ftrace_stub) +.Lftrace_stub: + ret lr +ENDPROC(ftrace_stub) diff --git a/arch/arm/kernel/etm.c b/arch/arm/kernel/etm.c deleted file mode 100644 index 131a6ab5f355..000000000000 --- a/arch/arm/kernel/etm.c +++ /dev/null @@ -1,654 +0,0 @@ -/* - * linux/arch/arm/kernel/etm.c - * - * Driver for ARM's Embedded Trace Macrocell and Embedded Trace Buffer. - * - * Copyright (C) 2009 Nokia Corporation. - * Alexander Shishkin - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include <linux/kernel.h> -#include <linux/init.h> -#include <linux/types.h> -#include <linux/io.h> -#include <linux/sysrq.h> -#include <linux/device.h> -#include <linux/clk.h> -#include <linux/amba/bus.h> -#include <linux/fs.h> -#include <linux/uaccess.h> -#include <linux/miscdevice.h> -#include <linux/vmalloc.h> -#include <linux/mutex.h> -#include <linux/module.h> -#include <asm/hardware/coresight.h> -#include <asm/sections.h> - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Alexander Shishkin"); - -/* - * ETM tracer state - */ -struct tracectx { - unsigned int etb_bufsz; - void __iomem *etb_regs; - void __iomem *etm_regs; - unsigned long flags; - int ncmppairs; - int etm_portsz; - struct device *dev; - struct clk *emu_clk; - struct mutex mutex; -}; - -static struct tracectx tracer; - -static inline bool trace_isrunning(struct tracectx *t) -{ - return !!(t->flags & TRACER_RUNNING); -} - -static int etm_setup_address_range(struct tracectx *t, int n, - unsigned long start, unsigned long end, int exclude, int data) -{ - u32 flags = ETMAAT_ARM | ETMAAT_IGNCONTEXTID | ETMAAT_NSONLY | \ - ETMAAT_NOVALCMP; - - if (n < 1 || n > t->ncmppairs) - return -EINVAL; - - /* comparators and ranges are numbered starting with 1 as opposed - * to bits in a word */ - n--; - - if (data) - flags |= ETMAAT_DLOADSTORE; - else - flags |= ETMAAT_IEXEC; - - /* first comparator for the range */ - etm_writel(t, flags, ETMR_COMP_ACC_TYPE(n * 2)); - etm_writel(t, start, ETMR_COMP_VAL(n * 2)); - - /* second comparator is right next to it */ - etm_writel(t, flags, ETMR_COMP_ACC_TYPE(n * 2 + 1)); - etm_writel(t, end, ETMR_COMP_VAL(n * 2 + 1)); - - flags = exclude ? 
ETMTE_INCLEXCL : 0; - etm_writel(t, flags | (1 << n), ETMR_TRACEENCTRL); - - return 0; -} - -static int trace_start(struct tracectx *t) -{ - u32 v; - unsigned long timeout = TRACER_TIMEOUT; - - etb_unlock(t); - - etb_writel(t, 0, ETBR_FORMATTERCTRL); - etb_writel(t, 1, ETBR_CTRL); - - etb_lock(t); - - /* configure etm */ - v = ETMCTRL_OPTS | ETMCTRL_PROGRAM | ETMCTRL_PORTSIZE(t->etm_portsz); - - if (t->flags & TRACER_CYCLE_ACC) - v |= ETMCTRL_CYCLEACCURATE; - - etm_unlock(t); - - etm_writel(t, v, ETMR_CTRL); - - while (!(etm_readl(t, ETMR_CTRL) & ETMCTRL_PROGRAM) && --timeout) - ; - if (!timeout) { - dev_dbg(t->dev, "Waiting for progbit to assert timed out\n"); - etm_lock(t); - return -EFAULT; - } - - etm_setup_address_range(t, 1, (unsigned long)_stext, - (unsigned long)_etext, 0, 0); - etm_writel(t, 0, ETMR_TRACEENCTRL2); - etm_writel(t, 0, ETMR_TRACESSCTRL); - etm_writel(t, 0x6f, ETMR_TRACEENEVT); - - v &= ~ETMCTRL_PROGRAM; - v |= ETMCTRL_PORTSEL; - - etm_writel(t, v, ETMR_CTRL); - - timeout = TRACER_TIMEOUT; - while (etm_readl(t, ETMR_CTRL) & ETMCTRL_PROGRAM && --timeout) - ; - if (!timeout) { - dev_dbg(t->dev, "Waiting for progbit to deassert timed out\n"); - etm_lock(t); - return -EFAULT; - } - - etm_lock(t); - - t->flags |= TRACER_RUNNING; - - return 0; -} - -static int trace_stop(struct tracectx *t) -{ - unsigned long timeout = TRACER_TIMEOUT; - - etm_unlock(t); - - etm_writel(t, 0x440, ETMR_CTRL); - while (!(etm_readl(t, ETMR_CTRL) & ETMCTRL_PROGRAM) && --timeout) - ; - if (!timeout) { - dev_dbg(t->dev, "Waiting for progbit to assert timed out\n"); - etm_lock(t); - return -EFAULT; - } - - etm_lock(t); - - etb_unlock(t); - etb_writel(t, ETBFF_MANUAL_FLUSH, ETBR_FORMATTERCTRL); - - timeout = TRACER_TIMEOUT; - while (etb_readl(t, ETBR_FORMATTERCTRL) & - ETBFF_MANUAL_FLUSH && --timeout) - ; - if (!timeout) { - dev_dbg(t->dev, "Waiting for formatter flush to commence " - "timed out\n"); - etb_lock(t); - return -EFAULT; - } - - etb_writel(t, 0, ETBR_CTRL); - - etb_lock(t); - - t->flags &= ~TRACER_RUNNING; - - return 0; -} - -static int etb_getdatalen(struct tracectx *t) -{ - u32 v; - int rp, wp; - - v = etb_readl(t, ETBR_STATUS); - - if (v & 1) - return t->etb_bufsz; - - rp = etb_readl(t, ETBR_READADDR); - wp = etb_readl(t, ETBR_WRITEADDR); - - if (rp > wp) { - etb_writel(t, 0, ETBR_READADDR); - etb_writel(t, 0, ETBR_WRITEADDR); - - return 0; - } - - return wp - rp; -} - -/* sysrq+v will always stop the running trace and leave it at that */ -static void etm_dump(void) -{ - struct tracectx *t = &tracer; - u32 first = 0; - int length; - - if (!t->etb_regs) { - printk(KERN_INFO "No tracing hardware found\n"); - return; - } - - if (trace_isrunning(t)) - trace_stop(t); - - etb_unlock(t); - - length = etb_getdatalen(t); - - if (length == t->etb_bufsz) - first = etb_readl(t, ETBR_WRITEADDR); - - etb_writel(t, first, ETBR_READADDR); - - printk(KERN_INFO "Trace buffer contents length: %d\n", length); - printk(KERN_INFO "--- ETB buffer begin ---\n"); - for (; length; length--) - printk("%08x", cpu_to_be32(etb_readl(t, ETBR_READMEM))); - printk(KERN_INFO "\n--- ETB buffer end ---\n"); - - /* deassert the overflow bit */ - etb_writel(t, 1, ETBR_CTRL); - etb_writel(t, 0, ETBR_CTRL); - - etb_writel(t, 0, ETBR_TRIGGERCOUNT); - etb_writel(t, 0, ETBR_READADDR); - etb_writel(t, 0, ETBR_WRITEADDR); - - etb_lock(t); -} - -static void sysrq_etm_dump(int key) -{ - dev_dbg(tracer.dev, "Dumping ETB buffer\n"); - etm_dump(); -} - -static struct sysrq_key_op sysrq_etm_op = { - .handler = sysrq_etm_dump, - 
.help_msg = "etm-buffer-dump(v)", - .action_msg = "etm", -}; - -static int etb_open(struct inode *inode, struct file *file) -{ - if (!tracer.etb_regs) - return -ENODEV; - - file->private_data = &tracer; - - return nonseekable_open(inode, file); -} - -static ssize_t etb_read(struct file *file, char __user *data, - size_t len, loff_t *ppos) -{ - int total, i; - long length; - struct tracectx *t = file->private_data; - u32 first = 0; - u32 *buf; - - mutex_lock(&t->mutex); - - if (trace_isrunning(t)) { - length = 0; - goto out; - } - - etb_unlock(t); - - total = etb_getdatalen(t); - if (total == t->etb_bufsz) - first = etb_readl(t, ETBR_WRITEADDR); - - etb_writel(t, first, ETBR_READADDR); - - length = min(total * 4, (int)len); - buf = vmalloc(length); - - dev_dbg(t->dev, "ETB buffer length: %d\n", total); - dev_dbg(t->dev, "ETB status reg: %x\n", etb_readl(t, ETBR_STATUS)); - for (i = 0; i < length / 4; i++) - buf[i] = etb_readl(t, ETBR_READMEM); - - /* the only way to deassert overflow bit in ETB status is this */ - etb_writel(t, 1, ETBR_CTRL); - etb_writel(t, 0, ETBR_CTRL); - - etb_writel(t, 0, ETBR_WRITEADDR); - etb_writel(t, 0, ETBR_READADDR); - etb_writel(t, 0, ETBR_TRIGGERCOUNT); - - etb_lock(t); - - length -= copy_to_user(data, buf, length); - vfree(buf); - -out: - mutex_unlock(&t->mutex); - - return length; -} - -static int etb_release(struct inode *inode, struct file *file) -{ - /* there's nothing to do here, actually */ - return 0; -} - -static const struct file_operations etb_fops = { - .owner = THIS_MODULE, - .read = etb_read, - .open = etb_open, - .release = etb_release, - .llseek = no_llseek, -}; - -static struct miscdevice etb_miscdev = { - .name = "tracebuf", - .minor = 0, - .fops = &etb_fops, -}; - -static int etb_probe(struct amba_device *dev, const struct amba_id *id) -{ - struct tracectx *t = &tracer; - int ret = 0; - - ret = amba_request_regions(dev, NULL); - if (ret) - goto out; - - t->etb_regs = ioremap_nocache(dev->res.start, resource_size(&dev->res)); - if (!t->etb_regs) { - ret = -ENOMEM; - goto out_release; - } - - amba_set_drvdata(dev, t); - - etb_miscdev.parent = &dev->dev; - - ret = misc_register(&etb_miscdev); - if (ret) - goto out_unmap; - - t->emu_clk = clk_get(&dev->dev, "emu_src_ck"); - if (IS_ERR(t->emu_clk)) { - dev_dbg(&dev->dev, "Failed to obtain emu_src_ck.\n"); - return -EFAULT; - } - - clk_enable(t->emu_clk); - - etb_unlock(t); - t->etb_bufsz = etb_readl(t, ETBR_DEPTH); - dev_dbg(&dev->dev, "Size: %x\n", t->etb_bufsz); - - /* make sure trace capture is disabled */ - etb_writel(t, 0, ETBR_CTRL); - etb_writel(t, 0x1000, ETBR_FORMATTERCTRL); - etb_lock(t); - - dev_dbg(&dev->dev, "ETB AMBA driver initialized.\n"); - -out: - return ret; - -out_unmap: - iounmap(t->etb_regs); - -out_release: - amba_release_regions(dev); - - return ret; -} - -static int etb_remove(struct amba_device *dev) -{ - struct tracectx *t = amba_get_drvdata(dev); - - iounmap(t->etb_regs); - t->etb_regs = NULL; - - clk_disable(t->emu_clk); - clk_put(t->emu_clk); - - amba_release_regions(dev); - - return 0; -} - -static struct amba_id etb_ids[] = { - { - .id = 0x0003b907, - .mask = 0x0007ffff, - }, - { 0, 0 }, -}; - -static struct amba_driver etb_driver = { - .drv = { - .name = "etb", - .owner = THIS_MODULE, - }, - .probe = etb_probe, - .remove = etb_remove, - .id_table = etb_ids, -}; - -/* use a sysfs file "trace_running" to start/stop tracing */ -static ssize_t trace_running_show(struct kobject *kobj, - struct kobj_attribute *attr, - char *buf) -{ - return sprintf(buf, "%x\n", 
trace_isrunning(&tracer)); -} - -static ssize_t trace_running_store(struct kobject *kobj, - struct kobj_attribute *attr, - const char *buf, size_t n) -{ - unsigned int value; - int ret; - - if (sscanf(buf, "%u", &value) != 1) - return -EINVAL; - - mutex_lock(&tracer.mutex); - ret = value ? trace_start(&tracer) : trace_stop(&tracer); - mutex_unlock(&tracer.mutex); - - return ret ? : n; -} - -static struct kobj_attribute trace_running_attr = - __ATTR(trace_running, 0644, trace_running_show, trace_running_store); - -static ssize_t trace_info_show(struct kobject *kobj, - struct kobj_attribute *attr, - char *buf) -{ - u32 etb_wa, etb_ra, etb_st, etb_fc, etm_ctrl, etm_st; - int datalen; - - etb_unlock(&tracer); - datalen = etb_getdatalen(&tracer); - etb_wa = etb_readl(&tracer, ETBR_WRITEADDR); - etb_ra = etb_readl(&tracer, ETBR_READADDR); - etb_st = etb_readl(&tracer, ETBR_STATUS); - etb_fc = etb_readl(&tracer, ETBR_FORMATTERCTRL); - etb_lock(&tracer); - - etm_unlock(&tracer); - etm_ctrl = etm_readl(&tracer, ETMR_CTRL); - etm_st = etm_readl(&tracer, ETMR_STATUS); - etm_lock(&tracer); - - return sprintf(buf, "Trace buffer len: %d\nComparator pairs: %d\n" - "ETBR_WRITEADDR:\t%08x\n" - "ETBR_READADDR:\t%08x\n" - "ETBR_STATUS:\t%08x\n" - "ETBR_FORMATTERCTRL:\t%08x\n" - "ETMR_CTRL:\t%08x\n" - "ETMR_STATUS:\t%08x\n", - datalen, - tracer.ncmppairs, - etb_wa, - etb_ra, - etb_st, - etb_fc, - etm_ctrl, - etm_st - ); -} - -static struct kobj_attribute trace_info_attr = - __ATTR(trace_info, 0444, trace_info_show, NULL); - -static ssize_t trace_mode_show(struct kobject *kobj, - struct kobj_attribute *attr, - char *buf) -{ - return sprintf(buf, "%d %d\n", - !!(tracer.flags & TRACER_CYCLE_ACC), - tracer.etm_portsz); -} - -static ssize_t trace_mode_store(struct kobject *kobj, - struct kobj_attribute *attr, - const char *buf, size_t n) -{ - unsigned int cycacc, portsz; - - if (sscanf(buf, "%u %u", &cycacc, &portsz) != 2) - return -EINVAL; - - mutex_lock(&tracer.mutex); - if (cycacc) - tracer.flags |= TRACER_CYCLE_ACC; - else - tracer.flags &= ~TRACER_CYCLE_ACC; - - tracer.etm_portsz = portsz & 0x0f; - mutex_unlock(&tracer.mutex); - - return n; -} - -static struct kobj_attribute trace_mode_attr = - __ATTR(trace_mode, 0644, trace_mode_show, trace_mode_store); - -static int etm_probe(struct amba_device *dev, const struct amba_id *id) -{ - struct tracectx *t = &tracer; - int ret = 0; - - if (t->etm_regs) { - dev_dbg(&dev->dev, "ETM already initialized\n"); - ret = -EBUSY; - goto out; - } - - ret = amba_request_regions(dev, NULL); - if (ret) - goto out; - - t->etm_regs = ioremap_nocache(dev->res.start, resource_size(&dev->res)); - if (!t->etm_regs) { - ret = -ENOMEM; - goto out_release; - } - - amba_set_drvdata(dev, t); - - mutex_init(&t->mutex); - t->dev = &dev->dev; - t->flags = TRACER_CYCLE_ACC; - t->etm_portsz = 1; - - etm_unlock(t); - (void)etm_readl(t, ETMMR_PDSR); - /* dummy first read */ - (void)etm_readl(&tracer, ETMMR_OSSRR); - - t->ncmppairs = etm_readl(t, ETMR_CONFCODE) & 0xf; - etm_writel(t, 0x440, ETMR_CTRL); - etm_lock(t); - - ret = sysfs_create_file(&dev->dev.kobj, - &trace_running_attr.attr); - if (ret) - goto out_unmap; - - /* failing to create any of these two is not fatal */ - ret = sysfs_create_file(&dev->dev.kobj, &trace_info_attr.attr); - if (ret) - dev_dbg(&dev->dev, "Failed to create trace_info in sysfs\n"); - - ret = sysfs_create_file(&dev->dev.kobj, &trace_mode_attr.attr); - if (ret) - dev_dbg(&dev->dev, "Failed to create trace_mode in sysfs\n"); - - dev_dbg(t->dev, "ETM AMBA driver 
initialized.\n"); - -out: - return ret; - -out_unmap: - iounmap(t->etm_regs); - -out_release: - amba_release_regions(dev); - - return ret; -} - -static int etm_remove(struct amba_device *dev) -{ - struct tracectx *t = amba_get_drvdata(dev); - - iounmap(t->etm_regs); - t->etm_regs = NULL; - - amba_release_regions(dev); - - sysfs_remove_file(&dev->dev.kobj, &trace_running_attr.attr); - sysfs_remove_file(&dev->dev.kobj, &trace_info_attr.attr); - sysfs_remove_file(&dev->dev.kobj, &trace_mode_attr.attr); - - return 0; -} - -static struct amba_id etm_ids[] = { - { - .id = 0x0003b921, - .mask = 0x0007ffff, - }, - { 0, 0 }, -}; - -static struct amba_driver etm_driver = { - .drv = { - .name = "etm", - .owner = THIS_MODULE, - }, - .probe = etm_probe, - .remove = etm_remove, - .id_table = etm_ids, -}; - -static int __init etm_init(void) -{ - int retval; - - retval = amba_driver_register(&etb_driver); - if (retval) { - printk(KERN_ERR "Failed to register etb\n"); - return retval; - } - - retval = amba_driver_register(&etm_driver); - if (retval) { - amba_driver_unregister(&etb_driver); - printk(KERN_ERR "Failed to probe etm\n"); - return retval; - } - - /* not being able to install this handler is not fatal */ - (void)register_sysrq_key('v', &sysrq_etm_op); - - return 0; -} - -device_initcall(etm_init); - diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c index b37752a96652..059c3da0fee3 100644 --- a/arch/arm/kernel/fiq.c +++ b/arch/arm/kernel/fiq.c @@ -124,7 +124,7 @@ int claim_fiq(struct fiq_handler *f) void release_fiq(struct fiq_handler *f) { if (current_fiq != f) { - printk(KERN_ERR "%s FIQ trying to release %s FIQ\n", + pr_err("%s FIQ trying to release %s FIQ\n", f->name, current_fiq->name); dump_stack(); return; diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c index af9a8a927a4e..b8c75e45a950 100644 --- a/arch/arm/kernel/ftrace.c +++ b/arch/arm/kernel/ftrace.c @@ -15,6 +15,7 @@ #include <linux/ftrace.h> #include <linux/uaccess.h> #include <linux/module.h> +#include <linux/stop_machine.h> #include <asm/cacheflush.h> #include <asm/opcodes.h> @@ -35,6 +36,22 @@ #define OLD_NOP 0xe1a00000 /* mov r0, r0 */ +static int __ftrace_modify_code(void *data) +{ + int *command = data; + + set_kernel_text_rw(); + ftrace_modify_all_code(*command); + set_kernel_text_ro(); + + return 0; +} + +void arch_ftrace_update_code(int command) +{ + stop_machine(__ftrace_modify_code, &command, NULL); +} + static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec) { return rec->arch.old_mcount ? OLD_NOP : NOP; @@ -73,6 +90,8 @@ int ftrace_arch_code_modify_prepare(void) int ftrace_arch_code_modify_post_process(void) { set_all_modules_text_ro(); + /* Make sure any TLB misses during machine stop are cleared. */ + flush_tlb_all(); return 0; } diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c index b5b452f90f76..7fc70ae21185 100644 --- a/arch/arm/kernel/hw_breakpoint.c +++ b/arch/arm/kernel/hw_breakpoint.c @@ -29,6 +29,7 @@ #include <linux/hw_breakpoint.h> #include <linux/smp.h> #include <linux/cpu_pm.h> +#include <linux/coresight.h> #include <asm/cacheflush.h> #include <asm/cputype.h> @@ -36,7 +37,6 @@ #include <asm/hw_breakpoint.h> #include <asm/kdebug.h> #include <asm/traps.h> -#include <asm/hardware/coresight.h> /* Breakpoint currently in use for each BRP. 
*/ static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]); @@ -976,7 +976,7 @@ static void reset_ctrl_regs(void *unused) * Unconditionally clear the OS lock by writing a value * other than CS_LAR_KEY to the access register. */ - ARM_DBG_WRITE(c1, c0, 4, ~CS_LAR_KEY); + ARM_DBG_WRITE(c1, c0, 4, ~CORESIGHT_UNLOCK); isb(); /* diff --git a/arch/arm/kernel/io.c b/arch/arm/kernel/io.c index 9203cf883330..eedefe050022 100644 --- a/arch/arm/kernel/io.c +++ b/arch/arm/kernel/io.c @@ -51,6 +51,7 @@ void _memcpy_fromio(void *to, const volatile void __iomem *from, size_t count) from++; } } +EXPORT_SYMBOL(_memcpy_fromio); /* * Copy data from "real" memory space to IO memory space. @@ -66,6 +67,7 @@ void _memcpy_toio(volatile void __iomem *to, const void *from, size_t count) to++; } } +EXPORT_SYMBOL(_memcpy_toio); /* * "memset" on IO memory space. @@ -79,7 +81,4 @@ void _memset_io(volatile void __iomem *dst, int c, size_t count) dst++; } } - -EXPORT_SYMBOL(_memcpy_fromio); -EXPORT_SYMBOL(_memcpy_toio); EXPORT_SYMBOL(_memset_io); diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c index 7c81ec428b9b..ad857bada96c 100644 --- a/arch/arm/kernel/irq.c +++ b/arch/arm/kernel/irq.c @@ -31,6 +31,7 @@ #include <linux/smp.h> #include <linux/init.h> #include <linux/seq_file.h> +#include <linux/ratelimit.h> #include <linux/errno.h> #include <linux/list.h> #include <linux/kallsyms.h> @@ -82,7 +83,7 @@ void set_irq_flags(unsigned int irq, unsigned int iflags) unsigned long clr = 0, set = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN; if (irq >= nr_irqs) { - printk(KERN_ERR "Trying to set irq flags for IRQ%d\n", irq); + pr_err("Trying to set irq flags for IRQ%d\n", irq); return; } @@ -135,7 +136,6 @@ int __init arch_probe_nr_irqs(void) #endif #ifdef CONFIG_HOTPLUG_CPU - static bool migrate_one_irq(struct irq_desc *desc) { struct irq_data *d = irq_desc_get_irq_data(desc); @@ -187,8 +187,8 @@ void migrate_irqs(void) affinity_broken = migrate_one_irq(desc); raw_spin_unlock(&desc->lock); - if (affinity_broken && printk_ratelimit()) - pr_warn("IRQ%u no longer affine to CPU%u\n", + if (affinity_broken) + pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n", i, smp_processor_id()); } diff --git a/arch/arm/kernel/iwmmxt.S b/arch/arm/kernel/iwmmxt.S index ad58e565fe98..49fadbda8c63 100644 --- a/arch/arm/kernel/iwmmxt.S +++ b/arch/arm/kernel/iwmmxt.S @@ -58,6 +58,7 @@ #define MMX_SIZE (0x98) .text + .arm /* * Lazy switching of Concan coprocessor context @@ -182,6 +183,8 @@ concan_load: tmcr wCon, r2 ret lr +ENDPROC(iwmmxt_task_enable) + /* * Back up Concan regs to save area and disable access to them * (mainly for gdb or sleep mode usage) @@ -232,6 +235,8 @@ ENTRY(iwmmxt_task_disable) 1: msr cpsr_c, ip @ restore interrupt mode ldmfd sp!, {r4, pc} +ENDPROC(iwmmxt_task_disable) + /* * Copy Concan state to given memory address * @@ -268,6 +273,8 @@ ENTRY(iwmmxt_task_copy) msr cpsr_c, ip @ restore interrupt mode ret r3 +ENDPROC(iwmmxt_task_copy) + /* * Restore Concan state from given memory address * @@ -304,6 +311,8 @@ ENTRY(iwmmxt_task_restore) msr cpsr_c, ip @ restore interrupt mode ret r3 +ENDPROC(iwmmxt_task_restore) + /* * Concan handling on task switch * @@ -335,6 +344,8 @@ ENTRY(iwmmxt_task_switch) mrc p15, 0, r1, c2, c0, 0 sub pc, lr, r1, lsr #32 @ cpwait and return +ENDPROC(iwmmxt_task_switch) + /* * Remove Concan ownership of given task * @@ -353,6 +364,8 @@ ENTRY(iwmmxt_task_release) msr cpsr_c, r2 @ restore interrupts ret lr +ENDPROC(iwmmxt_task_release) + .data concan_owner: .word 0 diff --git 
a/arch/arm/kernel/jump_label.c b/arch/arm/kernel/jump_label.c index 4ce4f789446d..afeeb9ea6f43 100644 --- a/arch/arm/kernel/jump_label.c +++ b/arch/arm/kernel/jump_label.c @@ -19,7 +19,7 @@ static void __arch_jump_label_transform(struct jump_entry *entry, insn = arm_gen_nop(); if (is_static) - __patch_text(addr, insn); + __patch_text_early(addr, insn); else patch_text(addr, insn); } diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c index a74b53c1b7df..07db2f8a1b45 100644 --- a/arch/arm/kernel/kgdb.c +++ b/arch/arm/kernel/kgdb.c @@ -12,8 +12,12 @@ #include <linux/irq.h> #include <linux/kdebug.h> #include <linux/kgdb.h> +#include <linux/uaccess.h> + #include <asm/traps.h> +#include "patch.h" + struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = { { "r0", 4, offsetof(struct pt_regs, ARM_r0)}, @@ -244,6 +248,31 @@ void kgdb_arch_exit(void) unregister_die_notifier(&kgdb_notifier); } +int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt) +{ + int err; + + /* patch_text() only supports int-sized breakpoints */ + BUILD_BUG_ON(sizeof(int) != BREAK_INSTR_SIZE); + + err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr, + BREAK_INSTR_SIZE); + if (err) + return err; + + patch_text((void *)bpt->bpt_addr, + *(unsigned int *)arch_kgdb_ops.gdb_bpt_instr); + + return err; +} + +int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt) +{ + patch_text((void *)bpt->bpt_addr, *(unsigned int *)bpt->saved_instr); + + return 0; +} + /* * Register our undef instruction hooks with ARM undef core. * We regsiter a hook specifically looking for the KGB break inst diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c index 8cf0996aa1a8..de2b085ad753 100644 --- a/arch/arm/kernel/machine_kexec.c +++ b/arch/arm/kernel/machine_kexec.c @@ -29,6 +29,7 @@ extern unsigned long kexec_boot_atags; static atomic_t waiting_for_crash_ipi; +static unsigned long dt_mem; /* * Provide a dummy crash_notes definition while crash dump arrives to arm. * This prevents breakage of crash_notes attribute in kernel/ksysfs.c. 
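
Note on the machine_kexec.c hunks around this point: machine_kexec_prepare() now records, in the new dt_mem variable declared above, the load address of any segment whose first word is OF_DT_HEADER (a flattened device tree), and machine_kexec() in the next hunk selects the boot parameter pointer with the GNU "x ?: y" shorthand (equivalent to "x ? x : y", with x evaluated once). The following is a small self-contained sketch of that selection logic, not kernel code; the numeric values are invented for illustration, and only the identifiers dt_mem, KEXEC_ARM_ZIMAGE_OFFSET and KEXEC_ARM_ATAGS_OFFSET correspond to names used in the surrounding hunks.

#include <stdio.h>

/*
 * Sketch of the boot-parameter selection added to machine_kexec():
 * prefer the device tree address recorded in dt_mem, otherwise fall
 * back to the legacy ATAGS location at a fixed offset from the
 * zImage start. The offsets below are illustrative stand-ins, not
 * necessarily the real KEXEC_ARM_* values.
 */
#define KEXEC_ARM_ZIMAGE_OFFSET 0x8000  /* stand-in value */
#define KEXEC_ARM_ATAGS_OFFSET  0x1000  /* stand-in value */

int main(void)
{
	unsigned long dt_mem = 0;            /* 0 means no DTB segment was found */
	unsigned long image_start = 0x60008000;
	unsigned long boot_atags;

	/* "x ?: y" is the GNU extension for "x ? x : y" */
	boot_atags = dt_mem ?: image_start - KEXEC_ARM_ZIMAGE_OFFSET
				+ KEXEC_ARM_ATAGS_OFFSET;

	printf("boot params at %#lx\n", boot_atags); /* prints 0x60001000 */
	return 0;
}
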
@@ -64,7 +65,7 @@ int machine_kexec_prepare(struct kimage *image) return err; if (be32_to_cpu(header) == OF_DT_HEADER) - kexec_boot_atags = current_segment->mem; + dt_mem = current_segment->mem; } return 0; } @@ -126,12 +127,12 @@ void machine_crash_shutdown(struct pt_regs *regs) msecs--; } if (atomic_read(&waiting_for_crash_ipi) > 0) - printk(KERN_WARNING "Non-crashing CPUs did not react to IPI\n"); + pr_warn("Non-crashing CPUs did not react to IPI\n"); crash_save_cpu(regs, smp_processor_id()); machine_kexec_mask_interrupts(); - printk(KERN_INFO "Loading crashdump kernel...\n"); + pr_info("Loading crashdump kernel...\n"); } /* @@ -163,12 +164,12 @@ void machine_kexec(struct kimage *image) reboot_code_buffer = page_address(image->control_code_page); /* Prepare parameters for reboot_code_buffer*/ + set_kernel_text_rw(); kexec_start_address = image->start; kexec_indirection_page = page_list; kexec_mach_type = machine_arch_type; - if (!kexec_boot_atags) - kexec_boot_atags = image->start - KEXEC_ARM_ZIMAGE_OFFSET + KEXEC_ARM_ATAGS_OFFSET; - + kexec_boot_atags = dt_mem ?: image->start - KEXEC_ARM_ZIMAGE_OFFSET + + KEXEC_ARM_ATAGS_OFFSET; /* copy our kernel relocation code to the control code page */ reboot_entry = fncpy(reboot_code_buffer, @@ -177,7 +178,7 @@ void machine_kexec(struct kimage *image) reboot_entry_phys = (unsigned long)reboot_entry + (reboot_code_buffer_phys - (unsigned long)reboot_code_buffer); - printk(KERN_INFO "Bye!\n"); + pr_info("Bye!\n"); if (kexec_reinit) kexec_reinit(); diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c index 6a4dffefd357..bea7db9e5b80 100644 --- a/arch/arm/kernel/module.c +++ b/arch/arm/kernel/module.c @@ -251,7 +251,7 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex, #endif default: - printk(KERN_ERR "%s: unknown relocation: %u\n", + pr_err("%s: unknown relocation: %u\n", module->name, ELF32_R_TYPE(rel->r_info)); return -ENOEXEC; } diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c index 07314af47733..5038960e3c55 100644 --- a/arch/arm/kernel/patch.c +++ b/arch/arm/kernel/patch.c @@ -1,8 +1,11 @@ #include <linux/kernel.h> +#include <linux/spinlock.h> #include <linux/kprobes.h> +#include <linux/mm.h> #include <linux/stop_machine.h> #include <asm/cacheflush.h> +#include <asm/fixmap.h> #include <asm/smp_plat.h> #include <asm/opcodes.h> @@ -13,21 +16,77 @@ struct patch { unsigned int insn; }; -void __kprobes __patch_text(void *addr, unsigned int insn) +static DEFINE_SPINLOCK(patch_lock); + +static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags) + __acquires(&patch_lock) +{ + unsigned int uintaddr = (uintptr_t) addr; + bool module = !core_kernel_text(uintaddr); + struct page *page; + + if (module && IS_ENABLED(CONFIG_DEBUG_SET_MODULE_RONX)) + page = vmalloc_to_page(addr); + else if (!module && IS_ENABLED(CONFIG_DEBUG_RODATA)) + page = virt_to_page(addr); + else + return addr; + + if (flags) + spin_lock_irqsave(&patch_lock, *flags); + else + __acquire(&patch_lock); + + set_fixmap(fixmap, page_to_phys(page)); + + return (void *) (__fix_to_virt(fixmap) + (uintaddr & ~PAGE_MASK)); +} + +static void __kprobes patch_unmap(int fixmap, unsigned long *flags) + __releases(&patch_lock) +{ + clear_fixmap(fixmap); + + if (flags) + spin_unlock_irqrestore(&patch_lock, *flags); + else + __release(&patch_lock); +} + +void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap) { bool thumb2 = IS_ENABLED(CONFIG_THUMB2_KERNEL); + unsigned int uintaddr = (uintptr_t) addr; + bool 
twopage = false; + unsigned long flags; + void *waddr = addr; int size; + if (remap) + waddr = patch_map(addr, FIX_TEXT_POKE0, &flags); + else + __acquire(&patch_lock); + if (thumb2 && __opcode_is_thumb16(insn)) { - *(u16 *)addr = __opcode_to_mem_thumb16(insn); + *(u16 *)waddr = __opcode_to_mem_thumb16(insn); size = sizeof(u16); - } else if (thumb2 && ((uintptr_t)addr & 2)) { + } else if (thumb2 && (uintaddr & 2)) { u16 first = __opcode_thumb32_first(insn); u16 second = __opcode_thumb32_second(insn); - u16 *addrh = addr; + u16 *addrh0 = waddr; + u16 *addrh1 = waddr + 2; + + twopage = (uintaddr & ~PAGE_MASK) == PAGE_SIZE - 2; + if (twopage && remap) + addrh1 = patch_map(addr + 2, FIX_TEXT_POKE1, NULL); + + *addrh0 = __opcode_to_mem_thumb16(first); + *addrh1 = __opcode_to_mem_thumb16(second); - addrh[0] = __opcode_to_mem_thumb16(first); - addrh[1] = __opcode_to_mem_thumb16(second); + if (twopage && addrh1 != addr + 2) { + flush_kernel_vmap_range(addrh1, 2); + patch_unmap(FIX_TEXT_POKE1, NULL); + } size = sizeof(u32); } else { @@ -36,10 +95,16 @@ void __kprobes __patch_text(void *addr, unsigned int insn) else insn = __opcode_to_mem_arm(insn); - *(u32 *)addr = insn; + *(u32 *)waddr = insn; size = sizeof(u32); } + if (waddr != addr) { + flush_kernel_vmap_range(waddr, twopage ? size / 2 : size); + patch_unmap(FIX_TEXT_POKE0, &flags); + } else + __release(&patch_lock); + flush_icache_range((uintptr_t)(addr), (uintptr_t)(addr) + size); } @@ -60,16 +125,5 @@ void __kprobes patch_text(void *addr, unsigned int insn) .insn = insn, }; - if (cache_ops_need_broadcast()) { - stop_machine(patch_text_stop_machine, &patch, cpu_online_mask); - } else { - bool straddles_word = IS_ENABLED(CONFIG_THUMB2_KERNEL) - && __opcode_is_thumb32(insn) - && ((uintptr_t)addr & 2); - - if (straddles_word) - stop_machine(patch_text_stop_machine, &patch, NULL); - else - __patch_text(addr, insn); - } + stop_machine(patch_text_stop_machine, &patch, NULL); } diff --git a/arch/arm/kernel/patch.h b/arch/arm/kernel/patch.h index b4731f2dac38..77e054c2f6cd 100644 --- a/arch/arm/kernel/patch.h +++ b/arch/arm/kernel/patch.h @@ -2,6 +2,16 @@ #define _ARM_KERNEL_PATCH_H void patch_text(void *addr, unsigned int insn); -void __patch_text(void *addr, unsigned int insn); +void __patch_text_real(void *addr, unsigned int insn, bool remap); + +static inline void __patch_text(void *addr, unsigned int insn) +{ + __patch_text_real(addr, insn, true); +} + +static inline void __patch_text_early(void *addr, unsigned int insn) +{ + __patch_text_real(addr, insn, false); +} #endif diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index fe972a2f3df3..fdfa3a78ec8c 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -51,8 +51,8 @@ EXPORT_SYMBOL(__stack_chk_guard); static const char *processor_modes[] __maybe_unused = { "USER_26", "FIQ_26" , "IRQ_26" , "SVC_26" , "UK4_26" , "UK5_26" , "UK6_26" , "UK7_26" , "UK8_26" , "UK9_26" , "UK10_26", "UK11_26", "UK12_26", "UK13_26", "UK14_26", "UK15_26", - "USER_32", "FIQ_32" , "IRQ_32" , "SVC_32" , "UK4_32" , "UK5_32" , "UK6_32" , "ABT_32" , - "UK8_32" , "UK9_32" , "UK10_32", "UND_32" , "UK12_32", "UK13_32", "UK14_32", "SYS_32" + "USER_32", "FIQ_32" , "IRQ_32" , "SVC_32" , "UK4_32" , "UK5_32" , "MON_32" , "ABT_32" , + "UK8_32" , "UK9_32" , "HYP_32", "UND_32" , "UK12_32", "UK13_32", "UK14_32", "SYS_32" }; static const char *isa_modes[] __maybe_unused = { diff --git a/arch/arm/kernel/return_address.c b/arch/arm/kernel/return_address.c index 98ea4b7eb406..24b4a04846eb 100644 --- 
a/arch/arm/kernel/return_address.c +++ b/arch/arm/kernel/return_address.c @@ -39,13 +39,12 @@ void *return_address(unsigned int level) { struct return_address_data data; struct stackframe frame; - register unsigned long current_sp asm ("sp"); data.level = level + 2; data.addr = NULL; frame.fp = (unsigned long)__builtin_frame_address(0); - frame.sp = current_sp; + frame.sp = current_stack_pointer; frame.lr = (unsigned long)__builtin_return_address(0); frame.pc = (unsigned long)return_address; diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index c03106378b49..8361652b6dab 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -900,6 +900,7 @@ void __init setup_arch(char **cmdline_p) mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type); machine_desc = mdesc; machine_name = mdesc->name; + dump_stack_set_arch_desc("%s", mdesc->name); if (mdesc->reboot_mode != REBOOT_HARD) reboot_mode = mdesc->reboot_mode; diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index bd1983437205..8aa6f1b87c9e 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c @@ -592,7 +592,6 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall) } syscall = 0; } else if (thread_flags & _TIF_UPROBE) { - clear_thread_flag(TIF_UPROBE); uprobe_notify_resume(regs); } else { clear_thread_flag(TIF_NOTIFY_RESUME); diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 13396d3d600e..5e6052e18850 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -225,7 +225,7 @@ void __cpu_die(unsigned int cpu) pr_err("CPU%u: cpu didn't die\n", cpu); return; } - printk(KERN_NOTICE "CPU%u: shutdown\n", cpu); + pr_notice("CPU%u: shutdown\n", cpu); /* * platform_cpu_kill() is generally expected to do the powering off @@ -235,7 +235,7 @@ void __cpu_die(unsigned int cpu) * the requesting CPU and the dying CPU actually losing power. */ if (!platform_cpu_kill(cpu)) - printk("CPU%u: unable to kill\n", cpu); + pr_err("CPU%u: unable to kill\n", cpu); } /* @@ -351,7 +351,7 @@ asmlinkage void secondary_start_kernel(void) cpu_init(); - printk("CPU%u: Booted secondary processor\n", cpu); + pr_debug("CPU%u: Booted secondary processor\n", cpu); preempt_disable(); trace_hardirqs_off(); @@ -387,9 +387,6 @@ asmlinkage void secondary_start_kernel(void) void __init smp_cpus_done(unsigned int max_cpus) { - printk(KERN_INFO "SMP: Total of %d processors activated.\n", - num_online_cpus()); - hyp_mode_check(); } @@ -521,7 +518,7 @@ static void ipi_cpu_stop(unsigned int cpu) if (system_state == SYSTEM_BOOTING || system_state == SYSTEM_RUNNING) { raw_spin_lock(&stop_lock); - printk(KERN_CRIT "CPU%u: stopping\n", cpu); + pr_crit("CPU%u: stopping\n", cpu); dump_stack(); raw_spin_unlock(&stop_lock); } @@ -615,8 +612,8 @@ void handle_IPI(int ipinr, struct pt_regs *regs) break; default: - printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n", - cpu, ipinr); + pr_crit("CPU%u: Unknown IPI message 0x%x\n", + cpu, ipinr); break; } diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c index 93090213c71c..172c6a05d27f 100644 --- a/arch/arm/kernel/smp_twd.c +++ b/arch/arm/kernel/smp_twd.c @@ -199,7 +199,7 @@ static void twd_calibrate_rate(void) * the timer ticks */ if (twd_timer_rate == 0) { - printk(KERN_INFO "Calibrating local timer... "); + pr_info("Calibrating local timer... 
"); /* Wait for a tick to start */ waitjiffies = get_jiffies_64() + 1; @@ -223,7 +223,7 @@ static void twd_calibrate_rate(void) twd_timer_rate = (0xFFFFFFFFU - count) * (HZ / 5); - printk("%lu.%02luMHz.\n", twd_timer_rate / 1000000, + pr_cont("%lu.%02luMHz.\n", twd_timer_rate / 1000000, (twd_timer_rate / 10000) % 100); } } diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c index f065eb05d254..92b72375c4c7 100644 --- a/arch/arm/kernel/stacktrace.c +++ b/arch/arm/kernel/stacktrace.c @@ -134,12 +134,10 @@ static noinline void __save_stack_trace(struct task_struct *tsk, frame.pc = thread_saved_pc(tsk); #endif } else { - register unsigned long current_sp asm ("sp"); - /* We don't want this function nor the caller */ data.skip += 2; frame.fp = (unsigned long)__builtin_frame_address(0); - frame.sp = current_sp; + frame.sp = current_stack_pointer; frame.lr = (unsigned long)__builtin_return_address(0); frame.pc = (unsigned long)__save_stack_trace; } diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c index 587fdfe1a72c..afdd51e30bec 100644 --- a/arch/arm/kernel/swp_emulate.c +++ b/arch/arm/kernel/swp_emulate.c @@ -260,7 +260,7 @@ static int __init swp_emulation_init(void) return -ENOMEM; #endif /* CONFIG_PROC_FS */ - printk(KERN_NOTICE "Registering SWP/SWPB emulation handler\n"); + pr_notice("Registering SWP/SWPB emulation handler\n"); register_undef_hook(&swp_hook); return 0; diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c index e90a3148f385..b83f3b7737fb 100644 --- a/arch/arm/kernel/sys_oabi-compat.c +++ b/arch/arm/kernel/sys_oabi-compat.c @@ -400,7 +400,7 @@ asmlinkage long sys_oabi_sendto(int fd, void __user *buff, return sys_sendto(fd, buff, len, flags, addr, addrlen); } -asmlinkage long sys_oabi_sendmsg(int fd, struct msghdr __user *msg, unsigned flags) +asmlinkage long sys_oabi_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags) { struct sockaddr __user *addr; int msg_namelen; @@ -446,7 +446,7 @@ asmlinkage long sys_oabi_socketcall(int call, unsigned long __user *args) break; case SYS_SENDMSG: if (copy_from_user(a, args, 3 * sizeof(long)) == 0) - r = sys_oabi_sendmsg(a[0], (struct msghdr __user *)a[1], a[2]); + r = sys_oabi_sendmsg(a[0], (struct user_msghdr __user *)a[1], a[2]); break; default: r = sys_socketcall(call, args); diff --git a/arch/arm/kernel/thumbee.c b/arch/arm/kernel/thumbee.c index 80f0d69205e7..8ff8dbfbe9fb 100644 --- a/arch/arm/kernel/thumbee.c +++ b/arch/arm/kernel/thumbee.c @@ -72,7 +72,7 @@ static int __init thumbee_init(void) if ((pfr0 & 0x0000f000) != 0x00001000) return 0; - printk(KERN_INFO "ThumbEE CPU extension supported.\n"); + pr_info("ThumbEE CPU extension supported.\n"); elf_hwcap |= HWCAP_THUMBEE; thread_register_notifier(&thumbee_notifier_block); diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c index 89cfdd6e50cb..08b7847bf912 100644 --- a/arch/arm/kernel/topology.c +++ b/arch/arm/kernel/topology.c @@ -165,7 +165,7 @@ static void update_cpu_capacity(unsigned int cpu) set_capacity_scale(cpu, cpu_capacity(cpu) / middle_capacity); - printk(KERN_INFO "CPU%u: update cpu_capacity %lu\n", + pr_info("CPU%u: update cpu_capacity %lu\n", cpu, arch_scale_cpu_capacity(NULL, cpu)); } @@ -269,7 +269,7 @@ void store_cpu_topology(unsigned int cpuid) update_cpu_capacity(cpuid); - printk(KERN_INFO "CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n", + pr_info("CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n", cpuid, cpu_topology[cpuid].thread_id, 
cpu_topology[cpuid].core_id, cpu_topology[cpuid].socket_id, mpidr); diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 9f5d81881eb6..788e23fe64d8 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -198,14 +198,14 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) } if (!fp) { - printk("no frame pointer"); + pr_cont("no frame pointer"); ok = 0; } else if (verify_stack(fp)) { - printk("invalid frame pointer 0x%08x", fp); + pr_cont("invalid frame pointer 0x%08x", fp); ok = 0; } else if (fp < (unsigned long)end_of_stack(tsk)) - printk("frame pointer underflow"); - printk("\n"); + pr_cont("frame pointer underflow"); + pr_cont("\n"); if (ok) c_backtrace(fp, mode); @@ -240,8 +240,8 @@ static int __die(const char *str, int err, struct pt_regs *regs) static int die_counter; int ret; - printk(KERN_EMERG "Internal error: %s: %x [#%d]" S_PREEMPT S_SMP - S_ISA "\n", str, err, ++die_counter); + pr_emerg("Internal error: %s: %x [#%d]" S_PREEMPT S_SMP S_ISA "\n", + str, err, ++die_counter); /* trap and error numbers are mostly meaningless on ARM */ ret = notify_die(DIE_OOPS, str, regs, err, tsk->thread.trap_no, SIGSEGV); @@ -250,8 +250,8 @@ static int __die(const char *str, int err, struct pt_regs *regs) print_modules(); __show_regs(regs); - printk(KERN_EMERG "Process %.*s (pid: %d, stack limit = 0x%p)\n", - TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), end_of_stack(tsk)); + pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n", + TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), end_of_stack(tsk)); if (!user_mode(regs) || in_interrupt()) { dump_mem(KERN_EMERG, "Stack: ", regs->ARM_sp, @@ -446,7 +446,7 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs) die_sig: #ifdef CONFIG_DEBUG_USER if (user_debug & UDBG_UNDEFINED) { - printk(KERN_INFO "%s (%d): undefined instruction: pc=%p\n", + pr_info("%s (%d): undefined instruction: pc=%p\n", current->comm, task_pid_nr(current), pc); __show_regs(regs); dump_instr(KERN_INFO, regs); @@ -496,7 +496,7 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason) { console_verbose(); - printk(KERN_CRIT "Bad mode in %s handler detected\n", handler[reason]); + pr_crit("Bad mode in %s handler detected\n", handler[reason]); die("Oops - bad mode", regs, 0); local_irq_disable(); @@ -516,7 +516,7 @@ static int bad_syscall(int n, struct pt_regs *regs) #ifdef CONFIG_DEBUG_USER if (user_debug & UDBG_SYSCALL) { - printk(KERN_ERR "[%d] %s: obsolete system call %08x.\n", + pr_err("[%d] %s: obsolete system call %08x.\n", task_pid_nr(current), current->comm, n); dump_instr(KERN_ERR, regs); } @@ -694,7 +694,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs) * something catastrophic has happened */ if (user_debug & UDBG_SYSCALL) { - printk("[%d] %s: arm syscall %d\n", + pr_err("[%d] %s: arm syscall %d\n", task_pid_nr(current), current->comm, no); dump_instr("", regs); if (user_mode(regs)) { @@ -753,8 +753,8 @@ late_initcall(arm_mrc_hook_init); void __bad_xchg(volatile void *ptr, int size) { - printk("xchg: bad data size: pc 0x%p, ptr 0x%p, size %d\n", - __builtin_return_address(0), ptr, size); + pr_err("xchg: bad data size: pc 0x%p, ptr 0x%p, size %d\n", + __builtin_return_address(0), ptr, size); BUG(); } EXPORT_SYMBOL(__bad_xchg); @@ -771,8 +771,8 @@ baddataabort(int code, unsigned long instr, struct pt_regs *regs) #ifdef CONFIG_DEBUG_USER if (user_debug & UDBG_BADABORT) { - printk(KERN_ERR "[%d] %s: bad data abort: code %d instr 0x%08lx\n", - task_pid_nr(current), current->comm, code, instr); + 
pr_err("[%d] %s: bad data abort: code %d instr 0x%08lx\n", + task_pid_nr(current), current->comm, code, instr); dump_instr(KERN_ERR, regs); show_pte(current->mm, addr); } @@ -788,29 +788,29 @@ baddataabort(int code, unsigned long instr, struct pt_regs *regs) void __readwrite_bug(const char *fn) { - printk("%s called, but not implemented\n", fn); + pr_err("%s called, but not implemented\n", fn); BUG(); } EXPORT_SYMBOL(__readwrite_bug); void __pte_error(const char *file, int line, pte_t pte) { - printk("%s:%d: bad pte %08llx.\n", file, line, (long long)pte_val(pte)); + pr_err("%s:%d: bad pte %08llx.\n", file, line, (long long)pte_val(pte)); } void __pmd_error(const char *file, int line, pmd_t pmd) { - printk("%s:%d: bad pmd %08llx.\n", file, line, (long long)pmd_val(pmd)); + pr_err("%s:%d: bad pmd %08llx.\n", file, line, (long long)pmd_val(pmd)); } void __pgd_error(const char *file, int line, pgd_t pgd) { - printk("%s:%d: bad pgd %08llx.\n", file, line, (long long)pgd_val(pgd)); + pr_err("%s:%d: bad pgd %08llx.\n", file, line, (long long)pgd_val(pgd)); } asmlinkage void __div0(void) { - printk("Division by zero in kernel.\n"); + pr_err("Division by zero in kernel.\n"); dump_stack(); } EXPORT_SYMBOL(__div0); diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c index cbb85c5fabf9..0bee233fef9a 100644 --- a/arch/arm/kernel/unwind.c +++ b/arch/arm/kernel/unwind.c @@ -471,7 +471,6 @@ int unwind_frame(struct stackframe *frame) void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk) { struct stackframe frame; - register unsigned long current_sp asm ("sp"); pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk); @@ -485,7 +484,7 @@ void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk) frame.pc = regs->ARM_lr; } else if (tsk == current) { frame.fp = (unsigned long)__builtin_frame_address(0); - frame.sp = current_sp; + frame.sp = current_stack_pointer; frame.lr = (unsigned long)__builtin_return_address(0); frame.pc = (unsigned long)unwind_backtrace; } else { diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index 8e95aa47457a..b31aa73e8076 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S @@ -8,6 +8,9 @@ #include <asm/thread_info.h> #include <asm/memory.h> #include <asm/page.h> +#ifdef CONFIG_ARM_KERNMEM_PERMS +#include <asm/pgtable.h> +#endif #define PROC_INFO \ . = ALIGN(4); \ @@ -90,6 +93,11 @@ SECTIONS _text = .; HEAD_TEXT } + +#ifdef CONFIG_ARM_KERNMEM_PERMS + . = ALIGN(1<<SECTION_SHIFT); +#endif + .text : { /* Real text segment */ _stext = .; /* Text and read-only data */ __exception_text_start = .; @@ -112,6 +120,9 @@ SECTIONS ARM_CPU_KEEP(PROC_INFO) } +#ifdef CONFIG_DEBUG_RODATA + . = ALIGN(1<<SECTION_SHIFT); +#endif RO_DATA(PAGE_SIZE) . = ALIGN(4); @@ -145,7 +156,11 @@ SECTIONS _etext = .; /* End of text and rodata section */ #ifndef CONFIG_XIP_KERNEL +# ifdef CONFIG_ARM_KERNMEM_PERMS + . = ALIGN(1<<SECTION_SHIFT); +# else . = ALIGN(PAGE_SIZE); +# endif __init_begin = .; #endif /* @@ -219,7 +234,11 @@ SECTIONS __data_loc = ALIGN(4); /* location in binary */ . = PAGE_OFFSET + TEXT_OFFSET; #else +#ifdef CONFIG_ARM_KERNMEM_PERMS + . = ALIGN(1<<SECTION_SHIFT); +#else . 
= ALIGN(THREAD_SIZE); +#endif __init_end = .; __data_loc = .; #endif diff --git a/arch/arm/kernel/xscale-cp0.c b/arch/arm/kernel/xscale-cp0.c index e42adc6bcdb1..bdbb8853a19b 100644 --- a/arch/arm/kernel/xscale-cp0.c +++ b/arch/arm/kernel/xscale-cp0.c @@ -157,15 +157,14 @@ static int __init xscale_cp0_init(void) if (cpu_has_iwmmxt()) { #ifndef CONFIG_IWMMXT - printk(KERN_WARNING "CAUTION: XScale iWMMXt coprocessor " - "detected, but kernel support is missing.\n"); + pr_warn("CAUTION: XScale iWMMXt coprocessor detected, but kernel support is missing.\n"); #else - printk(KERN_INFO "XScale iWMMXt coprocessor detected.\n"); + pr_info("XScale iWMMXt coprocessor detected.\n"); elf_hwcap |= HWCAP_IWMMXT; thread_register_notifier(&iwmmxt_notifier_block); #endif } else { - printk(KERN_INFO "XScale DSP coprocessor detected.\n"); + pr_info("XScale DSP coprocessor detected.\n"); thread_register_notifier(&dsp_notifier_block); cp_access |= 1; } diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S index 66a477a3e3cc..7a235b9952be 100644 --- a/arch/arm/lib/copy_from_user.S +++ b/arch/arm/lib/copy_from_user.S @@ -12,6 +12,7 @@ #include <linux/linkage.h> #include <asm/assembler.h> +#include <asm/unwind.h> /* * Prototype: @@ -77,6 +78,10 @@ stmdb sp!, {r0, r2, r3, \reg1, \reg2} .endm + .macro usave reg1 reg2 + UNWIND( .save {r0, r2, r3, \reg1, \reg2} ) + .endm + .macro exit reg1 reg2 add sp, sp, #8 ldmfd sp!, {r0, \reg1, \reg2} diff --git a/arch/arm/lib/copy_template.S b/arch/arm/lib/copy_template.S index 3bc8eb811a73..652e4d98cd47 100644 --- a/arch/arm/lib/copy_template.S +++ b/arch/arm/lib/copy_template.S @@ -53,6 +53,12 @@ * data as needed by the implementation including this code. Called * upon code entry. * + * usave reg1 reg2 + * + * Unwind annotation macro is corresponding for 'enter' macro. + * It tell unwinder that preserved some provided registers on the stack + * and additional data by a prior 'enter' macro. 
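A note on the enter/usave pairing documented above: it follows the standard ARM EHABI unwind-annotation pattern, where every stack push inside a .fnstart/.fnend region is described by a matching .save so the unwinder can walk past the frame. A minimal sketch, assuming the UNWIND() wrapper from <asm/unwind.h> that the files above now include (the routine name is made up):

ENTRY(demo_copy)
 UNWIND( .fnstart )
	stmfd	sp!, {r4, r5, lr}	@ push the registers this routine clobbers
 UNWIND( .save {r4, r5, lr} )	@ describe that push to the unwinder
	@ ... copy loop body ...
	ldmfd	sp!, {r4, r5, pc}	@ restore and return
 UNWIND( .fnend )
ENDPROC(demo_copy)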
+ * * exit reg1 reg2 * * Restore registers with the values previously saved with the @@ -67,7 +73,12 @@ */ + UNWIND( .fnstart ) enter r4, lr + UNWIND( .fnend ) + + UNWIND( .fnstart ) + usave r4, lr @ in first stmdb block subs r2, r2, #4 blt 8f @@ -79,6 +90,11 @@ 1: subs r2, r2, #(28) stmfd sp!, {r5 - r8} + UNWIND( .fnend ) + + UNWIND( .fnstart ) + usave r4, lr + UNWIND( .save {r5 - r8} ) @ in second stmfd block blt 5f CALGN( ands ip, r0, #31 ) @@ -144,7 +160,10 @@ CALGN( bcs 2b ) 7: ldmfd sp!, {r5 - r8} + UNWIND( .fnend ) @ end of second stmfd block + UNWIND( .fnstart ) + usave r4, lr @ still in first stmdb block 8: movs r2, r2, lsl #31 ldr1b r1, r3, ne, abort=21f ldr1b r1, r4, cs, abort=21f @@ -173,10 +192,13 @@ ldr1w r1, lr, abort=21f beq 17f bgt 18f + UNWIND( .fnend ) .macro forward_copy_shift pull push + UNWIND( .fnstart ) + usave r4, lr @ still in first stmdb block subs r2, r2, #28 blt 14f @@ -187,7 +209,11 @@ CALGN( bcc 15f ) 11: stmfd sp!, {r5 - r9} + UNWIND( .fnend ) + UNWIND( .fnstart ) + usave r4, lr + UNWIND( .save {r5 - r9} ) @ in new second stmfd block PLD( pld [r1, #0] ) PLD( subs r2, r2, #96 ) PLD( pld [r1, #28] ) @@ -221,7 +247,10 @@ PLD( bge 13b ) ldmfd sp!, {r5 - r9} + UNWIND( .fnend ) @ end of the second stmfd block + UNWIND( .fnstart ) + usave r4, lr @ still in first stmdb block 14: ands ip, r2, #28 beq 16f @@ -236,6 +265,7 @@ 16: sub r1, r1, #(\push / 8) b 8b + UNWIND( .fnend ) .endm diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S index d066df686e17..a9d3db16ecb5 100644 --- a/arch/arm/lib/copy_to_user.S +++ b/arch/arm/lib/copy_to_user.S @@ -12,6 +12,7 @@ #include <linux/linkage.h> #include <asm/assembler.h> +#include <asm/unwind.h> /* * Prototype: @@ -80,6 +81,10 @@ stmdb sp!, {r0, r2, r3, \reg1, \reg2} .endm + .macro usave reg1 reg2 + UNWIND( .save {r0, r2, r3, \reg1, \reg2} ) + .endm + .macro exit reg1 reg2 add sp, sp, #8 ldmfd sp!, {r0, \reg1, \reg2} diff --git a/arch/arm/lib/memcpy.S b/arch/arm/lib/memcpy.S index a9b9e2287a09..7797e81e40e0 100644 --- a/arch/arm/lib/memcpy.S +++ b/arch/arm/lib/memcpy.S @@ -12,6 +12,7 @@ #include <linux/linkage.h> #include <asm/assembler.h> +#include <asm/unwind.h> #define LDR1W_SHIFT 0 #define STR1W_SHIFT 0 @@ -48,6 +49,10 @@ stmdb sp!, {r0, \reg1, \reg2} .endm + .macro usave reg1 reg2 + UNWIND( .save {r0, \reg1, \reg2} ) + .endm + .macro exit reg1 reg2 ldmfd sp!, {r0, \reg1, \reg2} .endm diff --git a/arch/arm/lib/memmove.S b/arch/arm/lib/memmove.S index d1fc0c0c342c..69a9d47fc5ab 100644 --- a/arch/arm/lib/memmove.S +++ b/arch/arm/lib/memmove.S @@ -12,6 +12,7 @@ #include <linux/linkage.h> #include <asm/assembler.h> +#include <asm/unwind.h> .text @@ -27,12 +28,17 @@ */ ENTRY(memmove) + UNWIND( .fnstart ) subs ip, r0, r1 cmphi r2, ip bls memcpy stmfd sp!, {r0, r4, lr} + UNWIND( .fnend ) + + UNWIND( .fnstart ) + UNWIND( .save {r0, r4, lr} ) @ in first stmfd block add r1, r1, r2 add r0, r0, r2 subs r2, r2, #4 @@ -45,6 +51,11 @@ ENTRY(memmove) 1: subs r2, r2, #(28) stmfd sp!, {r5 - r8} + UNWIND( .fnend ) + + UNWIND( .fnstart ) + UNWIND( .save {r0, r4, lr} ) + UNWIND( .save {r5 - r8} ) @ in second stmfd block blt 5f CALGN( ands ip, r0, #31 ) @@ -97,6 +108,10 @@ ENTRY(memmove) CALGN( bcs 2b ) 7: ldmfd sp!, {r5 - r8} + UNWIND( .fnend ) @ end of second stmfd block + + UNWIND( .fnstart ) + UNWIND( .save {r0, r4, lr} ) @ still in first stmfd block 8: movs r2, r2, lsl #31 ldrneb r3, [r1, #-1]! 
@@ -124,10 +139,13 @@ ENTRY(memmove) ldr r3, [r1, #0] beq 17f blt 18f + UNWIND( .fnend ) .macro backward_copy_shift push pull + UNWIND( .fnstart ) + UNWIND( .save {r0, r4, lr} ) @ still in first stmfd block subs r2, r2, #28 blt 14f @@ -137,6 +155,11 @@ ENTRY(memmove) CALGN( bcc 15f ) 11: stmfd sp!, {r5 - r9} + UNWIND( .fnend ) + + UNWIND( .fnstart ) + UNWIND( .save {r0, r4, lr} ) + UNWIND( .save {r5 - r9} ) @ in new second stmfd block PLD( pld [r1, #-4] ) PLD( subs r2, r2, #96 ) @@ -171,6 +194,10 @@ ENTRY(memmove) PLD( bge 13b ) ldmfd sp!, {r5 - r9} + UNWIND( .fnend ) @ end of the second stmfd block + + UNWIND( .fnstart ) + UNWIND( .save {r0, r4, lr} ) @ still in first stmfd block 14: ands ip, r2, #28 beq 16f @@ -186,6 +213,7 @@ ENTRY(memmove) 16: add r1, r1, #(\pull / 8) b 8b + UNWIND( .fnend ) .endm diff --git a/arch/arm/lib/memset.S b/arch/arm/lib/memset.S index 671455c854fa..a4ee97b5a2bf 100644 --- a/arch/arm/lib/memset.S +++ b/arch/arm/lib/memset.S @@ -11,11 +11,13 @@ */ #include <linux/linkage.h> #include <asm/assembler.h> +#include <asm/unwind.h> .text .align 5 ENTRY(memset) +UNWIND( .fnstart ) ands r3, r0, #3 @ 1 unaligned? mov ip, r0 @ preserve r0 as return value bne 6f @ 1 @@ -34,6 +36,9 @@ ENTRY(memset) * We need 2 extra registers for this loop - use r8 and the LR */ stmfd sp!, {r8, lr} +UNWIND( .fnend ) +UNWIND( .fnstart ) +UNWIND( .save {r8, lr} ) mov r8, r1 mov lr, r1 @@ -53,6 +58,7 @@ ENTRY(memset) tst r2, #16 stmneia ip!, {r1, r3, r8, lr} ldmfd sp!, {r8, lr} +UNWIND( .fnend ) #else @@ -62,6 +68,9 @@ ENTRY(memset) */ stmfd sp!, {r4-r8, lr} +UNWIND( .fnend ) +UNWIND( .fnstart ) +UNWIND( .save {r4-r8, lr} ) mov r4, r1 mov r5, r1 mov r6, r1 @@ -94,9 +103,11 @@ ENTRY(memset) tst r2, #16 stmneia ip!, {r4-r7} ldmfd sp!, {r4-r8, lr} +UNWIND( .fnend ) #endif +UNWIND( .fnstart ) 4: tst r2, #8 stmneia ip!, {r1, r3} tst r2, #4 @@ -120,4 +131,5 @@ ENTRY(memset) strb r1, [ip], #1 @ 1 add r2, r2, r3 @ 1 (r2 = r2 - (4 - r3)) b 1b +UNWIND( .fnend ) ENDPROC(memset) diff --git a/arch/arm/lib/memzero.S b/arch/arm/lib/memzero.S index 385ccb306fa2..0eded952e089 100644 --- a/arch/arm/lib/memzero.S +++ b/arch/arm/lib/memzero.S @@ -9,6 +9,7 @@ */ #include <linux/linkage.h> #include <asm/assembler.h> +#include <asm/unwind.h> .text .align 5 @@ -18,6 +19,7 @@ * mis-aligned by, and r1 is the number of bytes. If r1 < 4, then we * don't bother; we use byte stores instead. */ +UNWIND( .fnstart ) 1: subs r1, r1, #4 @ 1 do we have enough blt 5f @ 1 bytes to align with? cmp r3, #2 @ 1 @@ -47,6 +49,9 @@ ENTRY(__memzero) * use the LR */ str lr, [sp, #-4]! @ 1 +UNWIND( .fnend ) +UNWIND( .fnstart ) +UNWIND( .save {lr} ) mov ip, r2 @ 1 mov lr, r2 @ 1 @@ -66,6 +71,7 @@ ENTRY(__memzero) tst r1, #16 @ 1 16 bytes or more? stmneia r0!, {r2, r3, ip, lr} @ 4 ldr lr, [sp], #4 @ 1 +UNWIND( .fnend ) #else @@ -75,6 +81,9 @@ ENTRY(__memzero) */ stmfd sp!, {r4-r7, lr} +UNWIND( .fnend ) +UNWIND( .fnstart ) +UNWIND( .save {r4-r7, lr} ) mov r4, r2 mov r5, r2 mov r6, r2 @@ -105,9 +114,11 @@ ENTRY(__memzero) tst r1, #16 stmneia r0!, {r4-r7} ldmfd sp!, {r4-r7, lr} +UNWIND( .fnend ) #endif +UNWIND( .fnstart ) 4: tst r1, #8 @ 1 8 bytes or more? stmneia r0!, {r2, r3} @ 2 tst r1, #4 @ 1 4 bytes or more? 
@@ -122,4 +133,5 @@ ENTRY(__memzero) tst r1, #1 @ 1 a byte left over strneb r2, [r0], #1 @ 1 ret lr @ 1 +UNWIND( .fnend ) ENDPROC(__memzero) diff --git a/arch/arm/mach-bcm/Kconfig b/arch/arm/mach-bcm/Kconfig index 1bd39b45d08b..aaeec78c3ec4 100644 --- a/arch/arm/mach-bcm/Kconfig +++ b/arch/arm/mach-bcm/Kconfig @@ -143,6 +143,7 @@ config ARCH_BRCMSTB select HAVE_ARM_ARCH_TIMER select BRCMSTB_GISB_ARB select BRCMSTB_L2_IRQ + select BCM7120_L2_IRQ help Say Y if you intend to run the kernel on a Broadcom ARM-based STB chipset. diff --git a/arch/arm/mach-davinci/board-dm355-evm.c b/arch/arm/mach-davinci/board-dm355-evm.c index 06d63d5651f3..b46b4d25f93e 100644 --- a/arch/arm/mach-davinci/board-dm355-evm.c +++ b/arch/arm/mach-davinci/board-dm355-evm.c @@ -294,7 +294,7 @@ static struct vpbe_output dm355evm_vpbe_outputs[] = { .default_mode = "ntsc", .num_modes = ARRAY_SIZE(dm355evm_enc_preset_timing), .modes = dm355evm_enc_preset_timing, - .if_params = V4L2_MBUS_FMT_FIXED, + .if_params = MEDIA_BUS_FMT_FIXED, }, }; diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c index e08a8684ead2..a756003595e9 100644 --- a/arch/arm/mach-davinci/board-dm365-evm.c +++ b/arch/arm/mach-davinci/board-dm365-evm.c @@ -485,7 +485,7 @@ static struct vpbe_output dm365evm_vpbe_outputs[] = { .default_mode = "ntsc", .num_modes = ARRAY_SIZE(dm365evm_enc_std_timing), .modes = dm365evm_enc_std_timing, - .if_params = V4L2_MBUS_FMT_FIXED, + .if_params = MEDIA_BUS_FMT_FIXED, }, { .output = { @@ -498,7 +498,7 @@ static struct vpbe_output dm365evm_vpbe_outputs[] = { .default_mode = "480p59_94", .num_modes = ARRAY_SIZE(dm365evm_enc_preset_timing), .modes = dm365evm_enc_preset_timing, - .if_params = V4L2_MBUS_FMT_FIXED, + .if_params = MEDIA_BUS_FMT_FIXED, }, }; diff --git a/arch/arm/mach-davinci/cpuidle.c b/arch/arm/mach-davinci/cpuidle.c index f1ac1c94ac0f..e365c1bb1265 100644 --- a/arch/arm/mach-davinci/cpuidle.c +++ b/arch/arm/mach-davinci/cpuidle.c @@ -66,7 +66,6 @@ static struct cpuidle_driver davinci_idle_driver = { .enter = davinci_enter_idle, .exit_latency = 10, .target_residency = 10000, - .flags = CPUIDLE_FLAG_TIME_VALID, .name = "DDR SR", .desc = "WFI and DDR Self Refresh", }, @@ -92,7 +91,6 @@ static int __init davinci_cpuidle_probe(struct platform_device *pdev) static struct platform_driver davinci_cpuidle_driver = { .driver = { .name = "cpuidle-davinci", - .owner = THIS_MODULE, }, }; diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c index 2f3ed3a58d57..9cbeda798584 100644 --- a/arch/arm/mach-davinci/dm355.c +++ b/arch/arm/mach-davinci/dm355.c @@ -785,14 +785,13 @@ static struct resource dm355_v4l2_disp_resources[] = { }, }; -static int dm355_vpbe_setup_pinmux(enum v4l2_mbus_pixelcode if_type, - int field) +static int dm355_vpbe_setup_pinmux(u32 if_type, int field) { switch (if_type) { - case V4L2_MBUS_FMT_SGRBG8_1X8: + case MEDIA_BUS_FMT_SGRBG8_1X8: davinci_cfg_reg(DM355_VOUT_FIELD_G70); break; - case V4L2_MBUS_FMT_YUYV10_1X20: + case MEDIA_BUS_FMT_YUYV10_1X20: if (field) davinci_cfg_reg(DM355_VOUT_FIELD); else diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c index 0ae8114f5cc9..e3a3c54b6832 100644 --- a/arch/arm/mach-davinci/dm365.c +++ b/arch/arm/mach-davinci/dm365.c @@ -1306,16 +1306,15 @@ static struct resource dm365_v4l2_disp_resources[] = { }, }; -static int dm365_vpbe_setup_pinmux(enum v4l2_mbus_pixelcode if_type, - int field) +static int dm365_vpbe_setup_pinmux(u32 if_type, int field) { switch (if_type) { - case 
V4L2_MBUS_FMT_SGRBG8_1X8: + case MEDIA_BUS_FMT_SGRBG8_1X8: davinci_cfg_reg(DM365_VOUT_FIELD_G81); davinci_cfg_reg(DM365_VOUT_COUTL_EN); davinci_cfg_reg(DM365_VOUT_COUTH_EN); break; - case V4L2_MBUS_FMT_YUYV10_1X20: + case MEDIA_BUS_FMT_YUYV10_1X20: if (field) davinci_cfg_reg(DM365_VOUT_FIELD); else diff --git a/arch/arm/mach-davinci/pm.c b/arch/arm/mach-davinci/pm.c index a508fe587af7..07e23ba61f3a 100644 --- a/arch/arm/mach-davinci/pm.c +++ b/arch/arm/mach-davinci/pm.c @@ -148,7 +148,6 @@ static int __exit davinci_pm_remove(struct platform_device *pdev) static struct platform_driver davinci_pm_driver = { .driver = { .name = "pm-davinci", - .owner = THIS_MODULE, }, .remove = __exit_p(davinci_pm_remove), }; diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig index b9e3f1c61baf..e4a00bafffc1 100644 --- a/arch/arm/mach-exynos/Kconfig +++ b/arch/arm/mach-exynos/Kconfig @@ -34,7 +34,7 @@ config ARCH_EXYNOS3 bool "SAMSUNG EXYNOS3" select ARM_CPU_SUSPEND if PM help - Samsung EXYNOS3 (Crotex-A7) SoC based systems + Samsung EXYNOS3 (Cortex-A7) SoC based systems config ARCH_EXYNOS4 bool "SAMSUNG EXYNOS4" diff --git a/arch/arm/mach-imx/cpuidle-imx5.c b/arch/arm/mach-imx/cpuidle-imx5.c index 5a47e3c6172f..3feca526d16b 100644 --- a/arch/arm/mach-imx/cpuidle-imx5.c +++ b/arch/arm/mach-imx/cpuidle-imx5.c @@ -24,7 +24,6 @@ static struct cpuidle_driver imx5_cpuidle_driver = { .enter = imx5_cpuidle_enter, .exit_latency = 2, .target_residency = 1, - .flags = CPUIDLE_FLAG_TIME_VALID, .name = "IMX5 SRPG", .desc = "CPU state retained,powered off", }, diff --git a/arch/arm/mach-imx/cpuidle-imx6q.c b/arch/arm/mach-imx/cpuidle-imx6q.c index aa935787b743..d76d08623f9f 100644 --- a/arch/arm/mach-imx/cpuidle-imx6q.c +++ b/arch/arm/mach-imx/cpuidle-imx6q.c @@ -53,8 +53,7 @@ static struct cpuidle_driver imx6q_cpuidle_driver = { { .exit_latency = 50, .target_residency = 75, - .flags = CPUIDLE_FLAG_TIME_VALID | - CPUIDLE_FLAG_TIMER_STOP, + .flags = CPUIDLE_FLAG_TIMER_STOP, .enter = imx6q_enter_wait, .name = "WAIT", .desc = "Clock off", diff --git a/arch/arm/mach-imx/cpuidle-imx6sl.c b/arch/arm/mach-imx/cpuidle-imx6sl.c index d4b6b8171fa9..7d92e6584551 100644 --- a/arch/arm/mach-imx/cpuidle-imx6sl.c +++ b/arch/arm/mach-imx/cpuidle-imx6sl.c @@ -40,8 +40,7 @@ static struct cpuidle_driver imx6sl_cpuidle_driver = { { .exit_latency = 50, .target_residency = 75, - .flags = CPUIDLE_FLAG_TIME_VALID | - CPUIDLE_FLAG_TIMER_STOP, + .flags = CPUIDLE_FLAG_TIMER_STOP, .enter = imx6sl_enter_wait, .name = "WAIT", .desc = "Clock off", diff --git a/arch/arm/mach-imx/mmdc.c b/arch/arm/mach-imx/mmdc.c index 3729d90cfa46..a377f95033ae 100644 --- a/arch/arm/mach-imx/mmdc.c +++ b/arch/arm/mach-imx/mmdc.c @@ -76,7 +76,6 @@ static struct of_device_id imx_mmdc_dt_ids[] = { static struct platform_driver imx_mmdc_driver = { .driver = { .name = "imx-mmdc", - .owner = THIS_MODULE, .of_match_table = imx_mmdc_dt_ids, }, .probe = imx_mmdc_probe, diff --git a/arch/arm/mach-iop13xx/msi.c b/arch/arm/mach-iop13xx/msi.c index e7730cf9c15d..9f89e76dfbb9 100644 --- a/arch/arm/mach-iop13xx/msi.c +++ b/arch/arm/mach-iop13xx/msi.c @@ -126,10 +126,10 @@ static void iop13xx_msi_nop(struct irq_data *d) static struct irq_chip iop13xx_msi_chip = { .name = "PCI-MSI", .irq_ack = iop13xx_msi_nop, - .irq_enable = unmask_msi_irq, - .irq_disable = mask_msi_irq, - .irq_mask = mask_msi_irq, - .irq_unmask = unmask_msi_irq, + .irq_enable = pci_msi_unmask_irq, + .irq_disable = pci_msi_mask_irq, + .irq_mask = pci_msi_mask_irq, + .irq_unmask = 
pci_msi_unmask_irq, }; int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc) @@ -153,7 +153,7 @@ int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc) id = iop13xx_cpu_id(); msg.data = (id << IOP13XX_MU_MIMR_CORE_SELECT) | (irq & 0x7f); - write_msi_msg(irq, &msg); + pci_write_msi_msg(irq, &msg); irq_set_chip_and_handler(irq, &iop13xx_msi_chip, handle_simple_irq); return 0; diff --git a/arch/arm/mach-mmp/gplugd.c b/arch/arm/mach-mmp/gplugd.c index d81b2475e67e..22762a1f9f72 100644 --- a/arch/arm/mach-mmp/gplugd.c +++ b/arch/arm/mach-mmp/gplugd.c @@ -158,6 +158,7 @@ struct pxa168_eth_platform_data gplugd_eth_platform_data = { .port_number = 0, .phy_addr = 0, .speed = 0, /* Autonagotiation */ + .intf = PHY_INTERFACE_MODE_RMII, .init = gplugd_eth_init, }; diff --git a/arch/arm/mach-msm/clock-pcom.c b/arch/arm/mach-msm/clock-pcom.c index 9a80449518e6..f5b69d736ee5 100644 --- a/arch/arm/mach-msm/clock-pcom.c +++ b/arch/arm/mach-msm/clock-pcom.c @@ -169,7 +169,6 @@ static struct platform_driver msm_clock_pcom_driver = { .probe = msm_clock_pcom_probe, .driver = { .name = "msm-clock-pcom", - .owner = THIS_MODULE, }, }; module_platform_driver(msm_clock_pcom_driver); diff --git a/arch/arm/mach-msm/smd.c b/arch/arm/mach-msm/smd.c index b1588a1ea2f8..7550f5a08956 100644 --- a/arch/arm/mach-msm/smd.c +++ b/arch/arm/mach-msm/smd.c @@ -1019,7 +1019,6 @@ static struct platform_driver msm_smd_driver = { .probe = msm_smd_probe, .driver = { .name = MODULE_NAME, - .owner = THIS_MODULE, }, }; diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig index 6e249324fdd7..f0edec199cd4 100644 --- a/arch/arm/mach-omap2/Kconfig +++ b/arch/arm/mach-omap2/Kconfig @@ -278,14 +278,6 @@ config MACH_SBC3530 default y select OMAP_PACKAGE_CUS -config OMAP3_EMU - bool "OMAP3 debugging peripherals" - depends on ARCH_OMAP3 - select ARM_AMBA - select OC_ETM - help - Say Y here to enable debugging hardware of omap3 - config OMAP3_SDRC_AC_TIMING bool "Enable SDRC AC timing register changes" depends on ARCH_OMAP3 diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile index 08cc94474d17..5d27dfdef66b 100644 --- a/arch/arm/mach-omap2/Makefile +++ b/arch/arm/mach-omap2/Makefile @@ -228,7 +228,6 @@ obj-$(CONFIG_SOC_OMAP5) += omap_hwmod_54xx_data.o obj-$(CONFIG_SOC_DRA7XX) += omap_hwmod_7xx_data.o # EMU peripherals -obj-$(CONFIG_OMAP3_EMU) += emu.o obj-$(CONFIG_HW_PERF_EVENTS) += pmu.o iommu-$(CONFIG_OMAP_IOMMU) := omap-iommu.o diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c index 03502abe4f2e..14edcd7a2a1d 100644 --- a/arch/arm/mach-omap2/board-rx51-peripherals.c +++ b/arch/arm/mach-omap2/board-rx51-peripherals.c @@ -23,6 +23,7 @@ #include <linux/regulator/machine.h> #include <linux/gpio.h> #include <linux/gpio_keys.h> +#include <linux/gpio/machine.h> #include <linux/omap-gpmc.h> #include <linux/mmc/host.h> #include <linux/power/isp1704_charger.h> @@ -38,7 +39,6 @@ #include <sound/tlv320aic3x.h> #include <sound/tpa6130a2-plat.h> -#include <media/radio-si4713.h> #include <media/si4713.h> #include <linux/platform_data/leds-lp55xx.h> @@ -756,46 +756,17 @@ static struct regulator_init_data rx51_vintdig = { }, }; -static const char * const si4713_supply_names[] = { - "vio", - "vdd", -}; - -static struct si4713_platform_data rx51_si4713_i2c_data __initdata_or_module = { - .supplies = ARRAY_SIZE(si4713_supply_names), - .supply_names = si4713_supply_names, - .gpio_reset = RX51_FMTX_RESET_GPIO, -}; - -static struct i2c_board_info 
rx51_si4713_board_info __initdata_or_module = { - I2C_BOARD_INFO("si4713", SI4713_I2C_ADDR_BUSEN_HIGH), - .platform_data = &rx51_si4713_i2c_data, -}; - -static struct radio_si4713_platform_data rx51_si4713_data __initdata_or_module = { - .i2c_bus = 2, - .subdev_board_info = &rx51_si4713_board_info, -}; - -static struct platform_device rx51_si4713_dev __initdata_or_module = { - .name = "radio-si4713", - .id = -1, - .dev = { - .platform_data = &rx51_si4713_data, +static struct gpiod_lookup_table rx51_fmtx_gpios_table = { + .dev_id = "2-0063", + .table = { + GPIO_LOOKUP("gpio.6", 3, "reset", GPIO_ACTIVE_HIGH), /* 163 */ + { }, }, }; -static __init void rx51_init_si4713(void) +static __init void rx51_gpio_init(void) { - int err; - - err = gpio_request_one(RX51_FMTX_IRQ, GPIOF_DIR_IN, "si4713 irq"); - if (err) { - printk(KERN_ERR "Cannot request si4713 irq gpio. %d\n", err); - return; - } - rx51_si4713_board_info.irq = gpio_to_irq(RX51_FMTX_IRQ); - platform_device_register(&rx51_si4713_dev); + gpiod_add_lookup_table(&rx51_fmtx_gpios_table); } static int rx51_twlgpio_setup(struct device *dev, unsigned gpio, unsigned n) @@ -1025,7 +996,19 @@ static struct aic3x_pdata rx51_aic3x_data2 = { .gpio_reset = 60, }; +#if IS_ENABLED(CONFIG_I2C_SI4713) && IS_ENABLED(CONFIG_PLATFORM_SI4713) +static struct si4713_platform_data rx51_si4713_platform_data = { + .is_platform_device = true +}; +#endif + static struct i2c_board_info __initdata rx51_peripherals_i2c_board_info_2[] = { +#if IS_ENABLED(CONFIG_I2C_SI4713) && IS_ENABLED(CONFIG_PLATFORM_SI4713) + { + I2C_BOARD_INFO("si4713", 0x63), + .platform_data = &rx51_si4713_platform_data, + }, +#endif { I2C_BOARD_INFO("tlv320aic3x", 0x18), .platform_data = &rx51_aic3x_data, @@ -1066,6 +1049,10 @@ static struct i2c_board_info __initdata rx51_peripherals_i2c_board_info_3[] = { static int __init rx51_i2c_init(void) { +#if IS_ENABLED(CONFIG_I2C_SI4713) && IS_ENABLED(CONFIG_PLATFORM_SI4713) + int err; +#endif + if ((system_rev >= SYSTEM_REV_S_USES_VAUX3 && system_rev < 0x100) || system_rev >= SYSTEM_REV_B_USES_VAUX3) { rx51_twldata.vaux3 = &rx51_vaux3_mmc; @@ -1083,6 +1070,14 @@ static int __init rx51_i2c_init(void) rx51_twldata.vdac->constraints.name = "VDAC"; omap_pmic_init(1, 2200, "twl5030", 7 + OMAP_INTC_START, &rx51_twldata); +#if IS_ENABLED(CONFIG_I2C_SI4713) && IS_ENABLED(CONFIG_PLATFORM_SI4713) + err = gpio_request_one(RX51_FMTX_IRQ, GPIOF_DIR_IN, "si4713 irq"); + if (err) { + printk(KERN_ERR "Cannot request si4713 irq gpio. 
%d\n", err); + return err; + } + rx51_peripherals_i2c_board_info_2[0].irq = gpio_to_irq(RX51_FMTX_IRQ); +#endif omap_register_i2c_bus(2, 100, rx51_peripherals_i2c_board_info_2, ARRAY_SIZE(rx51_peripherals_i2c_board_info_2)); #if defined(CONFIG_SENSORS_LIS3_I2C) || defined(CONFIG_SENSORS_LIS3_I2C_MODULE) @@ -1269,13 +1264,13 @@ static void __init rx51_init_omap3_rom_rng(void) void __init rx51_peripherals_init(void) { + rx51_gpio_init(); rx51_i2c_init(); regulator_has_full_constraints(); gpmc_onenand_init(board_onenand_data); rx51_add_gpio_keys(); rx51_init_wl1251(); rx51_init_tsc2005(); - rx51_init_si4713(); rx51_init_lirc(); spi_register_board_info(rx51_peripherals_spi_board_info, ARRAY_SIZE(rx51_peripherals_spi_board_info)); diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c index e18709d3b95d..aa7b379e2661 100644 --- a/arch/arm/mach-omap2/cpuidle34xx.c +++ b/arch/arm/mach-omap2/cpuidle34xx.c @@ -265,7 +265,6 @@ static struct cpuidle_driver omap3_idle_driver = { .enter = omap3_enter_idle_bm, .exit_latency = 2 + 2, .target_residency = 5, - .flags = CPUIDLE_FLAG_TIME_VALID, .name = "C1", .desc = "MPU ON + CORE ON", }, @@ -273,7 +272,6 @@ static struct cpuidle_driver omap3_idle_driver = { .enter = omap3_enter_idle_bm, .exit_latency = 10 + 10, .target_residency = 30, - .flags = CPUIDLE_FLAG_TIME_VALID, .name = "C2", .desc = "MPU ON + CORE ON", }, @@ -281,7 +279,6 @@ static struct cpuidle_driver omap3_idle_driver = { .enter = omap3_enter_idle_bm, .exit_latency = 50 + 50, .target_residency = 300, - .flags = CPUIDLE_FLAG_TIME_VALID, .name = "C3", .desc = "MPU RET + CORE ON", }, @@ -289,7 +286,6 @@ static struct cpuidle_driver omap3_idle_driver = { .enter = omap3_enter_idle_bm, .exit_latency = 1500 + 1800, .target_residency = 4000, - .flags = CPUIDLE_FLAG_TIME_VALID, .name = "C4", .desc = "MPU OFF + CORE ON", }, @@ -297,7 +293,6 @@ static struct cpuidle_driver omap3_idle_driver = { .enter = omap3_enter_idle_bm, .exit_latency = 2500 + 7500, .target_residency = 12000, - .flags = CPUIDLE_FLAG_TIME_VALID, .name = "C5", .desc = "MPU RET + CORE RET", }, @@ -305,7 +300,6 @@ static struct cpuidle_driver omap3_idle_driver = { .enter = omap3_enter_idle_bm, .exit_latency = 3000 + 8500, .target_residency = 15000, - .flags = CPUIDLE_FLAG_TIME_VALID, .name = "C6", .desc = "MPU OFF + CORE RET", }, @@ -313,7 +307,6 @@ static struct cpuidle_driver omap3_idle_driver = { .enter = omap3_enter_idle_bm, .exit_latency = 10000 + 30000, .target_residency = 30000, - .flags = CPUIDLE_FLAG_TIME_VALID, .name = "C7", .desc = "MPU OFF + CORE OFF", }, diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c index 2498ab025fa2..01e398a868bc 100644 --- a/arch/arm/mach-omap2/cpuidle44xx.c +++ b/arch/arm/mach-omap2/cpuidle44xx.c @@ -196,7 +196,6 @@ static struct cpuidle_driver omap4_idle_driver = { /* C1 - CPU0 ON + CPU1 ON + MPU ON */ .exit_latency = 2 + 2, .target_residency = 5, - .flags = CPUIDLE_FLAG_TIME_VALID, .enter = omap_enter_idle_simple, .name = "C1", .desc = "CPUx ON, MPUSS ON" @@ -205,7 +204,7 @@ static struct cpuidle_driver omap4_idle_driver = { /* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */ .exit_latency = 328 + 440, .target_residency = 960, - .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED, + .flags = CPUIDLE_FLAG_COUPLED, .enter = omap_enter_idle_coupled, .name = "C2", .desc = "CPUx OFF, MPUSS CSWR", @@ -214,7 +213,7 @@ static struct cpuidle_driver omap4_idle_driver = { /* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */ .exit_latency = 460 + 518, .target_residency 
= 1100, - .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED, + .flags = CPUIDLE_FLAG_COUPLED, .enter = omap_enter_idle_coupled, .name = "C3", .desc = "CPUx OFF, MPUSS OSWR", diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c index a7bc4ce81e19..1afb50d6d636 100644 --- a/arch/arm/mach-omap2/devices.c +++ b/arch/arm/mach-omap2/devices.c @@ -67,28 +67,6 @@ static int __init omap3_l3_init(void) } omap_postcore_initcall(omap3_l3_init); -#if defined(CONFIG_VIDEO_OMAP2) || defined(CONFIG_VIDEO_OMAP2_MODULE) - -static struct resource omap2cam_resources[] = { - { - .start = OMAP24XX_CAMERA_BASE, - .end = OMAP24XX_CAMERA_BASE + 0xfff, - .flags = IORESOURCE_MEM, - }, - { - .start = 24 + OMAP_INTC_START, - .flags = IORESOURCE_IRQ, - } -}; - -static struct platform_device omap2cam_device = { - .name = "omap24xxcam", - .id = -1, - .num_resources = ARRAY_SIZE(omap2cam_resources), - .resource = omap2cam_resources, -}; -#endif - #if defined(CONFIG_IOMMU_API) #include <linux/platform_data/iommu-omap.h> @@ -211,14 +189,6 @@ int omap3_init_camera(struct isp_platform_data *pdata) #endif -static inline void omap_init_camera(void) -{ -#if defined(CONFIG_VIDEO_OMAP2) || defined(CONFIG_VIDEO_OMAP2_MODULE) - if (cpu_is_omap24xx()) - platform_device_register(&omap2cam_device); -#endif -} - #if defined(CONFIG_OMAP2PLUS_MBOX) || defined(CONFIG_OMAP2PLUS_MBOX_MODULE) static inline void __init omap_init_mbox(void) { @@ -397,7 +367,6 @@ static int __init omap2_init_devices(void) * in alphabetical order so they're easier to sort through. */ omap_init_audio(); - omap_init_camera(); /* If dtb is there, the devices will be created dynamically */ if (!of_have_populated_dt()) { omap_init_mbox(); diff --git a/arch/arm/mach-omap2/emu.c b/arch/arm/mach-omap2/emu.c deleted file mode 100644 index cbeaca2d7695..000000000000 --- a/arch/arm/mach-omap2/emu.c +++ /dev/null @@ -1,50 +0,0 @@ -/* - * emu.c - * - * ETM and ETB CoreSight components' resources as found in OMAP3xxx. - * - * Copyright (C) 2009 Nokia Corporation. - * Alexander Shishkin - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#include <linux/kernel.h> -#include <linux/init.h> -#include <linux/types.h> -#include <linux/module.h> -#include <linux/device.h> -#include <linux/amba/bus.h> -#include <linux/io.h> -#include <linux/clk.h> -#include <linux/err.h> - -#include "soc.h" -#include "iomap.h" - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Alexander Shishkin"); - -/* Cortex CoreSight components within omap3xxx EMU */ -#define ETM_BASE (L4_EMU_34XX_PHYS + 0x10000) -#define DBG_BASE (L4_EMU_34XX_PHYS + 0x11000) -#define ETB_BASE (L4_EMU_34XX_PHYS + 0x1b000) -#define DAPCTL (L4_EMU_34XX_PHYS + 0x1d000) - -static AMBA_APB_DEVICE(omap3_etb, "etb", 0x000bb907, ETB_BASE, { }, NULL); -static AMBA_APB_DEVICE(omap3_etm, "etm", 0x102bb921, ETM_BASE, { }, NULL); - -static int __init emu_init(void) -{ - if (!cpu_is_omap34xx()) - return -ENODEV; - - amba_device_register(&omap3_etb_device, &iomem_resource); - amba_device_register(&omap3_etm_device, &iomem_resource); - - return 0; -} - -omap_subsys_initcall(emu_init); diff --git a/arch/arm/mach-omap2/twl-common.c b/arch/arm/mach-omap2/twl-common.c index b0d54dae1bcb..4457e731f7a4 100644 --- a/arch/arm/mach-omap2/twl-common.c +++ b/arch/arm/mach-omap2/twl-common.c @@ -91,18 +91,8 @@ void __init omap_pmic_late_init(void) } #if defined(CONFIG_ARCH_OMAP3) -struct phy_consumer consumers[] = { - PHY_CONSUMER("musb-hdrc.0", "usb"), -}; - -struct phy_init_data init_data = { - .consumers = consumers, - .num_consumers = ARRAY_SIZE(consumers), -}; - static struct twl4030_usb_data omap3_usb_pdata = { - .usb_mode = T2_USB_MODE_ULPI, - .init_data = &init_data, + .usb_mode = T2_USB_MODE_ULPI, }; static int omap3_batt_table[] = { diff --git a/arch/arm/mach-prima2/pm.c b/arch/arm/mach-prima2/pm.c index 96e9bc102117..d99d08eeb966 100644 --- a/arch/arm/mach-prima2/pm.c +++ b/arch/arm/mach-prima2/pm.c @@ -135,7 +135,6 @@ static struct platform_driver sirfsoc_memc_driver = { .probe = sirfsoc_memc_probe, .driver = { .name = "sirfsoc-memc", - .owner = THIS_MODULE, .of_match_table = memc_ids, }, }; diff --git a/arch/arm/mach-prima2/rstc.c b/arch/arm/mach-prima2/rstc.c index 3dffcb2d714e..e1f1f86f6a95 100644 --- a/arch/arm/mach-prima2/rstc.c +++ b/arch/arm/mach-prima2/rstc.c @@ -114,7 +114,6 @@ static struct platform_driver sirfsoc_rstc_driver = { .probe = sirfsoc_rstc_probe, .driver = { .name = "sirfsoc_rstc", - .owner = THIS_MODULE, .of_match_table = rstc_ids, }, }; diff --git a/arch/arm/mach-prima2/rtciobrg.c b/arch/arm/mach-prima2/rtciobrg.c index a17c88b74fa1..70a0b475062b 100644 --- a/arch/arm/mach-prima2/rtciobrg.c +++ b/arch/arm/mach-prima2/rtciobrg.c @@ -123,7 +123,6 @@ static struct platform_driver sirfsoc_rtciobrg_driver = { .probe = sirfsoc_rtciobrg_probe, .driver = { .name = "sirfsoc-rtciobrg", - .owner = THIS_MODULE, .of_match_table = rtciobrg_ids, }, }; diff --git a/arch/arm/mach-pxa/pxa3xx-ulpi.c b/arch/arm/mach-pxa/pxa3xx-ulpi.c index 614003e8b081..1c85275cb768 100644 --- a/arch/arm/mach-pxa/pxa3xx-ulpi.c +++ b/arch/arm/mach-pxa/pxa3xx-ulpi.c @@ -379,7 +379,6 @@ static int pxa3xx_u2d_remove(struct platform_device *pdev) static struct platform_driver pxa3xx_u2d_ulpi_driver = { .driver = { .name = "pxa3xx-u2d", - .owner = THIS_MODULE, }, .probe = pxa3xx_u2d_probe, .remove = pxa3xx_u2d_remove, diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c index 840c3a48e720..962a7f31f596 100644 --- a/arch/arm/mach-pxa/spitz.c +++ b/arch/arm/mach-pxa/spitz.c @@ -924,6 +924,14 @@ static inline void spitz_i2c_init(void) {} #endif 
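The .owner = THIS_MODULE initialisers being dropped from platform_driver definitions throughout the hunks above are redundant: platform_driver_register() is a macro that passes THIS_MODULE on the caller's behalf, so the driver core records the owning module automatically. A minimal sketch with a hypothetical driver:

#include <linux/module.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver demo_driver = {
	.probe	= demo_probe,
	.driver	= {
		.name	= "demo",
		/* no .owner: the register macro supplies THIS_MODULE */
	},
};
module_platform_driver(demo_driver);
MODULE_LICENSE("GPL");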
/****************************************************************************** + * Audio devices + ******************************************************************************/ +static inline void spitz_audio_init(void) +{ + platform_device_register_simple("spitz-audio", -1, NULL, 0); +} + +/****************************************************************************** * Machine init ******************************************************************************/ static void spitz_poweroff(void) @@ -970,6 +978,7 @@ static void __init spitz_init(void) spitz_nor_init(); spitz_nand_init(); spitz_i2c_init(); + spitz_audio_init(); } static void __init spitz_fixup(struct tag *tags, char **cmdline) diff --git a/arch/arm/mach-pxa/tosa-bt.c b/arch/arm/mach-pxa/tosa-bt.c index fc3646c2c694..685deff861d2 100644 --- a/arch/arm/mach-pxa/tosa-bt.c +++ b/arch/arm/mach-pxa/tosa-bt.c @@ -129,7 +129,6 @@ static struct platform_driver tosa_bt_driver = { .driver = { .name = "tosa-bt", - .owner = THIS_MODULE, }, }; diff --git a/arch/arm/mach-s3c24xx/h1940-bluetooth.c b/arch/arm/mach-s3c24xx/h1940-bluetooth.c index b4d14b864367..9c8b1279a4ba 100644 --- a/arch/arm/mach-s3c24xx/h1940-bluetooth.c +++ b/arch/arm/mach-s3c24xx/h1940-bluetooth.c @@ -41,7 +41,7 @@ static void h1940bt_enable(int on) mdelay(10); gpio_set_value(S3C2410_GPH(1), 0); - h1940_led_blink_set(-EINVAL, GPIO_LED_BLINK, NULL, NULL); + h1940_led_blink_set(NULL, GPIO_LED_BLINK, NULL, NULL); } else { gpio_set_value(S3C2410_GPH(1), 1); @@ -50,7 +50,7 @@ static void h1940bt_enable(int on) mdelay(10); gpio_set_value(H1940_LATCH_BLUETOOTH_POWER, 0); - h1940_led_blink_set(-EINVAL, GPIO_LED_NO_BLINK_LOW, NULL, NULL); + h1940_led_blink_set(NULL, GPIO_LED_NO_BLINK_LOW, NULL, NULL); } } diff --git a/arch/arm/mach-s3c24xx/h1940.h b/arch/arm/mach-s3c24xx/h1940.h index 2950cc466840..596d9f64c5b6 100644 --- a/arch/arm/mach-s3c24xx/h1940.h +++ b/arch/arm/mach-s3c24xx/h1940.h @@ -19,8 +19,10 @@ #define H1940_SUSPEND_RESUMEAT (0x30081000) #define H1940_SUSPEND_CHECK (0x30080000) +struct gpio_desc; + extern void h1940_pm_return(void); -extern int h1940_led_blink_set(unsigned gpio, int state, +extern int h1940_led_blink_set(struct gpio_desc *desc, int state, unsigned long *delay_on, unsigned long *delay_off); diff --git a/arch/arm/mach-s3c24xx/mach-h1940.c b/arch/arm/mach-s3c24xx/mach-h1940.c index d35ddc1d9991..d40d4f5244c6 100644 --- a/arch/arm/mach-s3c24xx/mach-h1940.c +++ b/arch/arm/mach-s3c24xx/mach-h1940.c @@ -359,10 +359,11 @@ static struct platform_device h1940_battery = { static DEFINE_SPINLOCK(h1940_blink_spin); -int h1940_led_blink_set(unsigned gpio, int state, +int h1940_led_blink_set(struct gpio_desc *desc, int state, unsigned long *delay_on, unsigned long *delay_off) { int blink_gpio, check_gpio1, check_gpio2; + int gpio = desc ? 
desc_to_gpio(desc) : -EINVAL; switch (gpio) { case H1940_LATCH_LED_GREEN: diff --git a/arch/arm/mach-s3c24xx/mach-osiris-dvs.c b/arch/arm/mach-s3c24xx/mach-osiris-dvs.c index 33afb9190091..ce2db235dbaf 100644 --- a/arch/arm/mach-s3c24xx/mach-osiris-dvs.c +++ b/arch/arm/mach-s3c24xx/mach-osiris-dvs.c @@ -171,7 +171,6 @@ static struct platform_driver osiris_dvs_driver = { .remove = osiris_dvs_remove, .driver = { .name = "osiris-dvs", - .owner = THIS_MODULE, .pm = &osiris_dvs_pm, }, }; diff --git a/arch/arm/mach-s3c24xx/mach-rx1950.c b/arch/arm/mach-s3c24xx/mach-rx1950.c index c3f2682d0c62..1d35ff375a01 100644 --- a/arch/arm/mach-s3c24xx/mach-rx1950.c +++ b/arch/arm/mach-s3c24xx/mach-rx1950.c @@ -250,9 +250,10 @@ static void rx1950_disable_charger(void) static DEFINE_SPINLOCK(rx1950_blink_spin); -static int rx1950_led_blink_set(unsigned gpio, int state, +static int rx1950_led_blink_set(struct gpio_desc *desc, int state, unsigned long *delay_on, unsigned long *delay_off) { + int gpio = desc_to_gpio(desc); int blink_gpio, check_gpio; switch (gpio) { diff --git a/arch/arm/mach-s3c64xx/cpuidle.c b/arch/arm/mach-s3c64xx/cpuidle.c index 3c8ab07c2012..2eb072440dfa 100644 --- a/arch/arm/mach-s3c64xx/cpuidle.c +++ b/arch/arm/mach-s3c64xx/cpuidle.c @@ -48,7 +48,6 @@ static struct cpuidle_driver s3c64xx_cpuidle_driver = { .enter = s3c64xx_enter_idle, .exit_latency = 1, .target_residency = 1, - .flags = CPUIDLE_FLAG_TIME_VALID, .name = "IDLE", .desc = "System active, ARM gated", }, diff --git a/arch/arm/mach-sa1100/clock.c b/arch/arm/mach-sa1100/clock.c index 9fa6a990cf03..03c75a811cb0 100644 --- a/arch/arm/mach-sa1100/clock.c +++ b/arch/arm/mach-sa1100/clock.c @@ -15,10 +15,12 @@ #include <linux/clkdev.h> #include <mach/hardware.h> +#include <mach/generic.h> struct clkops { void (*enable)(struct clk *); void (*disable)(struct clk *); + unsigned long (*get_rate)(struct clk *); }; struct clk { @@ -33,13 +35,6 @@ struct clk clk_##_name = { \ static DEFINE_SPINLOCK(clocks_lock); -/* Dummy clk routine to build generic kernel parts that may be using them */ -unsigned long clk_get_rate(struct clk *clk) -{ - return 0; -} -EXPORT_SYMBOL(clk_get_rate); - static void clk_gpio27_enable(struct clk *clk) { /* @@ -58,6 +53,19 @@ static void clk_gpio27_disable(struct clk *clk) GAFR &= ~GPIO_32_768kHz; } +static void clk_cpu_enable(struct clk *clk) +{ +} + +static void clk_cpu_disable(struct clk *clk) +{ +} + +static unsigned long clk_cpu_get_rate(struct clk *clk) +{ + return sa11x0_getspeed(0) * 1000; +} + int clk_enable(struct clk *clk) { unsigned long flags; @@ -87,16 +95,37 @@ void clk_disable(struct clk *clk) } EXPORT_SYMBOL(clk_disable); +unsigned long clk_get_rate(struct clk *clk) +{ + if (clk && clk->ops && clk->ops->get_rate) + return clk->ops->get_rate(clk); + + return 0; +} +EXPORT_SYMBOL(clk_get_rate); + const struct clkops clk_gpio27_ops = { .enable = clk_gpio27_enable, .disable = clk_gpio27_disable, }; +const struct clkops clk_cpu_ops = { + .enable = clk_cpu_enable, + .disable = clk_cpu_disable, + .get_rate = clk_cpu_get_rate, +}; + static DEFINE_CLK(gpio27, &clk_gpio27_ops); +static DEFINE_CLK(cpu, &clk_cpu_ops); + static struct clk_lookup sa11xx_clkregs[] = { CLKDEV_INIT("sa1111.0", NULL, &clk_gpio27), CLKDEV_INIT("sa1100-rtc", NULL, NULL), + CLKDEV_INIT("sa11x0-fb", NULL, &clk_cpu), + CLKDEV_INIT("sa11x0-pcmcia", NULL, &clk_cpu), + /* sa1111 names devices using internal offsets, PCMCIA is at 0x1800 */ + CLKDEV_INIT("1800", NULL, &clk_cpu), }; static int __init sa11xx_clk_init(void) diff --git 
a/arch/arm/mach-sa1100/collie.c b/arch/arm/mach-sa1100/collie.c index 108939f8d053..b90c7d828391 100644 --- a/arch/arm/mach-sa1100/collie.c +++ b/arch/arm/mach-sa1100/collie.c @@ -30,7 +30,7 @@ #include <linux/gpio_keys.h> #include <linux/input.h> #include <linux/gpio.h> -#include <linux/pda_power.h> +#include <linux/power/gpio-charger.h> #include <video/sa1100fb.h> @@ -131,62 +131,24 @@ static struct irda_platform_data collie_ir_data = { /* * Collie AC IN */ -static int collie_power_init(struct device *dev) -{ - int ret = gpio_request(COLLIE_GPIO_AC_IN, "ac in"); - if (ret) - goto err_gpio_req; - - ret = gpio_direction_input(COLLIE_GPIO_AC_IN); - if (ret) - goto err_gpio_in; - - return 0; - -err_gpio_in: - gpio_free(COLLIE_GPIO_AC_IN); -err_gpio_req: - return ret; -} - -static void collie_power_exit(struct device *dev) -{ - gpio_free(COLLIE_GPIO_AC_IN); -} - -static int collie_power_ac_online(void) -{ - return gpio_get_value(COLLIE_GPIO_AC_IN) == 2; -} - static char *collie_ac_supplied_to[] = { "main-battery", "backup-battery", }; -static struct pda_power_pdata collie_power_data = { - .init = collie_power_init, - .is_ac_online = collie_power_ac_online, - .exit = collie_power_exit, + +static struct gpio_charger_platform_data collie_power_data = { + .name = "charger", + .type = POWER_SUPPLY_TYPE_MAINS, + .gpio = COLLIE_GPIO_AC_IN, .supplied_to = collie_ac_supplied_to, .num_supplicants = ARRAY_SIZE(collie_ac_supplied_to), }; -static struct resource collie_power_resource[] = { - { - .name = "ac", - .flags = IORESOURCE_IRQ | - IORESOURCE_IRQ_HIGHEDGE | - IORESOURCE_IRQ_LOWEDGE, - }, -}; - static struct platform_device collie_power_device = { - .name = "pda-power", + .name = "gpio-charger", .id = -1, .dev.platform_data = &collie_power_data, - .resource = collie_power_resource, - .num_resources = ARRAY_SIZE(collie_power_resource), }; #ifdef CONFIG_SHARP_LOCOMO @@ -420,9 +382,6 @@ static void __init collie_init(void) GPSR |= _COLLIE_GPIO_UCB1x00_RESET; - collie_power_resource[0].start = gpio_to_irq(COLLIE_GPIO_AC_IN); - collie_power_resource[0].end = gpio_to_irq(COLLIE_GPIO_AC_IN); - sa11x0_ppc_configure_mcp(); diff --git a/arch/arm/mach-sa1100/include/mach/entry-macro.S b/arch/arm/mach-sa1100/include/mach/entry-macro.S deleted file mode 100644 index 8cf7630bf024..000000000000 --- a/arch/arm/mach-sa1100/include/mach/entry-macro.S +++ /dev/null @@ -1,41 +0,0 @@ -/* - * arch/arm/mach-sa1100/include/mach/entry-macro.S - * - * Low-level IRQ helper macros for SA1100-based platforms - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. 
- */ - - .macro get_irqnr_preamble, base, tmp - mov \base, #0xfa000000 @ ICIP = 0xfa050000 - add \base, \base, #0x00050000 - .endm - - .macro get_irqnr_and_base, irqnr, irqstat, base, tmp - ldr \irqstat, [\base] @ get irqs - ldr \irqnr, [\base, #4] @ ICMR = 0xfa050004 - ands \irqstat, \irqstat, \irqnr - mov \irqnr, #0 - beq 1001f - tst \irqstat, #0xff - moveq \irqstat, \irqstat, lsr #8 - addeq \irqnr, \irqnr, #8 - tsteq \irqstat, #0xff - moveq \irqstat, \irqstat, lsr #8 - addeq \irqnr, \irqnr, #8 - tsteq \irqstat, #0xff - moveq \irqstat, \irqstat, lsr #8 - addeq \irqnr, \irqnr, #8 - tst \irqstat, #0x0f - moveq \irqstat, \irqstat, lsr #4 - addeq \irqnr, \irqnr, #4 - tst \irqstat, #0x03 - moveq \irqstat, \irqstat, lsr #2 - addeq \irqnr, \irqnr, #2 - tst \irqstat, #0x01 - addeqs \irqnr, \irqnr, #1 -1001: - .endm - diff --git a/arch/arm/mach-sa1100/include/mach/irqs.h b/arch/arm/mach-sa1100/include/mach/irqs.h index 3790298b7142..de0983494c7e 100644 --- a/arch/arm/mach-sa1100/include/mach/irqs.h +++ b/arch/arm/mach-sa1100/include/mach/irqs.h @@ -8,56 +8,56 @@ * 2001/11/14 RMK Cleaned up and standardised a lot of the IRQs. */ -#define IRQ_GPIO0 0 -#define IRQ_GPIO1 1 -#define IRQ_GPIO2 2 -#define IRQ_GPIO3 3 -#define IRQ_GPIO4 4 -#define IRQ_GPIO5 5 -#define IRQ_GPIO6 6 -#define IRQ_GPIO7 7 -#define IRQ_GPIO8 8 -#define IRQ_GPIO9 9 -#define IRQ_GPIO10 10 -#define IRQ_GPIO11_27 11 -#define IRQ_LCD 12 /* LCD controller */ -#define IRQ_Ser0UDC 13 /* Ser. port 0 UDC */ -#define IRQ_Ser1SDLC 14 /* Ser. port 1 SDLC */ -#define IRQ_Ser1UART 15 /* Ser. port 1 UART */ -#define IRQ_Ser2ICP 16 /* Ser. port 2 ICP */ -#define IRQ_Ser3UART 17 /* Ser. port 3 UART */ -#define IRQ_Ser4MCP 18 /* Ser. port 4 MCP */ -#define IRQ_Ser4SSP 19 /* Ser. port 4 SSP */ -#define IRQ_DMA0 20 /* DMA controller channel 0 */ -#define IRQ_DMA1 21 /* DMA controller channel 1 */ -#define IRQ_DMA2 22 /* DMA controller channel 2 */ -#define IRQ_DMA3 23 /* DMA controller channel 3 */ -#define IRQ_DMA4 24 /* DMA controller channel 4 */ -#define IRQ_DMA5 25 /* DMA controller channel 5 */ -#define IRQ_OST0 26 /* OS Timer match 0 */ -#define IRQ_OST1 27 /* OS Timer match 1 */ -#define IRQ_OST2 28 /* OS Timer match 2 */ -#define IRQ_OST3 29 /* OS Timer match 3 */ -#define IRQ_RTC1Hz 30 /* RTC 1 Hz clock */ -#define IRQ_RTCAlrm 31 /* RTC Alarm */ +#define IRQ_GPIO0 1 +#define IRQ_GPIO1 2 +#define IRQ_GPIO2 3 +#define IRQ_GPIO3 4 +#define IRQ_GPIO4 5 +#define IRQ_GPIO5 6 +#define IRQ_GPIO6 7 +#define IRQ_GPIO7 8 +#define IRQ_GPIO8 9 +#define IRQ_GPIO9 10 +#define IRQ_GPIO10 11 +#define IRQ_GPIO11_27 12 +#define IRQ_LCD 13 /* LCD controller */ +#define IRQ_Ser0UDC 14 /* Ser. port 0 UDC */ +#define IRQ_Ser1SDLC 15 /* Ser. port 1 SDLC */ +#define IRQ_Ser1UART 16 /* Ser. port 1 UART */ +#define IRQ_Ser2ICP 17 /* Ser. port 2 ICP */ +#define IRQ_Ser3UART 18 /* Ser. port 3 UART */ +#define IRQ_Ser4MCP 19 /* Ser. port 4 MCP */ +#define IRQ_Ser4SSP 20 /* Ser. 
port 4 SSP */ +#define IRQ_DMA0 21 /* DMA controller channel 0 */ +#define IRQ_DMA1 22 /* DMA controller channel 1 */ +#define IRQ_DMA2 23 /* DMA controller channel 2 */ +#define IRQ_DMA3 24 /* DMA controller channel 3 */ +#define IRQ_DMA4 25 /* DMA controller channel 4 */ +#define IRQ_DMA5 26 /* DMA controller channel 5 */ +#define IRQ_OST0 27 /* OS Timer match 0 */ +#define IRQ_OST1 28 /* OS Timer match 1 */ +#define IRQ_OST2 29 /* OS Timer match 2 */ +#define IRQ_OST3 30 /* OS Timer match 3 */ +#define IRQ_RTC1Hz 31 /* RTC 1 Hz clock */ +#define IRQ_RTCAlrm 32 /* RTC Alarm */ -#define IRQ_GPIO11 32 -#define IRQ_GPIO12 33 -#define IRQ_GPIO13 34 -#define IRQ_GPIO14 35 -#define IRQ_GPIO15 36 -#define IRQ_GPIO16 37 -#define IRQ_GPIO17 38 -#define IRQ_GPIO18 39 -#define IRQ_GPIO19 40 -#define IRQ_GPIO20 41 -#define IRQ_GPIO21 42 -#define IRQ_GPIO22 43 -#define IRQ_GPIO23 44 -#define IRQ_GPIO24 45 -#define IRQ_GPIO25 46 -#define IRQ_GPIO26 47 -#define IRQ_GPIO27 48 +#define IRQ_GPIO11 33 +#define IRQ_GPIO12 34 +#define IRQ_GPIO13 35 +#define IRQ_GPIO14 36 +#define IRQ_GPIO15 37 +#define IRQ_GPIO16 38 +#define IRQ_GPIO17 39 +#define IRQ_GPIO18 40 +#define IRQ_GPIO19 41 +#define IRQ_GPIO20 42 +#define IRQ_GPIO21 43 +#define IRQ_GPIO22 44 +#define IRQ_GPIO23 45 +#define IRQ_GPIO24 46 +#define IRQ_GPIO25 47 +#define IRQ_GPIO26 48 +#define IRQ_GPIO27 49 /* * The next 16 interrupts are for board specific purposes. Since @@ -65,8 +65,8 @@ * these. If you need more, increase IRQ_BOARD_END, but keep it * within sensible limits. IRQs 49 to 64 are available. */ -#define IRQ_BOARD_START 49 -#define IRQ_BOARD_END 65 +#define IRQ_BOARD_START 50 +#define IRQ_BOARD_END 66 /* * Figure out the MAX IRQ number. diff --git a/arch/arm/mach-sa1100/irq.c b/arch/arm/mach-sa1100/irq.c index 2124f1fc2fbe..63e2901db416 100644 --- a/arch/arm/mach-sa1100/irq.c +++ b/arch/arm/mach-sa1100/irq.c @@ -14,17 +14,73 @@ #include <linux/interrupt.h> #include <linux/io.h> #include <linux/irq.h> +#include <linux/irqdomain.h> #include <linux/ioport.h> #include <linux/syscore_ops.h> #include <mach/hardware.h> #include <mach/irqs.h> #include <asm/mach/irq.h> +#include <asm/exception.h> #include "generic.h" /* + * We don't need to ACK IRQs on the SA1100 unless they're GPIOs + * this is for internal IRQs i.e. from IRQ LCD to RTCAlrm. + */ +static void sa1100_mask_irq(struct irq_data *d) +{ + ICMR &= ~BIT(d->hwirq); +} + +static void sa1100_unmask_irq(struct irq_data *d) +{ + ICMR |= BIT(d->hwirq); +} + +/* + * Apart form GPIOs, only the RTC alarm can be a wakeup event. + */ +static int sa1100_set_wake(struct irq_data *d, unsigned int on) +{ + if (BIT(d->hwirq) == IC_RTCAlrm) { + if (on) + PWER |= PWER_RTC; + else + PWER &= ~PWER_RTC; + return 0; + } + return -EINVAL; +} + +static struct irq_chip sa1100_normal_chip = { + .name = "SC", + .irq_ack = sa1100_mask_irq, + .irq_mask = sa1100_mask_irq, + .irq_unmask = sa1100_unmask_irq, + .irq_set_wake = sa1100_set_wake, +}; + +static int sa1100_normal_irqdomain_map(struct irq_domain *d, + unsigned int irq, irq_hw_number_t hwirq) +{ + irq_set_chip_and_handler(irq, &sa1100_normal_chip, + handle_level_irq); + set_irq_flags(irq, IRQF_VALID); + + return 0; +} + +static struct irq_domain_ops sa1100_normal_irqdomain_ops = { + .map = sa1100_normal_irqdomain_map, + .xlate = irq_domain_xlate_onetwocell, +}; + +static struct irq_domain *sa1100_normal_irqdomain; + +/* * SA1100 GPIO edge detection for IRQs: * IRQs are generated on Falling-Edge, Rising-Edge, or both. 
* Use this instead of directly setting GRER/GFER. @@ -33,20 +89,11 @@ static int GPIO_IRQ_rising_edge; static int GPIO_IRQ_falling_edge; static int GPIO_IRQ_mask = (1 << 11) - 1; -/* - * To get the GPIO number from an IRQ number - */ -#define GPIO_11_27_IRQ(i) ((i) - 21) -#define GPIO11_27_MASK(irq) (1 << GPIO_11_27_IRQ(irq)) - static int sa1100_gpio_type(struct irq_data *d, unsigned int type) { unsigned int mask; - if (d->irq <= 10) - mask = 1 << d->irq; - else - mask = GPIO11_27_MASK(d->irq); + mask = BIT(d->hwirq); if (type == IRQ_TYPE_PROBE) { if ((GPIO_IRQ_rising_edge | GPIO_IRQ_falling_edge) & mask) @@ -70,41 +117,51 @@ static int sa1100_gpio_type(struct irq_data *d, unsigned int type) } /* - * GPIO IRQs must be acknowledged. This is for IRQs from 0 to 10. + * GPIO IRQs must be acknowledged. */ -static void sa1100_low_gpio_ack(struct irq_data *d) -{ - GEDR = (1 << d->irq); -} - -static void sa1100_low_gpio_mask(struct irq_data *d) -{ - ICMR &= ~(1 << d->irq); -} - -static void sa1100_low_gpio_unmask(struct irq_data *d) +static void sa1100_gpio_ack(struct irq_data *d) { - ICMR |= 1 << d->irq; + GEDR = BIT(d->hwirq); } -static int sa1100_low_gpio_wake(struct irq_data *d, unsigned int on) +static int sa1100_gpio_wake(struct irq_data *d, unsigned int on) { if (on) - PWER |= 1 << d->irq; + PWER |= BIT(d->hwirq); else - PWER &= ~(1 << d->irq); + PWER &= ~BIT(d->hwirq); return 0; } +/* + * This is for IRQs from 0 to 10. + */ static struct irq_chip sa1100_low_gpio_chip = { .name = "GPIO-l", - .irq_ack = sa1100_low_gpio_ack, - .irq_mask = sa1100_low_gpio_mask, - .irq_unmask = sa1100_low_gpio_unmask, + .irq_ack = sa1100_gpio_ack, + .irq_mask = sa1100_mask_irq, + .irq_unmask = sa1100_unmask_irq, .irq_set_type = sa1100_gpio_type, - .irq_set_wake = sa1100_low_gpio_wake, + .irq_set_wake = sa1100_gpio_wake, +}; + +static int sa1100_low_gpio_irqdomain_map(struct irq_domain *d, + unsigned int irq, irq_hw_number_t hwirq) +{ + irq_set_chip_and_handler(irq, &sa1100_low_gpio_chip, + handle_edge_irq); + set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); + + return 0; +} + +static struct irq_domain_ops sa1100_low_gpio_irqdomain_ops = { + .map = sa1100_low_gpio_irqdomain_map, + .xlate = irq_domain_xlate_onetwocell, }; +static struct irq_domain *sa1100_low_gpio_irqdomain; + /* * IRQ11 (GPIO11 through 27) handler. We enter here with the * irq_controller_lock held, and IRQs disabled. Decode the IRQ @@ -141,16 +198,9 @@ sa1100_high_gpio_handler(unsigned int irq, struct irq_desc *desc) * In addition, the IRQs are all collected up into one bit in the * interrupt controller registers. 
*/ -static void sa1100_high_gpio_ack(struct irq_data *d) -{ - unsigned int mask = GPIO11_27_MASK(d->irq); - - GEDR = mask; -} - static void sa1100_high_gpio_mask(struct irq_data *d) { - unsigned int mask = GPIO11_27_MASK(d->irq); + unsigned int mask = BIT(d->hwirq); GPIO_IRQ_mask &= ~mask; @@ -160,7 +210,7 @@ static void sa1100_high_gpio_mask(struct irq_data *d) static void sa1100_high_gpio_unmask(struct irq_data *d) { - unsigned int mask = GPIO11_27_MASK(d->irq); + unsigned int mask = BIT(d->hwirq); GPIO_IRQ_mask |= mask; @@ -168,61 +218,32 @@ static void sa1100_high_gpio_unmask(struct irq_data *d) GFER = GPIO_IRQ_falling_edge & GPIO_IRQ_mask; } -static int sa1100_high_gpio_wake(struct irq_data *d, unsigned int on) -{ - if (on) - PWER |= GPIO11_27_MASK(d->irq); - else - PWER &= ~GPIO11_27_MASK(d->irq); - return 0; -} - static struct irq_chip sa1100_high_gpio_chip = { .name = "GPIO-h", - .irq_ack = sa1100_high_gpio_ack, + .irq_ack = sa1100_gpio_ack, .irq_mask = sa1100_high_gpio_mask, .irq_unmask = sa1100_high_gpio_unmask, .irq_set_type = sa1100_gpio_type, - .irq_set_wake = sa1100_high_gpio_wake, + .irq_set_wake = sa1100_gpio_wake, }; -/* - * We don't need to ACK IRQs on the SA1100 unless they're GPIOs - * this is for internal IRQs i.e. from 11 to 31. - */ -static void sa1100_mask_irq(struct irq_data *d) -{ - ICMR &= ~(1 << d->irq); -} - -static void sa1100_unmask_irq(struct irq_data *d) +static int sa1100_high_gpio_irqdomain_map(struct irq_domain *d, + unsigned int irq, irq_hw_number_t hwirq) { - ICMR |= (1 << d->irq); -} + irq_set_chip_and_handler(irq, &sa1100_high_gpio_chip, + handle_edge_irq); + set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); -/* - * Apart form GPIOs, only the RTC alarm can be a wakeup event. - */ -static int sa1100_set_wake(struct irq_data *d, unsigned int on) -{ - if (d->irq == IRQ_RTCAlrm) { - if (on) - PWER |= PWER_RTC; - else - PWER &= ~PWER_RTC; - return 0; - } - return -EINVAL; + return 0; } -static struct irq_chip sa1100_normal_chip = { - .name = "SC", - .irq_ack = sa1100_mask_irq, - .irq_mask = sa1100_mask_irq, - .irq_unmask = sa1100_unmask_irq, - .irq_set_wake = sa1100_set_wake, +static struct irq_domain_ops sa1100_high_gpio_irqdomain_ops = { + .map = sa1100_high_gpio_irqdomain_map, + .xlate = irq_domain_xlate_onetwocell, }; +static struct irq_domain *sa1100_high_gpio_irqdomain; + static struct resource irq_resource = DEFINE_RES_MEM_NAMED(0x90050000, SZ_64K, "irqs"); @@ -291,10 +312,25 @@ static int __init sa1100irq_init_devicefs(void) device_initcall(sa1100irq_init_devicefs); -void __init sa1100_init_irq(void) +static asmlinkage void __exception_irq_entry +sa1100_handle_irq(struct pt_regs *regs) { - unsigned int irq; + uint32_t icip, icmr, mask; + + do { + icip = (ICIP); + icmr = (ICMR); + mask = icip & icmr; + + if (mask == 0) + break; + + handle_IRQ(ffs(mask) - 1 + IRQ_GPIO0, regs); + } while (1); +} +void __init sa1100_init_irq(void) +{ request_resource(&iomem_resource, &irq_resource); /* disable all IRQs */ @@ -314,29 +350,24 @@ void __init sa1100_init_irq(void) */ ICCR = 1; - for (irq = 0; irq <= 10; irq++) { - irq_set_chip_and_handler(irq, &sa1100_low_gpio_chip, - handle_edge_irq); - set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); - } + sa1100_low_gpio_irqdomain = irq_domain_add_legacy(NULL, + 11, IRQ_GPIO0, 0, + &sa1100_low_gpio_irqdomain_ops, NULL); - for (irq = 12; irq <= 31; irq++) { - irq_set_chip_and_handler(irq, &sa1100_normal_chip, - handle_level_irq); - set_irq_flags(irq, IRQF_VALID); - } + sa1100_normal_irqdomain = irq_domain_add_legacy(NULL, + 21, 
IRQ_GPIO11_27, 11, + &sa1100_normal_irqdomain_ops, NULL); - for (irq = 32; irq <= 48; irq++) { - irq_set_chip_and_handler(irq, &sa1100_high_gpio_chip, - handle_edge_irq); - set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); - } + sa1100_high_gpio_irqdomain = irq_domain_add_legacy(NULL, + 17, IRQ_GPIO11, 11, + &sa1100_high_gpio_irqdomain_ops, NULL); /* * Install handler for GPIO 11-27 edge detect interrupts */ - irq_set_chip(IRQ_GPIO11_27, &sa1100_normal_chip); irq_set_chained_handler(IRQ_GPIO11_27, sa1100_high_gpio_handler); + set_handle_irq(sa1100_handle_irq); + sa1100_init_gpio(); } diff --git a/arch/arm/mach-sa1100/neponset.c b/arch/arm/mach-sa1100/neponset.c index 400f80332046..169262e3040d 100644 --- a/arch/arm/mach-sa1100/neponset.c +++ b/arch/arm/mach-sa1100/neponset.c @@ -423,7 +423,6 @@ static struct platform_driver neponset_device_driver = { .remove = neponset_remove, .driver = { .name = "neponset", - .owner = THIS_MODULE, .pm = PM_OPS, }, }; diff --git a/arch/arm/mach-shmobile/board-lager.c b/arch/arm/mach-shmobile/board-lager.c index b47262afb240..f8197eb6e566 100644 --- a/arch/arm/mach-shmobile/board-lager.c +++ b/arch/arm/mach-shmobile/board-lager.c @@ -32,7 +32,6 @@ #include <linux/pinctrl/machine.h> #include <linux/platform_data/camera-rcar.h> #include <linux/platform_data/gpio-rcar.h> -#include <linux/platform_data/rcar-du.h> #include <linux/platform_data/usb-rcar-gen2-phy.h> #include <linux/platform_device.h> #include <linux/phy.h> @@ -83,61 +82,6 @@ * */ -/* DU */ -static struct rcar_du_encoder_data lager_du_encoders[] = { - { - .type = RCAR_DU_ENCODER_VGA, - .output = RCAR_DU_OUTPUT_DPAD0, - }, { - .type = RCAR_DU_ENCODER_NONE, - .output = RCAR_DU_OUTPUT_LVDS1, - .connector.lvds.panel = { - .width_mm = 210, - .height_mm = 158, - .mode = { - .pixelclock = 65000000, - .hactive = 1024, - .hfront_porch = 20, - .hback_porch = 160, - .hsync_len = 136, - .vactive = 768, - .vfront_porch = 3, - .vback_porch = 29, - .vsync_len = 6, - }, - }, - }, -}; - -static const struct rcar_du_platform_data lager_du_pdata __initconst = { - .encoders = lager_du_encoders, - .num_encoders = ARRAY_SIZE(lager_du_encoders), -}; - -static const struct resource du_resources[] __initconst = { - DEFINE_RES_MEM(0xfeb00000, 0x70000), - DEFINE_RES_MEM_NAMED(0xfeb90000, 0x1c, "lvds.0"), - DEFINE_RES_MEM_NAMED(0xfeb94000, 0x1c, "lvds.1"), - DEFINE_RES_IRQ(gic_spi(256)), - DEFINE_RES_IRQ(gic_spi(268)), - DEFINE_RES_IRQ(gic_spi(269)), -}; - -static void __init lager_add_du_device(void) -{ - struct platform_device_info info = { - .name = "rcar-du-r8a7790", - .id = -1, - .res = du_resources, - .num_res = ARRAY_SIZE(du_resources), - .data = &lager_du_pdata, - .size_data = sizeof(lager_du_pdata), - .dma_mask = DMA_BIT_MASK(32), - }; - - platform_device_register_full(&info); -} - /* LEDS */ static struct gpio_led lager_leds[] = { { @@ -800,8 +744,6 @@ static void __init lager_add_standard_devices(void) platform_device_register_full(ðer_info); - lager_add_du_device(); - platform_device_register_resndata(NULL, "qspi", 0, qspi_resources, ARRAY_SIZE(qspi_resources), diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c index ed1087031c5d..a1c1dfb6a67a 100644 --- a/arch/arm/mach-shmobile/board-mackerel.c +++ b/arch/arm/mach-shmobile/board-mackerel.c @@ -1149,7 +1149,7 @@ static struct soc_camera_platform_info camera_info = { .format_name = "UYVY", .format_depth = 16, .format = { - .code = V4L2_MBUS_FMT_UYVY8_2X8, + .code = MEDIA_BUS_FMT_UYVY8_2X8, .colorspace = 
V4L2_COLORSPACE_SMPTE170M, .field = V4L2_FIELD_NONE, .width = 640, diff --git a/arch/arm/mach-shmobile/board-marzen.c b/arch/arm/mach-shmobile/board-marzen.c index 994dc7d86ae2..598f704f76ae 100644 --- a/arch/arm/mach-shmobile/board-marzen.c +++ b/arch/arm/mach-shmobile/board-marzen.c @@ -27,7 +27,6 @@ #include <linux/pinctrl/machine.h> #include <linux/platform_data/camera-rcar.h> #include <linux/platform_data/gpio-rcar.h> -#include <linux/platform_data/rcar-du.h> #include <linux/platform_data/usb-rcar-phy.h> #include <linux/regulator/fixed.h> #include <linux/regulator/machine.h> @@ -171,62 +170,6 @@ static struct platform_device hspi_device = { .num_resources = ARRAY_SIZE(hspi_resources), }; -/* - * DU - * - * The panel only specifies the [hv]display and [hv]total values. The position - * and width of the sync pulses don't matter, they're copied from VESA timings. - */ -static struct rcar_du_encoder_data du_encoders[] = { - { - .type = RCAR_DU_ENCODER_VGA, - .output = RCAR_DU_OUTPUT_DPAD0, - }, { - .type = RCAR_DU_ENCODER_LVDS, - .output = RCAR_DU_OUTPUT_DPAD1, - .connector.lvds.panel = { - .width_mm = 210, - .height_mm = 158, - .mode = { - .pixelclock = 65000000, - .hactive = 1024, - .hfront_porch = 20, - .hback_porch = 160, - .hsync_len = 136, - .vactive = 768, - .vfront_porch = 3, - .vback_porch = 29, - .vsync_len = 6, - }, - }, - }, -}; - -static const struct rcar_du_platform_data du_pdata __initconst = { - .encoders = du_encoders, - .num_encoders = ARRAY_SIZE(du_encoders), -}; - -static const struct resource du_resources[] __initconst = { - DEFINE_RES_MEM(0xfff80000, 0x40000), - DEFINE_RES_IRQ(gic_iid(0x3f)), -}; - -static void __init marzen_add_du_device(void) -{ - struct platform_device_info info = { - .name = "rcar-du-r8a7779", - .id = -1, - .res = du_resources, - .num_res = ARRAY_SIZE(du_resources), - .data = &du_pdata, - .size_data = sizeof(du_pdata), - .dma_mask = DMA_BIT_MASK(32), - }; - - platform_device_register_full(&info); -} - /* LEDS */ static struct gpio_led marzen_leds[] = { { @@ -385,7 +328,6 @@ static void __init marzen_init(void) platform_device_register_full(&vin1_info); platform_device_register_full(&vin3_info); platform_add_devices(marzen_devices, ARRAY_SIZE(marzen_devices)); - marzen_add_du_device(); } static const char *marzen_boards_compat_dt[] __initdata = { diff --git a/arch/arm/mach-shmobile/pm-r8a7779.c b/arch/arm/mach-shmobile/pm-r8a7779.c index 82fe3d7f9662..44a74c4c5a01 100644 --- a/arch/arm/mach-shmobile/pm-r8a7779.c +++ b/arch/arm/mach-shmobile/pm-r8a7779.c @@ -83,9 +83,8 @@ static void r8a7779_init_pm_domain(struct r8a7779_pm_domain *r8a7779_pd) { struct generic_pm_domain *genpd = &r8a7779_pd->genpd; + genpd->flags = GENPD_FLAG_PM_CLK; pm_genpd_init(genpd, NULL, false); - genpd->dev_ops.stop = pm_clk_suspend; - genpd->dev_ops.start = pm_clk_resume; genpd->dev_ops.active_wakeup = pd_active_wakeup; genpd->power_off = pd_power_down; genpd->power_on = pd_power_up; diff --git a/arch/arm/mach-shmobile/pm-rmobile.c b/arch/arm/mach-shmobile/pm-rmobile.c index 717e6413d29c..6f7d56ecf969 100644 --- a/arch/arm/mach-shmobile/pm-rmobile.c +++ b/arch/arm/mach-shmobile/pm-rmobile.c @@ -106,9 +106,8 @@ static void rmobile_init_pm_domain(struct rmobile_pm_domain *rmobile_pd) struct generic_pm_domain *genpd = &rmobile_pd->genpd; struct dev_power_governor *gov = rmobile_pd->gov; + genpd->flags = GENPD_FLAG_PM_CLK; pm_genpd_init(genpd, gov ? 
: &simple_qos_governor, false); - genpd->dev_ops.stop = pm_clk_suspend; - genpd->dev_ops.start = pm_clk_resume; genpd->dev_ops.active_wakeup = rmobile_pd_active_wakeup; genpd->power_off = rmobile_pd_power_down; genpd->power_on = rmobile_pd_power_up; diff --git a/arch/arm/mach-shmobile/pm-sh7372.c b/arch/arm/mach-shmobile/pm-sh7372.c index 7e5c2676c489..0e37da654ed5 100644 --- a/arch/arm/mach-shmobile/pm-sh7372.c +++ b/arch/arm/mach-shmobile/pm-sh7372.c @@ -423,7 +423,6 @@ static struct cpuidle_driver sh7372_cpuidle_driver = { .desc = "Core Standby Mode", .exit_latency = 10, .target_residency = 20 + 10, - .flags = CPUIDLE_FLAG_TIME_VALID, .enter = sh7372_enter_core_standby, }, .states[2] = { @@ -431,7 +430,6 @@ static struct cpuidle_driver sh7372_cpuidle_driver = { .desc = "A3SM PLL ON", .exit_latency = 20, .target_residency = 30 + 20, - .flags = CPUIDLE_FLAG_TIME_VALID, .enter = sh7372_enter_a3sm_pll_on, }, .states[3] = { @@ -439,7 +437,6 @@ static struct cpuidle_driver sh7372_cpuidle_driver = { .desc = "A3SM PLL OFF", .exit_latency = 120, .target_residency = 30 + 120, - .flags = CPUIDLE_FLAG_TIME_VALID, .enter = sh7372_enter_a3sm_pll_off, }, .states[4] = { @@ -447,7 +444,6 @@ static struct cpuidle_driver sh7372_cpuidle_driver = { .desc = "A4S PLL OFF", .exit_latency = 240, .target_residency = 30 + 240, - .flags = CPUIDLE_FLAG_TIME_VALID, .enter = sh7372_enter_a4s, .disabled = true, }, diff --git a/arch/arm/mach-tegra/cpuidle-tegra114.c b/arch/arm/mach-tegra/cpuidle-tegra114.c index 425b6c8f0cb0..f2b586d7b15d 100644 --- a/arch/arm/mach-tegra/cpuidle-tegra114.c +++ b/arch/arm/mach-tegra/cpuidle-tegra114.c @@ -75,7 +75,6 @@ static struct cpuidle_driver tegra_idle_driver = { .exit_latency = 500, .target_residency = 1000, .power_usage = 0, - .flags = CPUIDLE_FLAG_TIME_VALID, .name = "powered-down", .desc = "CPU power gated", }, diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c index b30bf5cba65b..4f25a7c7ca0f 100644 --- a/arch/arm/mach-tegra/cpuidle-tegra20.c +++ b/arch/arm/mach-tegra/cpuidle-tegra20.c @@ -59,8 +59,7 @@ static struct cpuidle_driver tegra_idle_driver = { .exit_latency = 5000, .target_residency = 10000, .power_usage = 0, - .flags = CPUIDLE_FLAG_TIME_VALID | - CPUIDLE_FLAG_COUPLED, + .flags = CPUIDLE_FLAG_COUPLED, .name = "powered-down", .desc = "CPU power gated", }, diff --git a/arch/arm/mach-tegra/cpuidle-tegra30.c b/arch/arm/mach-tegra/cpuidle-tegra30.c index 35561274f6cf..f8815ed65d9d 100644 --- a/arch/arm/mach-tegra/cpuidle-tegra30.c +++ b/arch/arm/mach-tegra/cpuidle-tegra30.c @@ -56,7 +56,6 @@ static struct cpuidle_driver tegra_idle_driver = { .exit_latency = 2000, .target_residency = 2200, .power_usage = 0, - .flags = CPUIDLE_FLAG_TIME_VALID, .name = "powered-down", .desc = "CPU power gated", }, diff --git a/arch/arm/mach-u300/regulator.c b/arch/arm/mach-u300/regulator.c index 0493a845b6bc..595b574c2c50 100644 --- a/arch/arm/mach-u300/regulator.c +++ b/arch/arm/mach-u300/regulator.c @@ -116,7 +116,6 @@ static const struct of_device_id s365_board_match[] = { static struct platform_driver s365_board_driver = { .driver = { .name = "s365-board", - .owner = THIS_MODULE, .of_match_table = s365_board_match, }, }; diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index ab906b801047..03823e784f63 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -1009,3 +1009,24 @@ config ARCH_SUPPORTS_BIG_ENDIAN help This option specifies the architecture can support big endian operation. 
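The pm-r8a7779 and pm-rmobile hunks above stop wiring pm_clk_suspend()/pm_clk_resume() into genpd->dev_ops by hand and instead set GENPD_FLAG_PM_CLK, letting the genpd core drive the domain's PM clocks itself. A minimal sketch of the new idiom for a hypothetical platform power domain (the "foo" names are illustrative, not taken from this series):

#include <linux/init.h>
#include <linux/pm_domain.h>

static struct generic_pm_domain foo_pd = {
        .name = "foo",
};

static int __init foo_pd_setup(void)
{
        /*
         * GENPD_FLAG_PM_CLK tells the genpd core to use the pm_clk
         * suspend/resume helpers for the per-device stop/start ops,
         * so the platform code no longer assigns dev_ops.stop/start.
         */
        foo_pd.flags = GENPD_FLAG_PM_CLK;
        pm_genpd_init(&foo_pd, NULL, false);
        return 0;
}
core_initcall(foo_pd_setup);

Devices attached to the domain still have their clocks registered through the pm_clk API as before; only the boilerplate in the domain setup code goes away.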
+ +config ARM_KERNMEM_PERMS + bool "Restrict kernel memory permissions" + help + If this is set, kernel memory other than kernel text (and rodata) + will be made non-executable. The tradeoff is that each region is + padded to section-size (1MiB) boundaries (because their permissions + are different and splitting the 1M pages into 4K ones causes TLB + performance problems), wasting memory. + +config DEBUG_RODATA + bool "Make kernel text and rodata read-only" + depends on ARM_KERNMEM_PERMS + default y + help + If this is set, kernel text and rodata will be made read-only. This + is to help catch accidental or malicious attempts to change the + kernel's executable code. Additionally splits rodata from kernel + text so it can be made explicitly non-executable. This creates + another section-size padded region, so it can waste more memory + space while gaining the read-only protections. diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile index 91da64de440f..d3afdf9eb65a 100644 --- a/arch/arm/mm/Makefile +++ b/arch/arm/mm/Makefile @@ -6,7 +6,7 @@ obj-y := dma-mapping.o extable.o fault.o init.o \ iomap.o obj-$(CONFIG_MMU) += fault-armv.o flush.o idmap.o ioremap.o \ - mmap.o pgd.o mmu.o + mmap.o pgd.o mmu.o pageattr.o ifneq ($(CONFIG_MMU),y) obj-y += nommu.o diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c index 83792f4324ea..2c0c541c60ca 100644 --- a/arch/arm/mm/alignment.c +++ b/arch/arm/mm/alignment.c @@ -113,7 +113,7 @@ static int safe_usermode(int new_usermode, bool warn) new_usermode |= UM_FIXUP; if (warn) - printk(KERN_WARNING "alignment: ignoring faults is unsafe on this CPU. Defaulting to fixup mode.\n"); + pr_warn("alignment: ignoring faults is unsafe on this CPU. Defaulting to fixup mode.\n"); } return new_usermode; @@ -523,7 +523,7 @@ do_alignment_ldmstm(unsigned long addr, unsigned long instr, struct pt_regs *reg * processor for us. */ if (addr != eaddr) { - printk(KERN_ERR "LDMSTM: PC = %08lx, instr = %08lx, " + pr_err("LDMSTM: PC = %08lx, instr = %08lx, " "addr = %08lx, eaddr = %08lx\n", instruction_pointer(regs), instr, addr, eaddr); show_regs(regs); @@ -567,7 +567,7 @@ fault: return TYPE_FAULT; bad: - printk(KERN_ERR "Alignment trap: not handling ldm with s-bit set\n"); + pr_err("Alignment trap: not handling ldm with s-bit set\n"); return TYPE_ERROR; } @@ -899,13 +899,13 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) return 0; swp: - printk(KERN_ERR "Alignment trap: not handling swp instruction\n"); + pr_err("Alignment trap: not handling swp instruction\n"); bad: /* * Oops, we didn't handle the instruction. */ - printk(KERN_ERR "Alignment trap: not handling instruction " + pr_err("Alignment trap: not handling instruction " "%0*lx at [<%08lx>]\n", isize << 1, isize == 2 ? 
tinstr : instr, instrptr); diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c index e028a7f2ebcc..097181e08c25 100644 --- a/arch/arm/mm/cache-feroceon-l2.c +++ b/arch/arm/mm/cache-feroceon-l2.c @@ -313,7 +313,7 @@ static void __init disable_l2_prefetch(void) */ u = read_extra_features(); if (!(u & 0x01000000)) { - printk(KERN_INFO "Feroceon L2: Disabling L2 prefetch.\n"); + pr_info("Feroceon L2: Disabling L2 prefetch.\n"); write_extra_features(u | 0x01000000); } } @@ -326,7 +326,7 @@ static void __init enable_l2(void) if (!(u & 0x00400000)) { int i, d; - printk(KERN_INFO "Feroceon L2: Enabling L2\n"); + pr_info("Feroceon L2: Enabling L2\n"); d = flush_and_disable_dcache(); i = invalidate_and_disable_icache(); @@ -353,7 +353,7 @@ void __init feroceon_l2_init(int __l2_wt_override) enable_l2(); - printk(KERN_INFO "Feroceon L2: Cache support initialised%s.\n", + pr_info("Feroceon L2: Cache support initialised%s.\n", l2_wt_override ? ", in WT override mode" : ""); } #ifdef CONFIG_OF diff --git a/arch/arm/mm/cache-tauros2.c b/arch/arm/mm/cache-tauros2.c index b273739e6359..1e373d268c04 100644 --- a/arch/arm/mm/cache-tauros2.c +++ b/arch/arm/mm/cache-tauros2.c @@ -185,7 +185,7 @@ static void enable_extra_feature(unsigned int features) u &= ~0x01000000; else u |= 0x01000000; - printk(KERN_INFO "Tauros2: %s L2 prefetch.\n", + pr_info("Tauros2: %s L2 prefetch.\n", (features & CACHE_TAUROS2_PREFETCH_ON) ? "Enabling" : "Disabling"); @@ -193,7 +193,7 @@ static void enable_extra_feature(unsigned int features) u |= 0x00100000; else u &= ~0x00100000; - printk(KERN_INFO "Tauros2: %s line fill burt8.\n", + pr_info("Tauros2: %s line fill burt8.\n", (features & CACHE_TAUROS2_LINEFILL_BURST8) ? "Enabling" : "Disabling"); @@ -216,7 +216,7 @@ static void __init tauros2_internal_init(unsigned int features) */ feat = read_extra_features(); if (!(feat & 0x00400000)) { - printk(KERN_INFO "Tauros2: Enabling L2 cache.\n"); + pr_info("Tauros2: Enabling L2 cache.\n"); write_extra_features(feat | 0x00400000); } @@ -253,7 +253,7 @@ static void __init tauros2_internal_init(unsigned int features) */ actlr = read_actlr(); if (!(actlr & 0x00000002)) { - printk(KERN_INFO "Tauros2: Enabling L2 cache.\n"); + pr_info("Tauros2: Enabling L2 cache.\n"); write_actlr(actlr | 0x00000002); } @@ -262,11 +262,11 @@ static void __init tauros2_internal_init(unsigned int features) #endif if (mode == NULL) { - printk(KERN_CRIT "Tauros2: Unable to detect CPU mode.\n"); + pr_crit("Tauros2: Unable to detect CPU mode.\n"); return; } - printk(KERN_INFO "Tauros2: L2 cache support initialised " + pr_info("Tauros2: L2 cache support initialised " "in %s mode.\n", mode); } diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c index 6eb97b3a7481..91892569710f 100644 --- a/arch/arm/mm/context.c +++ b/arch/arm/mm/context.c @@ -184,36 +184,46 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu) u64 asid = atomic64_read(&mm->context.id); u64 generation = atomic64_read(&asid_generation); - if (asid != 0 && is_reserved_asid(asid)) { + if (asid != 0) { /* - * Our current ASID was active during a rollover, we can - * continue to use it and this was just a false alarm. + * If our current ASID was active during a rollover, we + * can continue to use it and this was just a false alarm. */ - asid = generation | (asid & ~ASID_MASK); - } else { + if (is_reserved_asid(asid)) + return generation | (asid & ~ASID_MASK); + /* - * Allocate a free ASID. 
If we can't find one, take a - * note of the currently active ASIDs and mark the TLBs - * as requiring flushes. We always count from ASID #1, - * as we reserve ASID #0 to switch via TTBR0 and to - * avoid speculative page table walks from hitting in - * any partial walk caches, which could be populated - * from overlapping level-1 descriptors used to map both - * the module area and the userspace stack. + * We had a valid ASID in a previous life, so try to re-use + * it if possible., */ - asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx); - if (asid == NUM_USER_ASIDS) { - generation = atomic64_add_return(ASID_FIRST_VERSION, - &asid_generation); - flush_context(cpu); - asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1); - } - __set_bit(asid, asid_map); - cur_idx = asid; - asid |= generation; - cpumask_clear(mm_cpumask(mm)); + asid &= ~ASID_MASK; + if (!__test_and_set_bit(asid, asid_map)) + goto bump_gen; } + /* + * Allocate a free ASID. If we can't find one, take a note of the + * currently active ASIDs and mark the TLBs as requiring flushes. + * We always count from ASID #1, as we reserve ASID #0 to switch + * via TTBR0 and to avoid speculative page table walks from hitting + * in any partial walk caches, which could be populated from + * overlapping level-1 descriptors used to map both the module + * area and the userspace stack. + */ + asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx); + if (asid == NUM_USER_ASIDS) { + generation = atomic64_add_return(ASID_FIRST_VERSION, + &asid_generation); + flush_context(cpu); + asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1); + } + + __set_bit(asid, asid_map); + cur_idx = asid; + +bump_gen: + asid |= generation; + cpumask_clear(mm_cpumask(mm)); return asid; } diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c index b9bcc9d79176..70423345da26 100644 --- a/arch/arm/mm/copypage-v6.c +++ b/arch/arm/mm/copypage-v6.c @@ -62,7 +62,7 @@ static void discard_old_kernel_data(void *kto) __asm__("mcrr p15, 0, %1, %0, c6 @ 0xec401f06" : : "r" (kto), - "r" ((unsigned long)kto + PAGE_SIZE - L1_CACHE_BYTES) + "r" ((unsigned long)kto + PAGE_SIZE - 1) : "cc"); } diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c index ff379ac115df..d9e0d00a6699 100644 --- a/arch/arm/mm/fault-armv.c +++ b/arch/arm/mm/fault-armv.c @@ -235,7 +235,7 @@ void __init check_writebuffer_bugs(void) const char *reason; unsigned long v = 1; - printk(KERN_INFO "CPU: Testing write buffer coherency: "); + pr_info("CPU: Testing write buffer coherency: "); page = alloc_page(GFP_KERNEL); if (page) { @@ -261,9 +261,9 @@ void __init check_writebuffer_bugs(void) } if (v) { - printk("failed, %s\n", reason); + pr_cont("failed, %s\n", reason); shared_pte_mask = L_PTE_MT_UNCACHED; } else { - printk("ok\n"); + pr_cont("ok\n"); } } diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index eb8830a4c5ed..a982dc3190df 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -63,9 +63,9 @@ void show_pte(struct mm_struct *mm, unsigned long addr) if (!mm) mm = &init_mm; - printk(KERN_ALERT "pgd = %p\n", mm->pgd); + pr_alert("pgd = %p\n", mm->pgd); pgd = pgd_offset(mm, addr); - printk(KERN_ALERT "[%08lx] *pgd=%08llx", + pr_alert("[%08lx] *pgd=%08llx", addr, (long long)pgd_val(*pgd)); do { @@ -77,31 +77,31 @@ void show_pte(struct mm_struct *mm, unsigned long addr) break; if (pgd_bad(*pgd)) { - printk("(bad)"); + pr_cont("(bad)"); break; } pud = pud_offset(pgd, addr); if (PTRS_PER_PUD != 1) - printk(", *pud=%08llx", (long long)pud_val(*pud)); + 
pr_cont(", *pud=%08llx", (long long)pud_val(*pud)); if (pud_none(*pud)) break; if (pud_bad(*pud)) { - printk("(bad)"); + pr_cont("(bad)"); break; } pmd = pmd_offset(pud, addr); if (PTRS_PER_PMD != 1) - printk(", *pmd=%08llx", (long long)pmd_val(*pmd)); + pr_cont(", *pmd=%08llx", (long long)pmd_val(*pmd)); if (pmd_none(*pmd)) break; if (pmd_bad(*pmd)) { - printk("(bad)"); + pr_cont("(bad)"); break; } @@ -110,15 +110,15 @@ void show_pte(struct mm_struct *mm, unsigned long addr) break; pte = pte_offset_map(pmd, addr); - printk(", *pte=%08llx", (long long)pte_val(*pte)); + pr_cont(", *pte=%08llx", (long long)pte_val(*pte)); #ifndef CONFIG_ARM_LPAE - printk(", *ppte=%08llx", + pr_cont(", *ppte=%08llx", (long long)pte_val(pte[PTE_HWTABLE_PTRS])); #endif pte_unmap(pte); } while(0); - printk("\n"); + pr_cont("\n"); } #else /* CONFIG_MMU */ void show_pte(struct mm_struct *mm, unsigned long addr) @@ -142,10 +142,9 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr, * No handler, we'll have to terminate things with extreme prejudice. */ bust_spinlocks(1); - printk(KERN_ALERT - "Unable to handle kernel %s at virtual address %08lx\n", - (addr < PAGE_SIZE) ? "NULL pointer dereference" : - "paging request", addr); + pr_alert("Unable to handle kernel %s at virtual address %08lx\n", + (addr < PAGE_SIZE) ? "NULL pointer dereference" : + "paging request", addr); show_pte(mm, addr); die("Oops", regs, fsr); @@ -551,7 +550,7 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs) if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs)) return; - printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n", + pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n", inf->name, fsr, addr); info.si_signo = inf->sig; @@ -583,7 +582,7 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs) if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs)) return; - printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n", + pr_alert("Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n", inf->name, ifsr, addr); info.si_signo = inf->sig; diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c index 265b836b3bd1..34b66af516ea 100644 --- a/arch/arm/mm/flush.c +++ b/arch/arm/mm/flush.c @@ -33,7 +33,7 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr) asm( "mcrr p15, 0, %1, %0, c14\n" " mcr p15, 0, %2, c7, c10, 4" : - : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES), "r" (zero) + : "r" (to), "r" (to + PAGE_SIZE - 1), "r" (zero) : "cc"); } diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c index e17ed00828d7..b98895d9fe57 100644 --- a/arch/arm/mm/highmem.c +++ b/arch/arm/mm/highmem.c @@ -18,19 +18,20 @@ #include <asm/tlbflush.h> #include "mm.h" -pte_t *fixmap_page_table; - static inline void set_fixmap_pte(int idx, pte_t pte) { unsigned long vaddr = __fix_to_virt(idx); - set_pte_ext(fixmap_page_table + idx, pte, 0); + pte_t *ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr); + + set_pte_ext(ptep, pte, 0); local_flush_tlb_kernel_page(vaddr); } static inline pte_t get_fixmap_pte(unsigned long vaddr) { - unsigned long idx = __virt_to_fix(vaddr); - return *(fixmap_page_table + idx); + pte_t *ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr); + + return *ptep; } void *kmap(struct page *page) @@ -84,7 +85,7 @@ void *kmap_atomic(struct page *page) * With debugging enabled, kunmap_atomic forces that entry to 0. * Make sure it was indeed properly unmapped. 
*/ - BUG_ON(!pte_none(*(fixmap_page_table + idx))); + BUG_ON(!pte_none(get_fixmap_pte(vaddr))); #endif /* * When debugging is off, kunmap_atomic leaves the previous mapping @@ -137,7 +138,7 @@ void *kmap_atomic_pfn(unsigned long pfn) idx = type + KM_TYPE_NR * smp_processor_id(); vaddr = __fix_to_virt(idx); #ifdef CONFIG_DEBUG_HIGHMEM - BUG_ON(!pte_none(*(fixmap_page_table + idx))); + BUG_ON(!pte_none(get_fixmap_pte(vaddr))); #endif set_fixmap_pte(idx, pfn_pte(pfn, kmap_prot)); diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 9481f85c56e6..98ad9c79ea0e 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -29,6 +29,7 @@ #include <asm/prom.h> #include <asm/sections.h> #include <asm/setup.h> +#include <asm/system_info.h> #include <asm/tlb.h> #include <asm/fixmap.h> @@ -67,7 +68,7 @@ early_param("initrd", early_initrd); static int __init parse_tag_initrd(const struct tag *tag) { - printk(KERN_WARNING "ATAG_INITRD is deprecated; " + pr_warn("ATAG_INITRD is deprecated; " "please update your bootloader.\n"); phys_initrd_start = __virt_to_phys(tag->u.initrd.start); phys_initrd_size = tag->u.initrd.size; @@ -544,7 +545,7 @@ void __init mem_init(void) #define MLM(b, t) b, t, ((t) - (b)) >> 20 #define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K) - printk(KERN_NOTICE "Virtual kernel memory layout:\n" + pr_notice("Virtual kernel memory layout:\n" " vector : 0x%08lx - 0x%08lx (%4ld kB)\n" #ifdef CONFIG_HAVE_TCM " DTCM : 0x%08lx - 0x%08lx (%4ld kB)\n" @@ -570,7 +571,7 @@ void __init mem_init(void) MLK(DTCM_OFFSET, (unsigned long) dtcm_end), MLK(ITCM_OFFSET, (unsigned long) itcm_end), #endif - MLK(FIXADDR_START, FIXADDR_TOP), + MLK(FIXADDR_START, FIXADDR_END), MLM(VMALLOC_START, VMALLOC_END), MLM(PAGE_OFFSET, (unsigned long)high_memory), #ifdef CONFIG_HIGHMEM @@ -615,7 +616,145 @@ void __init mem_init(void) } } -void free_initmem(void) +#ifdef CONFIG_ARM_KERNMEM_PERMS +struct section_perm { + unsigned long start; + unsigned long end; + pmdval_t mask; + pmdval_t prot; + pmdval_t clear; +}; + +static struct section_perm nx_perms[] = { + /* Make pages tables, etc before _stext RW (set NX). */ + { + .start = PAGE_OFFSET, + .end = (unsigned long)_stext, + .mask = ~PMD_SECT_XN, + .prot = PMD_SECT_XN, + }, + /* Make init RW (set NX). */ + { + .start = (unsigned long)__init_begin, + .end = (unsigned long)_sdata, + .mask = ~PMD_SECT_XN, + .prot = PMD_SECT_XN, + }, +#ifdef CONFIG_DEBUG_RODATA + /* Make rodata NX (set RO in ro_perms below). */ + { + .start = (unsigned long)__start_rodata, + .end = (unsigned long)__init_begin, + .mask = ~PMD_SECT_XN, + .prot = PMD_SECT_XN, + }, +#endif +}; + +#ifdef CONFIG_DEBUG_RODATA +static struct section_perm ro_perms[] = { + /* Make kernel code and rodata RX (set RO). */ + { + .start = (unsigned long)_stext, + .end = (unsigned long)__init_begin, +#ifdef CONFIG_ARM_LPAE + .mask = ~PMD_SECT_RDONLY, + .prot = PMD_SECT_RDONLY, +#else + .mask = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE), + .prot = PMD_SECT_APX | PMD_SECT_AP_WRITE, + .clear = PMD_SECT_AP_WRITE, +#endif + }, +}; +#endif + +/* + * Updates section permissions only for the current mm (sections are + * copied into each mm). During startup, this is the init_mm. Is only + * safe to be called with preemption disabled, as under stop_machine(). 
+ */ +static inline void section_update(unsigned long addr, pmdval_t mask, + pmdval_t prot) +{ + struct mm_struct *mm; + pmd_t *pmd; + + mm = current->active_mm; + pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr); + +#ifdef CONFIG_ARM_LPAE + pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot); +#else + if (addr & SECTION_SIZE) + pmd[1] = __pmd((pmd_val(pmd[1]) & mask) | prot); + else + pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot); +#endif + flush_pmd_entry(pmd); + local_flush_tlb_kernel_range(addr, addr + SECTION_SIZE); +} + +/* Make sure extended page tables are in use. */ +static inline bool arch_has_strict_perms(void) +{ + if (cpu_architecture() < CPU_ARCH_ARMv6) + return false; + + return !!(get_cr() & CR_XP); +} + +#define set_section_perms(perms, field) { \ + size_t i; \ + unsigned long addr; \ + \ + if (!arch_has_strict_perms()) \ + return; \ + \ + for (i = 0; i < ARRAY_SIZE(perms); i++) { \ + if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) || \ + !IS_ALIGNED(perms[i].end, SECTION_SIZE)) { \ + pr_err("BUG: section %lx-%lx not aligned to %lx\n", \ + perms[i].start, perms[i].end, \ + SECTION_SIZE); \ + continue; \ + } \ + \ + for (addr = perms[i].start; \ + addr < perms[i].end; \ + addr += SECTION_SIZE) \ + section_update(addr, perms[i].mask, \ + perms[i].field); \ + } \ +} + +static inline void fix_kernmem_perms(void) +{ + set_section_perms(nx_perms, prot); +} + +#ifdef CONFIG_DEBUG_RODATA +void mark_rodata_ro(void) +{ + set_section_perms(ro_perms, prot); +} + +void set_kernel_text_rw(void) +{ + set_section_perms(ro_perms, clear); +} + +void set_kernel_text_ro(void) +{ + set_section_perms(ro_perms, prot); +} +#endif /* CONFIG_DEBUG_RODATA */ + +#else +static inline void fix_kernmem_perms(void) { } +#endif /* CONFIG_ARM_KERNMEM_PERMS */ + +void free_tcmmem(void) { #ifdef CONFIG_HAVE_TCM extern char __tcm_start, __tcm_end; @@ -623,6 +762,12 @@ void free_initmem(void) poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start); free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link"); #endif +} + +void free_initmem(void) +{ + fix_kernmem_perms(); + free_tcmmem(); poison_init_mem(__init_begin, __init_end - __init_begin); if (!machine_is_integrator() && !machine_is_cintegrator()) diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 9f98cec7fe1e..cda7c40999b6 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -22,6 +22,7 @@ #include <asm/cputype.h> #include <asm/sections.h> #include <asm/cachetype.h> +#include <asm/fixmap.h> #include <asm/sections.h> #include <asm/setup.h> #include <asm/smp_plat.h> @@ -52,6 +53,8 @@ EXPORT_SYMBOL(empty_zero_page); */ pmd_t *top_pmd; +pmdval_t user_pmd_table = _PAGE_USER_TABLE; + #define CPOLICY_UNCACHED 0 #define CPOLICY_BUFFERED 1 #define CPOLICY_WRITETHROUGH 2 @@ -192,7 +195,7 @@ early_param("cachepolicy", early_cachepolicy); static int __init early_nocache(char *__unused) { char *p = "buffered"; - printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p); + pr_warn("nocache is deprecated; use cachepolicy=%s\n", p); early_cachepolicy(p); return 0; } @@ -201,7 +204,7 @@ early_param("nocache", early_nocache); static int __init early_nowrite(char *__unused) { char *p = "uncached"; - printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p); + pr_warn("nowb is deprecated; use cachepolicy=%s\n", p); early_cachepolicy(p); return 0; } @@ -354,43 +357,28 @@ const struct mem_type *get_mem_type(unsigned int type) } EXPORT_SYMBOL(get_mem_type); -#define PTE_SET_FN(_name, pteop) \ -static int pte_set_##_name(pte_t *ptep, 
pgtable_t token, unsigned long addr, \ - void *data) \ -{ \ - pte_t pte = pteop(*ptep); \ -\ - set_pte_ext(ptep, pte, 0); \ - return 0; \ -} \ - -#define SET_MEMORY_FN(_name, callback) \ -int set_memory_##_name(unsigned long addr, int numpages) \ -{ \ - unsigned long start = addr; \ - unsigned long size = PAGE_SIZE*numpages; \ - unsigned end = start + size; \ -\ - if (start < MODULES_VADDR || start >= MODULES_END) \ - return -EINVAL;\ -\ - if (end < MODULES_VADDR || end >= MODULES_END) \ - return -EINVAL; \ -\ - apply_to_page_range(&init_mm, start, size, callback, NULL); \ - flush_tlb_kernel_range(start, end); \ - return 0;\ -} +/* + * To avoid TLB flush broadcasts, this uses local_flush_tlb_kernel_range(). + * As a result, this can only be called with preemption disabled, as under + * stop_machine(). + */ +void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot) +{ + unsigned long vaddr = __fix_to_virt(idx); + pte_t *pte = pte_offset_kernel(pmd_off_k(vaddr), vaddr); -PTE_SET_FN(ro, pte_wrprotect) -PTE_SET_FN(rw, pte_mkwrite) -PTE_SET_FN(x, pte_mkexec) -PTE_SET_FN(nx, pte_mknexec) + /* Make sure fixmap region does not exceed available allocation. */ + BUILD_BUG_ON(FIXADDR_START + (__end_of_fixed_addresses * PAGE_SIZE) > + FIXADDR_END); + BUG_ON(idx >= __end_of_fixed_addresses); -SET_MEMORY_FN(ro, pte_set_ro) -SET_MEMORY_FN(rw, pte_set_rw) -SET_MEMORY_FN(x, pte_set_x) -SET_MEMORY_FN(nx, pte_set_nx) + if (pgprot_val(prot)) + set_pte_at(NULL, vaddr, pte, + pfn_pte(phys >> PAGE_SHIFT, prot)); + else + pte_clear(NULL, vaddr, pte); + local_flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE); +} /* * Adjust the PMD section entries according to the CPU in use. @@ -528,14 +516,23 @@ static void __init build_mem_type_table(void) hyp_device_pgprot = mem_types[MT_DEVICE].prot_pte; s2_device_pgprot = mem_types[MT_DEVICE].prot_pte_s2; +#ifndef CONFIG_ARM_LPAE /* * We don't use domains on ARMv6 (since this causes problems with * v6/v7 kernels), so we must use a separate memory type for user * r/o, kernel r/w to map the vectors page. */ -#ifndef CONFIG_ARM_LPAE if (cpu_arch == CPU_ARCH_ARMv6) vecs_pgprot |= L_PTE_MT_VECTORS; + + /* + * Check is it with support for the PXN bit + * in the Short-descriptor translation table format descriptors. + */ + if (cpu_arch == CPU_ARCH_ARMv7 && + (read_cpuid_ext(CPUID_EXT_MMFR0) & 0xF) == 4) { + user_pmd_table |= PMD_PXNTABLE; + } #endif /* @@ -605,6 +602,11 @@ static void __init build_mem_type_table(void) } kern_pgprot |= PTE_EXT_AF; vecs_pgprot |= PTE_EXT_AF; + + /* + * Set PXN for user mappings + */ + user_pgprot |= PTE_EXT_PXN; #endif for (i = 0; i < 16; i++) { @@ -786,8 +788,7 @@ static void __init create_36bit_mapping(struct map_desc *md, length = PAGE_ALIGN(md->length); if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) { - printk(KERN_ERR "MM: CPU does not support supersection " - "mapping for 0x%08llx at 0x%08lx\n", + pr_err("MM: CPU does not support supersection mapping for 0x%08llx at 0x%08lx\n", (long long)__pfn_to_phys((u64)md->pfn), addr); return; } @@ -799,15 +800,13 @@ static void __init create_36bit_mapping(struct map_desc *md, * of the actual domain assignments in use. 
*/ if (type->domain) { - printk(KERN_ERR "MM: invalid domain in supersection " - "mapping for 0x%08llx at 0x%08lx\n", + pr_err("MM: invalid domain in supersection mapping for 0x%08llx at 0x%08lx\n", (long long)__pfn_to_phys((u64)md->pfn), addr); return; } if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) { - printk(KERN_ERR "MM: cannot create mapping for 0x%08llx" - " at 0x%08lx invalid alignment\n", + pr_err("MM: cannot create mapping for 0x%08llx at 0x%08lx invalid alignment\n", (long long)__pfn_to_phys((u64)md->pfn), addr); return; } @@ -850,18 +849,16 @@ static void __init create_mapping(struct map_desc *md) pgd_t *pgd; if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) { - printk(KERN_WARNING "BUG: not creating mapping for 0x%08llx" - " at 0x%08lx in user region\n", - (long long)__pfn_to_phys((u64)md->pfn), md->virtual); + pr_warn("BUG: not creating mapping for 0x%08llx at 0x%08lx in user region\n", + (long long)__pfn_to_phys((u64)md->pfn), md->virtual); return; } if ((md->type == MT_DEVICE || md->type == MT_ROM) && md->virtual >= PAGE_OFFSET && (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) { - printk(KERN_WARNING "BUG: mapping for 0x%08llx" - " at 0x%08lx out of vmalloc space\n", - (long long)__pfn_to_phys((u64)md->pfn), md->virtual); + pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n", + (long long)__pfn_to_phys((u64)md->pfn), md->virtual); } type = &mem_types[md->type]; @@ -881,9 +878,8 @@ static void __init create_mapping(struct map_desc *md) length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK)); if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) { - printk(KERN_WARNING "BUG: map for 0x%08llx at 0x%08lx can not " - "be mapped using pages, ignoring.\n", - (long long)__pfn_to_phys(md->pfn), addr); + pr_warn("BUG: map for 0x%08llx at 0x%08lx can not be mapped using pages, ignoring.\n", + (long long)__pfn_to_phys(md->pfn), addr); return; } @@ -1053,15 +1049,13 @@ static int __init early_vmalloc(char *arg) if (vmalloc_reserve < SZ_16M) { vmalloc_reserve = SZ_16M; - printk(KERN_WARNING - "vmalloc area too small, limiting to %luMB\n", + pr_warn("vmalloc area too small, limiting to %luMB\n", vmalloc_reserve >> 20); } if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) { vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M); - printk(KERN_WARNING - "vmalloc area is too big, limiting to %luMB\n", + pr_warn("vmalloc area is too big, limiting to %luMB\n", vmalloc_reserve >> 20); } @@ -1094,7 +1088,7 @@ void __init sanity_check_meminfo(void) if (highmem) { pr_notice("Ignoring RAM at %pa-%pa (!CONFIG_HIGHMEM)\n", - &block_start, &block_end); + &block_start, &block_end); memblock_remove(reg->base, reg->size); continue; } @@ -1103,7 +1097,7 @@ void __init sanity_check_meminfo(void) phys_addr_t overlap_size = reg->size - size_limit; pr_notice("Truncating RAM at %pa-%pa to -%pa", - &block_start, &block_end, &vmalloc_limit); + &block_start, &block_end, &vmalloc_limit); memblock_remove(vmalloc_limit, overlap_size); block_end = vmalloc_limit; } @@ -1326,10 +1320,10 @@ static void __init kmap_init(void) #ifdef CONFIG_HIGHMEM pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE), PKMAP_BASE, _PAGE_KERNEL_TABLE); - - fixmap_page_table = early_pte_alloc(pmd_off_k(FIXADDR_START), - FIXADDR_START, _PAGE_KERNEL_TABLE); #endif + + early_pte_alloc(pmd_off_k(FIXADDR_START), FIXADDR_START, + _PAGE_KERNEL_TABLE); } static void __init map_lowmem(void) @@ -1349,13 +1343,20 @@ static void __init map_lowmem(void) if 
(start >= end) break; - if (end < kernel_x_start || start >= kernel_x_end) { + if (end < kernel_x_start) { map.pfn = __phys_to_pfn(start); map.virtual = __phys_to_virt(start); map.length = end - start; map.type = MT_MEMORY_RWX; create_mapping(&map); + } else if (start >= kernel_x_end) { + map.pfn = __phys_to_pfn(start); + map.virtual = __phys_to_virt(start); + map.length = end - start; + map.type = MT_MEMORY_RW; + + create_mapping(&map); } else { /* This better cover the entire kernel */ if (start < kernel_x_start) { diff --git a/arch/arm/mm/pageattr.c b/arch/arm/mm/pageattr.c new file mode 100644 index 000000000000..004e35cdcfff --- /dev/null +++ b/arch/arm/mm/pageattr.c @@ -0,0 +1,91 @@ +/* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#include <linux/mm.h> +#include <linux/module.h> + +#include <asm/pgtable.h> +#include <asm/tlbflush.h> + +struct page_change_data { + pgprot_t set_mask; + pgprot_t clear_mask; +}; + +static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr, + void *data) +{ + struct page_change_data *cdata = data; + pte_t pte = *ptep; + + pte = clear_pte_bit(pte, cdata->clear_mask); + pte = set_pte_bit(pte, cdata->set_mask); + + set_pte_ext(ptep, pte, 0); + return 0; +} + +static int change_memory_common(unsigned long addr, int numpages, + pgprot_t set_mask, pgprot_t clear_mask) +{ + unsigned long start = addr; + unsigned long size = PAGE_SIZE*numpages; + unsigned long end = start + size; + int ret; + struct page_change_data data; + + if (!IS_ALIGNED(addr, PAGE_SIZE)) { + start &= PAGE_MASK; + end = start + size; + WARN_ON_ONCE(1); + } + + if (!is_module_address(start) || !is_module_address(end - 1)) + return -EINVAL; + + data.set_mask = set_mask; + data.clear_mask = clear_mask; + + ret = apply_to_page_range(&init_mm, start, size, change_page_range, + &data); + + flush_tlb_kernel_range(start, end); + return ret; +} + +int set_memory_ro(unsigned long addr, int numpages) +{ + return change_memory_common(addr, numpages, + __pgprot(L_PTE_RDONLY), + __pgprot(0)); +} + +int set_memory_rw(unsigned long addr, int numpages) +{ + return change_memory_common(addr, numpages, + __pgprot(0), + __pgprot(L_PTE_RDONLY)); +} + +int set_memory_nx(unsigned long addr, int numpages) +{ + return change_memory_common(addr, numpages, + __pgprot(L_PTE_XN), + __pgprot(0)); +} + +int set_memory_x(unsigned long addr, int numpages) +{ + return change_memory_common(addr, numpages, + __pgprot(0), + __pgprot(L_PTE_XN)); +} diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 22ac2a6fbfe3..8b4ee5e81c14 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -591,9 +591,10 @@ __krait_proc_info: /* * Some Krait processors don't indicate support for SDIV and UDIV * instructions in the ARM instruction set, even though they actually - * do support them. + * do support them. They also don't indicate support for fused multiply + * instructions even though they actually do support them. 
*/ - __v7_proc __v7_setup, hwcaps = HWCAP_IDIV + __v7_proc __v7_setup, hwcaps = HWCAP_IDIV | HWCAP_VFPv4 .size __krait_proc_info, . - __krait_proc_info /* diff --git a/arch/arm/nwfpe/fpmodule.c b/arch/arm/nwfpe/fpmodule.c index 4e729f055a81..ec717c190e2c 100644 --- a/arch/arm/nwfpe/fpmodule.c +++ b/arch/arm/nwfpe/fpmodule.c @@ -86,20 +86,20 @@ extern void nwfpe_enter(void); static int __init fpe_init(void) { if (sizeof(FPA11) > sizeof(union fp_state)) { - printk(KERN_ERR "nwfpe: bad structure size\n"); + pr_err("nwfpe: bad structure size\n"); return -EINVAL; } if (sizeof(FPREG) != 12) { - printk(KERN_ERR "nwfpe: bad register size\n"); + pr_err("nwfpe: bad register size\n"); return -EINVAL; } if (fpe_type[0] && strcmp(fpe_type, "nwfpe")) return 0; /* Display title, version and copyright information. */ - printk(KERN_WARNING "NetWinder Floating Point Emulator V0.97 (" - NWFPE_BITS " precision)\n"); + pr_info("NetWinder Floating Point Emulator V0.97 (" + NWFPE_BITS " precision)\n"); thread_register_notifier(&nwfpe_notifier_block); diff --git a/arch/arm/plat-orion/gpio.c b/arch/arm/plat-orion/gpio.c index e048f6198d68..5168a52a17f9 100644 --- a/arch/arm/plat-orion/gpio.c +++ b/arch/arm/plat-orion/gpio.c @@ -306,9 +306,10 @@ EXPORT_SYMBOL(orion_gpio_set_blink); #define ORION_BLINK_HALF_PERIOD 100 /* ms */ -int orion_gpio_led_blink_set(unsigned gpio, int state, +int orion_gpio_led_blink_set(struct gpio_desc *desc, int state, unsigned long *delay_on, unsigned long *delay_off) { + unsigned gpio = desc_to_gpio(desc); if (delay_on && delay_off && !*delay_on && !*delay_off) *delay_on = *delay_off = ORION_BLINK_HALF_PERIOD; @@ -505,9 +506,9 @@ static void orion_gpio_unmask_irq(struct irq_data *d) u32 mask = d->mask; irq_gc_lock(gc); - reg_val = irq_reg_readl(gc->reg_base + ct->regs.mask); + reg_val = irq_reg_readl(gc, ct->regs.mask); reg_val |= mask; - irq_reg_writel(reg_val, gc->reg_base + ct->regs.mask); + irq_reg_writel(gc, reg_val, ct->regs.mask); irq_gc_unlock(gc); } @@ -519,9 +520,9 @@ static void orion_gpio_mask_irq(struct irq_data *d) u32 reg_val; irq_gc_lock(gc); - reg_val = irq_reg_readl(gc->reg_base + ct->regs.mask); + reg_val = irq_reg_readl(gc, ct->regs.mask); reg_val &= ~mask; - irq_reg_writel(reg_val, gc->reg_base + ct->regs.mask); + irq_reg_writel(gc, reg_val, ct->regs.mask); irq_gc_unlock(gc); } diff --git a/arch/arm/plat-orion/include/plat/orion-gpio.h b/arch/arm/plat-orion/include/plat/orion-gpio.h index e763988b04b9..e856b073a9c8 100644 --- a/arch/arm/plat-orion/include/plat/orion-gpio.h +++ b/arch/arm/plat-orion/include/plat/orion-gpio.h @@ -14,12 +14,15 @@ #include <linux/init.h> #include <linux/types.h> #include <linux/irqdomain.h> + +struct gpio_desc; + /* * Orion-specific GPIO API extensions. 
*/ void orion_gpio_set_unused(unsigned pin); void orion_gpio_set_blink(unsigned pin, int blink); -int orion_gpio_led_blink_set(unsigned gpio, int state, +int orion_gpio_led_blink_set(struct gpio_desc *desc, int state, unsigned long *delay_on, unsigned long *delay_off); #define GPIO_INPUT_OK (1 << 0) diff --git a/arch/arm/plat-pxa/ssp.c b/arch/arm/plat-pxa/ssp.c index 1f5ee17a10e8..ad9529cc4203 100644 --- a/arch/arm/plat-pxa/ssp.c +++ b/arch/arm/plat-pxa/ssp.c @@ -268,7 +268,6 @@ static struct platform_driver pxa_ssp_driver = { .probe = pxa_ssp_probe, .remove = pxa_ssp_remove, .driver = { - .owner = THIS_MODULE, .name = "pxa2xx-ssp", .of_match_table = of_match_ptr(pxa_ssp_of_ids), }, diff --git a/arch/arm/plat-samsung/adc.c b/arch/arm/plat-samsung/adc.c index 468352633101..e2be70df06c6 100644 --- a/arch/arm/plat-samsung/adc.c +++ b/arch/arm/plat-samsung/adc.c @@ -505,7 +505,6 @@ static struct platform_driver s3c_adc_driver = { .id_table = s3c_adc_driver_ids, .driver = { .name = "s3c-adc", - .owner = THIS_MODULE, .pm = &adc_pm_ops, }, .probe = s3c_adc_probe, diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S index cda654cbf2c2..f74a8f7e5f84 100644 --- a/arch/arm/vfp/vfphw.S +++ b/arch/arm/vfp/vfphw.S @@ -197,6 +197,12 @@ look_for_VFP_exceptions: tst r5, #FPSCR_IXE bne process_exception + tst r5, #FPSCR_LENGTH_MASK + beq skip + orr r1, r1, #FPEXC_DEX + b process_exception +skip: + @ Fall into hand on to next handler - appropriate coproc instr @ not recognised by VFP diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c index 2f37e1d6cb45..f6e4d56eda00 100644 --- a/arch/arm/vfp/vfpmodule.c +++ b/arch/arm/vfp/vfpmodule.c @@ -738,63 +738,73 @@ static int __init vfp_init(void) vfp_vector = vfp_null_entry; pr_info("VFP support v0.3: "); - if (VFP_arch) + if (VFP_arch) { pr_cont("not present\n"); - else if (vfpsid & FPSID_NODOUBLE) { - pr_cont("no double precision support\n"); - } else { - hotcpu_notifier(vfp_hotplug, 0); - - VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT; /* Extract the architecture version */ - pr_cont("implementor %02x architecture %d part %02x variant %x rev %x\n", - (vfpsid & FPSID_IMPLEMENTER_MASK) >> FPSID_IMPLEMENTER_BIT, - (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT, - (vfpsid & FPSID_PART_MASK) >> FPSID_PART_BIT, - (vfpsid & FPSID_VARIANT_MASK) >> FPSID_VARIANT_BIT, - (vfpsid & FPSID_REV_MASK) >> FPSID_REV_BIT); - - vfp_vector = vfp_support_entry; - - thread_register_notifier(&vfp_notifier_block); - vfp_pm_init(); - - /* - * We detected VFP, and the support code is - * in place; report VFP support to userspace. - */ - elf_hwcap |= HWCAP_VFP; -#ifdef CONFIG_VFPv3 - if (VFP_arch >= 2) { - elf_hwcap |= HWCAP_VFPv3; - - /* - * Check for VFPv3 D16 and VFPv4 D16. CPUs in - * this configuration only have 16 x 64bit - * registers. - */ - if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK)) == 1) - elf_hwcap |= HWCAP_VFPv3D16; /* also v4-D16 */ - else - elf_hwcap |= HWCAP_VFPD32; - } -#endif + return 0; + /* Extract the architecture on CPUID scheme */ + } else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) { + VFP_arch = vfpsid & FPSID_CPUID_ARCH_MASK; + VFP_arch >>= FPSID_ARCH_BIT; /* * Check for the presence of the Advanced SIMD * load/store instructions, integer and single * precision floating point operations. Only check * for NEON if the hardware has the MVFR registers. 
*/ - if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) { -#ifdef CONFIG_NEON - if ((fmrx(MVFR1) & 0x000fff00) == 0x00011100) - elf_hwcap |= HWCAP_NEON; -#endif -#ifdef CONFIG_VFPv3 + if (IS_ENABLED(CONFIG_NEON) && + (fmrx(MVFR1) & 0x000fff00) == 0x00011100) + elf_hwcap |= HWCAP_NEON; + + if (IS_ENABLED(CONFIG_VFPv3)) { + u32 mvfr0 = fmrx(MVFR0); + if (((mvfr0 & MVFR0_DP_MASK) >> MVFR0_DP_BIT) == 0x2 || + ((mvfr0 & MVFR0_SP_MASK) >> MVFR0_SP_BIT) == 0x2) { + elf_hwcap |= HWCAP_VFPv3; + /* + * Check for VFPv3 D16 and VFPv4 D16. CPUs in + * this configuration only have 16 x 64bit + * registers. + */ + if ((mvfr0 & MVFR0_A_SIMD_MASK) == 1) + /* also v4-D16 */ + elf_hwcap |= HWCAP_VFPv3D16; + else + elf_hwcap |= HWCAP_VFPD32; + } + if ((fmrx(MVFR1) & 0xf0000000) == 0x10000000) elf_hwcap |= HWCAP_VFPv4; -#endif } + /* Extract the architecture version on pre-cpuid scheme */ + } else { + if (vfpsid & FPSID_NODOUBLE) { + pr_cont("no double precision support\n"); + return 0; + } + + VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT; } + + hotcpu_notifier(vfp_hotplug, 0); + + vfp_vector = vfp_support_entry; + + thread_register_notifier(&vfp_notifier_block); + vfp_pm_init(); + + /* + * We detected VFP, and the support code is + * in place; report VFP support to userspace. + */ + elf_hwcap |= HWCAP_VFP; + + pr_cont("implementor %02x architecture %d part %02x variant %x rev %x\n", + (vfpsid & FPSID_IMPLEMENTER_MASK) >> FPSID_IMPLEMENTER_BIT, + VFP_arch, + (vfpsid & FPSID_PART_MASK) >> FPSID_PART_BIT, + (vfpsid & FPSID_VARIANT_MASK) >> FPSID_VARIANT_BIT, + (vfpsid & FPSID_REV_MASK) >> FPSID_REV_BIT); + return 0; } diff --git a/arch/arm/vfp/vfpsingle.c b/arch/arm/vfp/vfpsingle.c index 4f96c1617aae..f0465ba0f221 100644 --- a/arch/arm/vfp/vfpsingle.c +++ b/arch/arm/vfp/vfpsingle.c @@ -290,7 +290,7 @@ u32 vfp_estimate_sqrt_significand(u32 exponent, u32 significand) u32 z, a; if ((significand & 0xc0000000) != 0x40000000) { - printk(KERN_WARNING "VFP: estimate_sqrt: invalid significand\n"); + pr_warn("VFP: estimate_sqrt: invalid significand\n"); } a = significand << 1; diff --git a/arch/arm/xen/Makefile b/arch/arm/xen/Makefile index 1f85bfe6b470..12969523414c 100644 --- a/arch/arm/xen/Makefile +++ b/arch/arm/xen/Makefile @@ -1 +1 @@ -obj-y := enlighten.o hypercall.o grant-table.o p2m.o mm.o mm32.o +obj-y := enlighten.o hypercall.o grant-table.o p2m.o mm.o diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c index 0e15f011f9c8..c7ca936ebd99 100644 --- a/arch/arm/xen/enlighten.c +++ b/arch/arm/xen/enlighten.c @@ -261,11 +261,6 @@ static int __init xen_guest_init(void) xen_setup_features(); - if (!xen_feature(XENFEAT_grant_map_identity)) { - pr_warn("Please upgrade your Xen.\n" - "If your platform has any non-coherent DMA devices, they won't work properly.\n"); - } - if (xen_feature(XENFEAT_dom0)) xen_start_info->flags |= SIF_INITDOMAIN|SIF_PRIVILEGED; else diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c index b0e77de99148..351b24a979d4 100644 --- a/arch/arm/xen/mm.c +++ b/arch/arm/xen/mm.c @@ -1,6 +1,10 @@ +#include <linux/cpu.h> +#include <linux/dma-mapping.h> #include <linux/bootmem.h> #include <linux/gfp.h> +#include <linux/highmem.h> #include <linux/export.h> +#include <linux/of_address.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/dma-mapping.h> @@ -8,6 +12,7 @@ #include <linux/swiotlb.h> #include <xen/xen.h> +#include <xen/interface/grant_table.h> #include <xen/interface/memory.h> #include <xen/swiotlb-xen.h> @@ -16,6 +21,114 @@ #include <asm/xen/hypercall.h> 
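The new arch/arm/xen/mm.c code that follows (together with the removal of mm32.c further below) switches ARM Xen DMA cache maintenance from remapping foreign pages through a per-CPU scratch PTE to the GNTTABOP_cache_flush grant-table operation, which works on bus addresses and so needs no local mapping. A hedged sketch of issuing that hypercall for a single page (the helper name and call site are illustrative, not part of the patch):

#include <linux/dma-mapping.h>
#include <xen/interface/grant_table.h>
#include <asm/xen/hypercall.h>

/* Clean one page-sized DMA region belonging to a foreign domain. */
static int foo_flush_foreign_page(dma_addr_t bus_addr, size_t len)
{
        struct gnttab_cache_flush cflush = {
                .a.dev_bus_addr = bus_addr & PAGE_MASK,
                .offset         = bus_addr & ~PAGE_MASK,
                .length         = len,
                .op             = GNTTAB_CACHE_CLEAN,
        };

        /* Returns -ENOSYS on hypervisors without the cache-flush op. */
        return HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1);
}

xen_mm_init() below probes for the operation once at boot; when the hypervisor lacks it, xen_arch_need_swiotlb() reports that non-coherent devices touching foreign pages must fall back to swiotlb bounce buffers.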
#include <asm/xen/interface.h> +enum dma_cache_op { + DMA_UNMAP, + DMA_MAP, +}; +static bool hypercall_cflush = false; + +/* functions called by SWIOTLB */ + +static void dma_cache_maint(dma_addr_t handle, unsigned long offset, + size_t size, enum dma_data_direction dir, enum dma_cache_op op) +{ + struct gnttab_cache_flush cflush; + unsigned long pfn; + size_t left = size; + + pfn = (handle >> PAGE_SHIFT) + offset / PAGE_SIZE; + offset %= PAGE_SIZE; + + do { + size_t len = left; + + /* buffers in highmem or foreign pages cannot cross page + * boundaries */ + if (len + offset > PAGE_SIZE) + len = PAGE_SIZE - offset; + + cflush.op = 0; + cflush.a.dev_bus_addr = pfn << PAGE_SHIFT; + cflush.offset = offset; + cflush.length = len; + + if (op == DMA_UNMAP && dir != DMA_TO_DEVICE) + cflush.op = GNTTAB_CACHE_INVAL; + if (op == DMA_MAP) { + if (dir == DMA_FROM_DEVICE) + cflush.op = GNTTAB_CACHE_INVAL; + else + cflush.op = GNTTAB_CACHE_CLEAN; + } + if (cflush.op) + HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1); + + offset = 0; + pfn++; + left -= len; + } while (left); +} + +static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle, + size_t size, enum dma_data_direction dir) +{ + dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_UNMAP); +} + +static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle, + size_t size, enum dma_data_direction dir) +{ + dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_MAP); +} + +void __xen_dma_map_page(struct device *hwdev, struct page *page, + dma_addr_t dev_addr, unsigned long offset, size_t size, + enum dma_data_direction dir, struct dma_attrs *attrs) +{ + if (is_device_dma_coherent(hwdev)) + return; + if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) + return; + + __xen_dma_page_cpu_to_dev(hwdev, dev_addr, size, dir); +} + +void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, + size_t size, enum dma_data_direction dir, + struct dma_attrs *attrs) + +{ + if (is_device_dma_coherent(hwdev)) + return; + if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) + return; + + __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir); +} + +void __xen_dma_sync_single_for_cpu(struct device *hwdev, + dma_addr_t handle, size_t size, enum dma_data_direction dir) +{ + if (is_device_dma_coherent(hwdev)) + return; + __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir); +} + +void __xen_dma_sync_single_for_device(struct device *hwdev, + dma_addr_t handle, size_t size, enum dma_data_direction dir) +{ + if (is_device_dma_coherent(hwdev)) + return; + __xen_dma_page_cpu_to_dev(hwdev, handle, size, dir); +} + +bool xen_arch_need_swiotlb(struct device *dev, + unsigned long pfn, + unsigned long mfn) +{ + return (!hypercall_cflush && (pfn != mfn) && !is_device_dma_coherent(dev)); +} + int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order, unsigned int address_bits, dma_addr_t *dma_handle) @@ -56,10 +169,18 @@ static struct dma_map_ops xen_swiotlb_dma_ops = { int __init xen_mm_init(void) { + struct gnttab_cache_flush cflush; if (!xen_initial_domain()) return 0; xen_swiotlb_init(1, false); xen_dma_ops = &xen_swiotlb_dma_ops; + + cflush.op = 0; + cflush.a.dev_bus_addr = 0; + cflush.offset = 0; + cflush.length = 0; + if (HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1) != -ENOSYS) + hypercall_cflush = true; return 0; } arch_initcall(xen_mm_init); diff --git a/arch/arm/xen/mm32.c b/arch/arm/xen/mm32.c deleted file mode 100644 index 3b99860fd7ae..000000000000 --- 
a/arch/arm/xen/mm32.c +++ /dev/null @@ -1,202 +0,0 @@ -#include <linux/cpu.h> -#include <linux/dma-mapping.h> -#include <linux/gfp.h> -#include <linux/highmem.h> - -#include <xen/features.h> - -static DEFINE_PER_CPU(unsigned long, xen_mm32_scratch_virt); -static DEFINE_PER_CPU(pte_t *, xen_mm32_scratch_ptep); - -static int alloc_xen_mm32_scratch_page(int cpu) -{ - struct page *page; - unsigned long virt; - pmd_t *pmdp; - pte_t *ptep; - - if (per_cpu(xen_mm32_scratch_ptep, cpu) != NULL) - return 0; - - page = alloc_page(GFP_KERNEL); - if (page == NULL) { - pr_warn("Failed to allocate xen_mm32_scratch_page for cpu %d\n", cpu); - return -ENOMEM; - } - - virt = (unsigned long)__va(page_to_phys(page)); - pmdp = pmd_offset(pud_offset(pgd_offset_k(virt), virt), virt); - ptep = pte_offset_kernel(pmdp, virt); - - per_cpu(xen_mm32_scratch_virt, cpu) = virt; - per_cpu(xen_mm32_scratch_ptep, cpu) = ptep; - - return 0; -} - -static int xen_mm32_cpu_notify(struct notifier_block *self, - unsigned long action, void *hcpu) -{ - int cpu = (long)hcpu; - switch (action) { - case CPU_UP_PREPARE: - if (alloc_xen_mm32_scratch_page(cpu)) - return NOTIFY_BAD; - break; - default: - break; - } - return NOTIFY_OK; -} - -static struct notifier_block xen_mm32_cpu_notifier = { - .notifier_call = xen_mm32_cpu_notify, -}; - -static void* xen_mm32_remap_page(dma_addr_t handle) -{ - unsigned long virt = get_cpu_var(xen_mm32_scratch_virt); - pte_t *ptep = __get_cpu_var(xen_mm32_scratch_ptep); - - *ptep = pfn_pte(handle >> PAGE_SHIFT, PAGE_KERNEL); - local_flush_tlb_kernel_page(virt); - - return (void*)virt; -} - -static void xen_mm32_unmap(void *vaddr) -{ - put_cpu_var(xen_mm32_scratch_virt); -} - - -/* functions called by SWIOTLB */ - -static void dma_cache_maint(dma_addr_t handle, unsigned long offset, - size_t size, enum dma_data_direction dir, - void (*op)(const void *, size_t, int)) -{ - unsigned long pfn; - size_t left = size; - - pfn = (handle >> PAGE_SHIFT) + offset / PAGE_SIZE; - offset %= PAGE_SIZE; - - do { - size_t len = left; - void *vaddr; - - if (!pfn_valid(pfn)) - { - /* Cannot map the page, we don't know its physical address. 
- * Return and hope for the best */ - if (!xen_feature(XENFEAT_grant_map_identity)) - return; - vaddr = xen_mm32_remap_page(handle) + offset; - op(vaddr, len, dir); - xen_mm32_unmap(vaddr - offset); - } else { - struct page *page = pfn_to_page(pfn); - - if (PageHighMem(page)) { - if (len + offset > PAGE_SIZE) - len = PAGE_SIZE - offset; - - if (cache_is_vipt_nonaliasing()) { - vaddr = kmap_atomic(page); - op(vaddr + offset, len, dir); - kunmap_atomic(vaddr); - } else { - vaddr = kmap_high_get(page); - if (vaddr) { - op(vaddr + offset, len, dir); - kunmap_high(page); - } - } - } else { - vaddr = page_address(page) + offset; - op(vaddr, len, dir); - } - } - - offset = 0; - pfn++; - left -= len; - } while (left); -} - -static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle, - size_t size, enum dma_data_direction dir) -{ - /* Cannot use __dma_page_dev_to_cpu because we don't have a - * struct page for handle */ - - if (dir != DMA_TO_DEVICE) - outer_inv_range(handle, handle + size); - - dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, dmac_unmap_area); -} - -static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle, - size_t size, enum dma_data_direction dir) -{ - - dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, dmac_map_area); - - if (dir == DMA_FROM_DEVICE) { - outer_inv_range(handle, handle + size); - } else { - outer_clean_range(handle, handle + size); - } -} - -void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, - size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) - -{ - if (!__generic_dma_ops(hwdev)->unmap_page) - return; - if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) - return; - - __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir); -} - -void xen_dma_sync_single_for_cpu(struct device *hwdev, - dma_addr_t handle, size_t size, enum dma_data_direction dir) -{ - if (!__generic_dma_ops(hwdev)->sync_single_for_cpu) - return; - __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir); -} - -void xen_dma_sync_single_for_device(struct device *hwdev, - dma_addr_t handle, size_t size, enum dma_data_direction dir) -{ - if (!__generic_dma_ops(hwdev)->sync_single_for_device) - return; - __xen_dma_page_cpu_to_dev(hwdev, handle, size, dir); -} - -int __init xen_mm32_init(void) -{ - int cpu; - - if (!xen_initial_domain()) - return 0; - - register_cpu_notifier(&xen_mm32_cpu_notifier); - get_online_cpus(); - for_each_online_cpu(cpu) { - if (alloc_xen_mm32_scratch_page(cpu)) { - put_online_cpus(); - unregister_cpu_notifier(&xen_mm32_cpu_notifier); - return -ENOMEM; - } - } - put_online_cpus(); - - return 0; -} -arch_initcall(xen_mm32_init); diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 6b1ebd964c10..b1f9a20a3677 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -2,6 +2,7 @@ config ARM64 def_bool y select ARCH_BINFMT_ELF_RANDOMIZE_PIE select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE + select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_HAS_SG_CHAIN select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_USE_CMPXCHG_LOCKREF @@ -13,7 +14,9 @@ config ARM64 select ARM_ARCH_TIMER select ARM_GIC select AUDIT_ARCH_COMPAT_GENERIC + select ARM_GIC_V2M if PCI_MSI select ARM_GIC_V3 + select ARM_GIC_V3_ITS if PCI_MSI select BUILDTIME_EXTABLE_SORT select CLONE_BACKWARDS select COMMON_CLK diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig index a38b02ce5f9a..2cf32e9887e1 100644 --- a/arch/arm64/crypto/Kconfig +++ b/arch/arm64/crypto/Kconfig @@ -49,4 +49,8 @@ config 
CRYPTO_AES_ARM64_NEON_BLK select CRYPTO_AES select CRYPTO_ABLK_HELPER +config CRYPTO_CRC32_ARM64 + tristate "CRC32 and CRC32C using optional ARMv8 instructions" + depends on ARM64 + select CRYPTO_HASH endif diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile index a3f935fde975..5720608c50b1 100644 --- a/arch/arm64/crypto/Makefile +++ b/arch/arm64/crypto/Makefile @@ -34,5 +34,9 @@ AFLAGS_aes-neon.o := -DINTERLEAVE=4 CFLAGS_aes-glue-ce.o := -DUSE_V8_CRYPTO_EXTENSIONS +obj-$(CONFIG_CRYPTO_CRC32_ARM64) += crc32-arm64.o + +CFLAGS_crc32-arm64.o := -mcpu=generic+crc + $(obj)/aes-glue-%.o: $(src)/aes-glue.c FORCE $(call if_changed_rule,cc_o_c) diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c b/arch/arm64/crypto/aes-ce-ccm-glue.c index 0ac73b838fa3..6c348df5bf36 100644 --- a/arch/arm64/crypto/aes-ce-ccm-glue.c +++ b/arch/arm64/crypto/aes-ce-ccm-glue.c @@ -296,4 +296,4 @@ module_exit(aes_mod_exit); MODULE_DESCRIPTION("Synchronous AES in CCM mode using ARMv8 Crypto Extensions"); MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); MODULE_LICENSE("GPL v2"); -MODULE_ALIAS("ccm(aes)"); +MODULE_ALIAS_CRYPTO("ccm(aes)"); diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c index 801aae32841f..b1b5b893eb20 100644 --- a/arch/arm64/crypto/aes-glue.c +++ b/arch/arm64/crypto/aes-glue.c @@ -44,10 +44,10 @@ MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 Crypto Extensions"); #define aes_xts_encrypt neon_aes_xts_encrypt #define aes_xts_decrypt neon_aes_xts_decrypt MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 NEON"); -MODULE_ALIAS("ecb(aes)"); -MODULE_ALIAS("cbc(aes)"); -MODULE_ALIAS("ctr(aes)"); -MODULE_ALIAS("xts(aes)"); +MODULE_ALIAS_CRYPTO("ecb(aes)"); +MODULE_ALIAS_CRYPTO("cbc(aes)"); +MODULE_ALIAS_CRYPTO("ctr(aes)"); +MODULE_ALIAS_CRYPTO("xts(aes)"); #endif MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); diff --git a/arch/arm64/crypto/crc32-arm64.c b/arch/arm64/crypto/crc32-arm64.c new file mode 100644 index 000000000000..9499199924ae --- /dev/null +++ b/arch/arm64/crypto/crc32-arm64.c @@ -0,0 +1,274 @@ +/* + * crc32-arm64.c - CRC32 and CRC32C using optional ARMv8 instructions + * + * Module based on crypto/crc32c_generic.c + * + * CRC32 loop taken from Ed Nevill's Hadoop CRC patch + * http://mail-archives.apache.org/mod_mbox/hadoop-common-dev/201406.mbox/%3C1403687030.3355.19.camel%40localhost.localdomain%3E + * + * Using inline assembly instead of intrinsics in order to be backwards + * compatible with older compilers. + * + * Copyright (C) 2014 Linaro Ltd <yazen.ghannam@linaro.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
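The header comment above explains that the implementation deliberately uses inline assembly rather than compiler intrinsics, for the sake of older toolchains. Purely for comparison, the intrinsic form it alludes to would look roughly like the sketch below on a toolchain that ships <arm_acle.h> and is built with the CRC extension enabled (-march=armv8-a+crc); this is an illustration, not part of the patch.

#include <arm_acle.h>
#include <stdint.h>

/* Illustrative 64-bit-at-a-time CRC32 step using the ACLE __crc32d intrinsic. */
static uint32_t crc32_le_step(uint32_t crc, const uint64_t *p, unsigned int words)
{
	while (words--)
		crc = __crc32d(crc, *p++);
	return crc;
}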
+ */ + +#include <linux/unaligned/access_ok.h> +#include <linux/cpufeature.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/string.h> + +#include <crypto/internal/hash.h> + +MODULE_AUTHOR("Yazen Ghannam <yazen.ghannam@linaro.org>"); +MODULE_DESCRIPTION("CRC32 and CRC32C using optional ARMv8 instructions"); +MODULE_LICENSE("GPL v2"); + +#define CRC32X(crc, value) __asm__("crc32x %w[c], %w[c], %x[v]":[c]"+r"(crc):[v]"r"(value)) +#define CRC32W(crc, value) __asm__("crc32w %w[c], %w[c], %w[v]":[c]"+r"(crc):[v]"r"(value)) +#define CRC32H(crc, value) __asm__("crc32h %w[c], %w[c], %w[v]":[c]"+r"(crc):[v]"r"(value)) +#define CRC32B(crc, value) __asm__("crc32b %w[c], %w[c], %w[v]":[c]"+r"(crc):[v]"r"(value)) +#define CRC32CX(crc, value) __asm__("crc32cx %w[c], %w[c], %x[v]":[c]"+r"(crc):[v]"r"(value)) +#define CRC32CW(crc, value) __asm__("crc32cw %w[c], %w[c], %w[v]":[c]"+r"(crc):[v]"r"(value)) +#define CRC32CH(crc, value) __asm__("crc32ch %w[c], %w[c], %w[v]":[c]"+r"(crc):[v]"r"(value)) +#define CRC32CB(crc, value) __asm__("crc32cb %w[c], %w[c], %w[v]":[c]"+r"(crc):[v]"r"(value)) + +static u32 crc32_arm64_le_hw(u32 crc, const u8 *p, unsigned int len) +{ + s64 length = len; + + while ((length -= sizeof(u64)) >= 0) { + CRC32X(crc, get_unaligned_le64(p)); + p += sizeof(u64); + } + + /* The following is more efficient than the straight loop */ + if (length & sizeof(u32)) { + CRC32W(crc, get_unaligned_le32(p)); + p += sizeof(u32); + } + if (length & sizeof(u16)) { + CRC32H(crc, get_unaligned_le16(p)); + p += sizeof(u16); + } + if (length & sizeof(u8)) + CRC32B(crc, *p); + + return crc; +} + +static u32 crc32c_arm64_le_hw(u32 crc, const u8 *p, unsigned int len) +{ + s64 length = len; + + while ((length -= sizeof(u64)) >= 0) { + CRC32CX(crc, get_unaligned_le64(p)); + p += sizeof(u64); + } + + /* The following is more efficient than the straight loop */ + if (length & sizeof(u32)) { + CRC32CW(crc, get_unaligned_le32(p)); + p += sizeof(u32); + } + if (length & sizeof(u16)) { + CRC32CH(crc, get_unaligned_le16(p)); + p += sizeof(u16); + } + if (length & sizeof(u8)) + CRC32CB(crc, *p); + + return crc; +} + +#define CHKSUM_BLOCK_SIZE 1 +#define CHKSUM_DIGEST_SIZE 4 + +struct chksum_ctx { + u32 key; +}; + +struct chksum_desc_ctx { + u32 crc; +}; + +static int chksum_init(struct shash_desc *desc) +{ + struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm); + struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); + + ctx->crc = mctx->key; + + return 0; +} + +/* + * Setting the seed allows arbitrary accumulators and flexible XOR policy + * If your algorithm starts with ~0, then XOR with ~0 before you set + * the seed. 
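As a usage illustration of the setkey comment above (hypothetical caller, not part of the patch): a caller that wants to resume a CRC from a value previously produced by this driver must undo the final inversion before seeding the transform, because the final/finup helpers below write out ~crc. A minimal sketch, assuming the "crc32" shash registered by this module and the stock crypto_shash API (SHASH_DESC_ON_STACK assumed available):

#include <crypto/hash.h>

/* Sketch: continue a CRC32 over (data, len), starting from prev_crc. */
static int crc32_resume_sketch(const u8 *data, unsigned int len,
			       u32 prev_crc, u8 *out)
{
	struct crypto_shash *tfm;
	__le32 seed = cpu_to_le32(~prev_crc);	/* undo the final XOR with ~0 */
	int err;

	tfm = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_shash_setkey(tfm, (const u8 *)&seed, sizeof(seed));
	if (!err) {
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		desc->flags = 0;
		err = crypto_shash_digest(desc, data, len, out);
	}
	crypto_free_shash(tfm);
	return err;
}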
+ */ +static int chksum_setkey(struct crypto_shash *tfm, const u8 *key, + unsigned int keylen) +{ + struct chksum_ctx *mctx = crypto_shash_ctx(tfm); + + if (keylen != sizeof(mctx->key)) { + crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + mctx->key = get_unaligned_le32(key); + return 0; +} + +static int chksum_update(struct shash_desc *desc, const u8 *data, + unsigned int length) +{ + struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); + + ctx->crc = crc32_arm64_le_hw(ctx->crc, data, length); + return 0; +} + +static int chksumc_update(struct shash_desc *desc, const u8 *data, + unsigned int length) +{ + struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); + + ctx->crc = crc32c_arm64_le_hw(ctx->crc, data, length); + return 0; +} + +static int chksum_final(struct shash_desc *desc, u8 *out) +{ + struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); + + put_unaligned_le32(~ctx->crc, out); + return 0; +} + +static int __chksum_finup(u32 crc, const u8 *data, unsigned int len, u8 *out) +{ + put_unaligned_le32(~crc32_arm64_le_hw(crc, data, len), out); + return 0; +} + +static int __chksumc_finup(u32 crc, const u8 *data, unsigned int len, u8 *out) +{ + put_unaligned_le32(~crc32c_arm64_le_hw(crc, data, len), out); + return 0; +} + +static int chksum_finup(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) +{ + struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); + + return __chksum_finup(ctx->crc, data, len, out); +} + +static int chksumc_finup(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) +{ + struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); + + return __chksumc_finup(ctx->crc, data, len, out); +} + +static int chksum_digest(struct shash_desc *desc, const u8 *data, + unsigned int length, u8 *out) +{ + struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm); + + return __chksum_finup(mctx->key, data, length, out); +} + +static int chksumc_digest(struct shash_desc *desc, const u8 *data, + unsigned int length, u8 *out) +{ + struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm); + + return __chksumc_finup(mctx->key, data, length, out); +} + +static int crc32_cra_init(struct crypto_tfm *tfm) +{ + struct chksum_ctx *mctx = crypto_tfm_ctx(tfm); + + mctx->key = ~0; + return 0; +} + +static struct shash_alg crc32_alg = { + .digestsize = CHKSUM_DIGEST_SIZE, + .setkey = chksum_setkey, + .init = chksum_init, + .update = chksum_update, + .final = chksum_final, + .finup = chksum_finup, + .digest = chksum_digest, + .descsize = sizeof(struct chksum_desc_ctx), + .base = { + .cra_name = "crc32", + .cra_driver_name = "crc32-arm64-hw", + .cra_priority = 300, + .cra_blocksize = CHKSUM_BLOCK_SIZE, + .cra_alignmask = 0, + .cra_ctxsize = sizeof(struct chksum_ctx), + .cra_module = THIS_MODULE, + .cra_init = crc32_cra_init, + } +}; + +static struct shash_alg crc32c_alg = { + .digestsize = CHKSUM_DIGEST_SIZE, + .setkey = chksum_setkey, + .init = chksum_init, + .update = chksumc_update, + .final = chksum_final, + .finup = chksumc_finup, + .digest = chksumc_digest, + .descsize = sizeof(struct chksum_desc_ctx), + .base = { + .cra_name = "crc32c", + .cra_driver_name = "crc32c-arm64-hw", + .cra_priority = 300, + .cra_blocksize = CHKSUM_BLOCK_SIZE, + .cra_alignmask = 0, + .cra_ctxsize = sizeof(struct chksum_ctx), + .cra_module = THIS_MODULE, + .cra_init = crc32_cra_init, + } +}; + +static int __init crc32_mod_init(void) +{ + int err; + + err = crypto_register_shash(&crc32_alg); + + if (err) + return err; + + err = crypto_register_shash(&crc32c_alg); + + if 
(err) { + crypto_unregister_shash(&crc32_alg); + return err; + } + + return 0; +} + +static void __exit crc32_mod_exit(void) +{ + crypto_unregister_shash(&crc32_alg); + crypto_unregister_shash(&crc32c_alg); +} + +module_cpu_feature_match(CRC32, crc32_mod_init); +module_exit(crc32_mod_exit); diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild index dc770bd4f5a5..55103e50c51b 100644 --- a/arch/arm64/include/asm/Kbuild +++ b/arch/arm64/include/asm/Kbuild @@ -14,7 +14,6 @@ generic-y += early_ioremap.h generic-y += emergency-restart.h generic-y += errno.h generic-y += ftrace.h -generic-y += hash.h generic-y += hw_irq.h generic-y += ioctl.h generic-y += ioctls.h @@ -28,6 +27,7 @@ generic-y += local64.h generic-y += mcs_spinlock.h generic-y += mman.h generic-y += msgbuf.h +generic-y += msi.h generic-y += mutex.h generic-y += pci.h generic-y += pci-bridge.h diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h index 6389d60574d9..a5abb0062d6e 100644 --- a/arch/arm64/include/asm/barrier.h +++ b/arch/arm64/include/asm/barrier.h @@ -32,6 +32,9 @@ #define rmb() dsb(ld) #define wmb() dsb(st) +#define dma_rmb() dmb(oshld) +#define dma_wmb() dmb(oshst) + #ifndef CONFIG_SMP #define smp_mb() barrier() #define smp_rmb() barrier() diff --git a/arch/arm64/include/asm/device.h b/arch/arm64/include/asm/device.h index cf98b362094b..243ef256b8c9 100644 --- a/arch/arm64/include/asm/device.h +++ b/arch/arm64/include/asm/device.h @@ -21,6 +21,7 @@ struct dev_archdata { #ifdef CONFIG_IOMMU_API void *iommu; /* private IOMMU data */ #endif + bool dma_coherent; }; struct pdev_archdata { diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h index adeae3f6f0fc..d34189bceff7 100644 --- a/arch/arm64/include/asm/dma-mapping.h +++ b/arch/arm64/include/asm/dma-mapping.h @@ -54,11 +54,18 @@ static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops) static inline int set_arch_dma_coherent_ops(struct device *dev) { + dev->archdata.dma_coherent = true; set_dma_ops(dev, &coherent_swiotlb_dma_ops); return 0; } #define set_arch_dma_coherent_ops set_arch_dma_coherent_ops +/* do not use this function in a driver */ +static inline bool is_device_dma_coherent(struct device *dev) +{ + return dev->archdata.dma_coherent; +} + #include <asm-generic/dma-mapping-common.h> static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 41a43bf26492..df22314f57cf 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -279,6 +279,7 @@ void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address, #endif /* CONFIG_HAVE_RCU_TABLE_FREE */ #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ +#define pmd_dirty(pmd) pte_dirty(pmd_pte(pmd)) #define pmd_young(pmd) pte_young(pmd_pte(pmd)) #define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd))) #define pmd_mksplitting(pmd) pte_pmd(pte_mkspecial(pmd_pte(pmd))) diff --git a/arch/arm64/include/asm/xen/page-coherent.h b/arch/arm64/include/asm/xen/page-coherent.h index dde3fc9c49f0..2052102b4e02 100644 --- a/arch/arm64/include/asm/xen/page-coherent.h +++ b/arch/arm64/include/asm/xen/page-coherent.h @@ -1,43 +1 @@ -#ifndef _ASM_ARM64_XEN_PAGE_COHERENT_H -#define _ASM_ARM64_XEN_PAGE_COHERENT_H - -#include <asm/page.h> -#include <linux/dma-attrs.h> -#include <linux/dma-mapping.h> - -static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size, - dma_addr_t 
*dma_handle, gfp_t flags, - struct dma_attrs *attrs) -{ - return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs); -} - -static inline void xen_free_coherent_pages(struct device *hwdev, size_t size, - void *cpu_addr, dma_addr_t dma_handle, - struct dma_attrs *attrs) -{ - __generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs); -} - -static inline void xen_dma_map_page(struct device *hwdev, struct page *page, - unsigned long offset, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) -{ -} - -static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, - size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) -{ -} - -static inline void xen_dma_sync_single_for_cpu(struct device *hwdev, - dma_addr_t handle, size_t size, enum dma_data_direction dir) -{ -} - -static inline void xen_dma_sync_single_for_device(struct device *hwdev, - dma_addr_t handle, size_t size, enum dma_data_direction dir) -{ -} -#endif /* _ASM_ARM64_XEN_PAGE_COHERENT_H */ +#include <../../arm/include/asm/xen/page-coherent.h> diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c index 663da771580a..f1dbca7d5c96 100644 --- a/arch/arm64/kernel/psci.c +++ b/arch/arm64/kernel/psci.c @@ -511,7 +511,7 @@ static int cpu_psci_cpu_kill(unsigned int cpu) static int psci_suspend_finisher(unsigned long index) { - struct psci_power_state *state = __get_cpu_var(psci_power_state); + struct psci_power_state *state = __this_cpu_read(psci_power_state); return psci_ops.cpu_suspend(state[index - 1], virt_to_phys(cpu_resume)); @@ -520,7 +520,7 @@ static int psci_suspend_finisher(unsigned long index) static int __maybe_unused cpu_psci_cpu_suspend(unsigned long index) { int ret; - struct psci_power_state *state = __get_cpu_var(psci_power_state); + struct psci_power_state *state = __this_cpu_read(psci_power_state); /* * idle state index 0 corresponds to wfi, should never be called * from the cpu_suspend operations @@ -540,6 +540,8 @@ const struct cpu_operations cpu_psci_ops = { .name = "psci", #ifdef CONFIG_CPU_IDLE .cpu_init_idle = cpu_psci_cpu_init_idle, +#endif +#ifdef CONFIG_ARM64_CPU_SUSPEND .cpu_suspend = cpu_psci_cpu_suspend, #endif #ifdef CONFIG_SMP diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c index bf69601be546..cf33f33333cc 100644 --- a/arch/arm64/mm/dump.c +++ b/arch/arm64/mm/dump.c @@ -182,9 +182,6 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level, static const char units[] = "KMGTPE"; u64 prot = val & pg_level[level].mask; - if (addr < LOWEST_ADDR) - return; - if (!st->level) { st->level = level; st->current_prot = prot; @@ -272,7 +269,7 @@ static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start) static void walk_pgd(struct pg_state *st, struct mm_struct *mm, unsigned long start) { - pgd_t *pgd = pgd_offset(mm, 0); + pgd_t *pgd = pgd_offset(mm, 0UL); unsigned i; unsigned long addr; diff --git a/arch/avr32/include/asm/Kbuild b/arch/avr32/include/asm/Kbuild index 2a71b1cb9848..528d70d47a54 100644 --- a/arch/avr32/include/asm/Kbuild +++ b/arch/avr32/include/asm/Kbuild @@ -7,7 +7,6 @@ generic-y += div64.h generic-y += emergency-restart.h generic-y += exec.h generic-y += futex.h -generic-y += hash.h generic-y += irq_regs.h generic-y += irq_work.h generic-y += local.h diff --git a/arch/avr32/include/uapi/asm/socket.h b/arch/avr32/include/uapi/asm/socket.h index 6e6cd159924b..2b65ed6b277c 100644 --- a/arch/avr32/include/uapi/asm/socket.h +++ b/arch/avr32/include/uapi/asm/socket.h @@ 
-80,4 +80,9 @@ #define SO_BPF_EXTENSIONS 48 +#define SO_INCOMING_CPU 49 + +#define SO_ATTACH_BPF 50 +#define SO_DETACH_BPF SO_DETACH_FILTER + #endif /* _UAPI__ASM_AVR32_SOCKET_H */ diff --git a/arch/blackfin/include/asm/Kbuild b/arch/blackfin/include/asm/Kbuild index 46ed6bb9c679..4bd3c3cfc9ab 100644 --- a/arch/blackfin/include/asm/Kbuild +++ b/arch/blackfin/include/asm/Kbuild @@ -10,7 +10,6 @@ generic-y += emergency-restart.h generic-y += errno.h generic-y += fb.h generic-y += futex.h -generic-y += hash.h generic-y += hw_irq.h generic-y += ioctl.h generic-y += ipcbuf.h diff --git a/arch/blackfin/include/asm/barrier.h b/arch/blackfin/include/asm/barrier.h index 420006877998..dfb66fe88b34 100644 --- a/arch/blackfin/include/asm/barrier.h +++ b/arch/blackfin/include/asm/barrier.h @@ -22,6 +22,57 @@ # define mb() do { barrier(); smp_check_barrier(); smp_mark_barrier(); } while (0) # define rmb() do { barrier(); smp_check_barrier(); } while (0) # define wmb() do { barrier(); smp_mark_barrier(); } while (0) +/* + * read_barrier_depends - Flush all pending reads that subsequents reads + * depend on. + * + * No data-dependent reads from memory-like regions are ever reordered + * over this barrier. All reads preceding this primitive are guaranteed + * to access memory (but not necessarily other CPUs' caches) before any + * reads following this primitive that depend on the data return by + * any of the preceding reads. This primitive is much lighter weight than + * rmb() on most CPUs, and is never heavier weight than is + * rmb(). + * + * These ordering constraints are respected by both the local CPU + * and the compiler. + * + * Ordering is not guaranteed by anything other than these primitives, + * not even by data dependencies. See the documentation for + * memory_barrier() for examples and URLs to more information. + * + * For example, the following code would force ordering (the initial + * value of "a" is zero, "b" is one, and "p" is "&a"): + * + * <programlisting> + * CPU 0 CPU 1 + * + * b = 2; + * memory_barrier(); + * p = &b; q = p; + * read_barrier_depends(); + * d = *q; + * </programlisting> + * + * because the read of "*q" depends on the read of "p" and these + * two reads are separated by a read_barrier_depends(). However, + * the following code, with the same initial values for "a" and "b": + * + * <programlisting> + * CPU 0 CPU 1 + * + * a = 2; + * memory_barrier(); + * b = 3; y = b; + * read_barrier_depends(); + * x = a; + * </programlisting> + * + * does not enforce ordering, since there is no data dependency between + * the read of "a" and the read of "b". Therefore, on some CPUs, such + * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb() + * in cases like this where there are no data dependencies. 
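A minimal sketch of the pattern the comment above describes, with illustrative names that are not part of the patch (in real kernel code this is normally written with rcu_assign_pointer()/rcu_dereference(), which wrap exactly these barriers):

#include <linux/compiler.h>
#include <asm/barrier.h>

struct foo { int val; };
static struct foo *gp;			/* shared, published pointer */

/* Writer: initialise the object, then publish the pointer to it. */
static void publish(struct foo *newp)
{
	newp->val = 42;
	smp_wmb();			/* order the init before the publication */
	ACCESS_ONCE(gp) = newp;
}

/* Reader: the load of p->val is data-dependent on the load of gp. */
static int consume(void)
{
	struct foo *p = ACCESS_ONCE(gp);

	smp_read_barrier_depends();	/* required on Alpha-class machines */
	return p ? p->val : -1;
}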
+ */ # define read_barrier_depends() do { barrier(); smp_check_barrier(); } while (0) #endif diff --git a/arch/blackfin/include/asm/bfin_serial.h b/arch/blackfin/include/asm/bfin_serial.h index 2d90d62edc97..d00d732784b1 100644 --- a/arch/blackfin/include/asm/bfin_serial.h +++ b/arch/blackfin/include/asm/bfin_serial.h @@ -9,8 +9,11 @@ #ifndef __BFIN_ASM_SERIAL_H__ #define __BFIN_ASM_SERIAL_H__ +#include <linux/circ_buf.h> #include <linux/serial_core.h> #include <linux/spinlock.h> +#include <linux/timer.h> +#include <linux/workqueue.h> #include <mach/anomaly.h> #include <mach/bfin_serial.h> @@ -25,10 +28,6 @@ # endif #endif -struct circ_buf; -struct timer_list; -struct work_struct; - struct bfin_serial_port { struct uart_port port; unsigned int old_status; diff --git a/arch/c6x/include/asm/Kbuild b/arch/c6x/include/asm/Kbuild index e77e0c1dbe75..2de73391b81e 100644 --- a/arch/c6x/include/asm/Kbuild +++ b/arch/c6x/include/asm/Kbuild @@ -15,7 +15,6 @@ generic-y += exec.h generic-y += fb.h generic-y += fcntl.h generic-y += futex.h -generic-y += hash.h generic-y += hw_irq.h generic-y += io.h generic-y += ioctl.h diff --git a/arch/cris/include/asm/Kbuild b/arch/cris/include/asm/Kbuild index 2ca489eaadd3..d5f124832fd1 100644 --- a/arch/cris/include/asm/Kbuild +++ b/arch/cris/include/asm/Kbuild @@ -7,7 +7,6 @@ generic-y += barrier.h generic-y += clkdev.h generic-y += cputime.h generic-y += exec.h -generic-y += hash.h generic-y += irq_work.h generic-y += kvm_para.h generic-y += linkage.h diff --git a/arch/cris/include/uapi/asm/socket.h b/arch/cris/include/uapi/asm/socket.h index ed94e5ed0a23..e2503d9f1869 100644 --- a/arch/cris/include/uapi/asm/socket.h +++ b/arch/cris/include/uapi/asm/socket.h @@ -82,6 +82,11 @@ #define SO_BPF_EXTENSIONS 48 +#define SO_INCOMING_CPU 49 + +#define SO_ATTACH_BPF 50 +#define SO_DETACH_BPF SO_DETACH_FILTER + #endif /* _ASM_SOCKET_H */ diff --git a/arch/frv/include/asm/Kbuild b/arch/frv/include/asm/Kbuild index 3caf05cabfc5..e3f81b53578e 100644 --- a/arch/frv/include/asm/Kbuild +++ b/arch/frv/include/asm/Kbuild @@ -2,7 +2,6 @@ generic-y += clkdev.h generic-y += cputime.h generic-y += exec.h -generic-y += hash.h generic-y += irq_work.h generic-y += mcs_spinlock.h generic-y += preempt.h diff --git a/arch/frv/include/uapi/asm/socket.h b/arch/frv/include/uapi/asm/socket.h index ca2c6e6f31c6..4823ad125578 100644 --- a/arch/frv/include/uapi/asm/socket.h +++ b/arch/frv/include/uapi/asm/socket.h @@ -80,5 +80,10 @@ #define SO_BPF_EXTENSIONS 48 +#define SO_INCOMING_CPU 49 + +#define SO_ATTACH_BPF 50 +#define SO_DETACH_BPF SO_DETACH_FILTER + #endif /* _ASM_SOCKET_H */ diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild index 5f234a5a2320..c7a99f860b40 100644 --- a/arch/hexagon/include/asm/Kbuild +++ b/arch/hexagon/include/asm/Kbuild @@ -16,7 +16,6 @@ generic-y += fb.h generic-y += fcntl.h generic-y += ftrace.h generic-y += hardirq.h -generic-y += hash.h generic-y += hw_irq.h generic-y += ioctl.h generic-y += ioctls.h diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index c84c88bbbbd7..536d13b0bea6 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -11,7 +11,6 @@ config IA64 select PCI if (!IA64_HP_SIM) select ACPI if (!IA64_HP_SIM) select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI - select PM if (!IA64_HP_SIM) select HAVE_UNSTABLE_SCHED_CLOCK select HAVE_IDE select HAVE_OPROFILE @@ -233,6 +232,7 @@ config IA64_SGI_UV config IA64_HP_SIM bool "Ski-simulator" select SWIOTLB + depends on !PM_RUNTIME endchoice diff --git a/arch/ia64/include/asm/Kbuild 
b/arch/ia64/include/asm/Kbuild index 747320be9d0e..9b41b4bcc073 100644 --- a/arch/ia64/include/asm/Kbuild +++ b/arch/ia64/include/asm/Kbuild @@ -1,7 +1,6 @@ generic-y += clkdev.h generic-y += exec.h -generic-y += hash.h generic-y += irq_work.h generic-y += kvm_para.h generic-y += mcs_spinlock.h diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h index a48957c7b445..f6769eb2bbf9 100644 --- a/arch/ia64/include/asm/barrier.h +++ b/arch/ia64/include/asm/barrier.h @@ -35,26 +35,25 @@ * it's (presumably) much slower than mf and (b) mf.a is supported for * sequential memory pages only. */ -#define mb() ia64_mf() -#define rmb() mb() -#define wmb() mb() -#define read_barrier_depends() do { } while(0) +#define mb() ia64_mf() +#define rmb() mb() +#define wmb() mb() + +#define dma_rmb() mb() +#define dma_wmb() mb() #ifdef CONFIG_SMP # define smp_mb() mb() -# define smp_rmb() rmb() -# define smp_wmb() wmb() -# define smp_read_barrier_depends() read_barrier_depends() - #else - # define smp_mb() barrier() -# define smp_rmb() barrier() -# define smp_wmb() barrier() -# define smp_read_barrier_depends() do { } while(0) - #endif +#define smp_rmb() smp_mb() +#define smp_wmb() smp_mb() + +#define read_barrier_depends() do { } while (0) +#define smp_read_barrier_depends() do { } while (0) + #define smp_mb__before_atomic() barrier() #define smp_mb__after_atomic() barrier() diff --git a/arch/ia64/include/uapi/asm/siginfo.h b/arch/ia64/include/uapi/asm/siginfo.h index 4ea6225196bb..bce9bc1a66c4 100644 --- a/arch/ia64/include/uapi/asm/siginfo.h +++ b/arch/ia64/include/uapi/asm/siginfo.h @@ -63,6 +63,10 @@ typedef struct siginfo { unsigned int _flags; /* see below */ unsigned long _isr; /* isr */ short _addr_lsb; /* lsb of faulting address */ + struct { + void __user *_lower; + void __user *_upper; + } _addr_bnd; } _sigfault; /* SIGPOLL */ @@ -110,9 +114,9 @@ typedef struct siginfo { /* * SIGSEGV si_codes */ -#define __SEGV_PSTKOVF (__SI_FAULT|3) /* paragraph stack overflow */ +#define __SEGV_PSTKOVF (__SI_FAULT|4) /* paragraph stack overflow */ #undef NSIGSEGV -#define NSIGSEGV 3 +#define NSIGSEGV 4 #undef NSIGTRAP #define NSIGTRAP 4 diff --git a/arch/ia64/include/uapi/asm/socket.h b/arch/ia64/include/uapi/asm/socket.h index a1b49bac7951..59be3d87f86d 100644 --- a/arch/ia64/include/uapi/asm/socket.h +++ b/arch/ia64/include/uapi/asm/socket.h @@ -89,4 +89,9 @@ #define SO_BPF_EXTENSIONS 48 +#define SO_INCOMING_CPU 49 + +#define SO_ATTACH_BPF 50 +#define SO_DETACH_BPF SO_DETACH_FILTER + #endif /* _ASM_IA64_SOCKET_H */ diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c index 8c3730c3c63d..8ae36ea177d3 100644 --- a/arch/ia64/kernel/msi_ia64.c +++ b/arch/ia64/kernel/msi_ia64.c @@ -35,7 +35,7 @@ static int ia64_set_msi_irq_affinity(struct irq_data *idata, data |= MSI_DATA_VECTOR(irq_to_vector(irq)); msg.data = data; - write_msi_msg(irq, &msg); + pci_write_msi_msg(irq, &msg); cpumask_copy(idata->affinity, cpumask_of(cpu)); return 0; @@ -71,7 +71,7 @@ int ia64_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc) MSI_DATA_DELIVERY_FIXED | MSI_DATA_VECTOR(vector); - write_msi_msg(irq, &msg); + pci_write_msi_msg(irq, &msg); irq_set_chip_and_handler(irq, &ia64_msi_chip, handle_edge_irq); return 0; @@ -102,8 +102,8 @@ static int ia64_msi_retrigger_irq(struct irq_data *data) */ static struct irq_chip ia64_msi_chip = { .name = "PCI-MSI", - .irq_mask = mask_msi_irq, - .irq_unmask = unmask_msi_irq, + .irq_mask = pci_msi_mask_irq, + .irq_unmask = pci_msi_unmask_irq, .irq_ack = 
ia64_ack_msi_irq, #ifdef CONFIG_SMP .irq_set_affinity = ia64_set_msi_irq_affinity, diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index 5845ffea67c3..dc063fe6646a 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c @@ -2662,7 +2662,7 @@ pfm_context_create(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg ret = -ENOMEM; - fd = get_unused_fd(); + fd = get_unused_fd_flags(0); if (fd < 0) return fd; diff --git a/arch/ia64/sn/kernel/msi_sn.c b/arch/ia64/sn/kernel/msi_sn.c index 446e7799928c..a0eb27b66d13 100644 --- a/arch/ia64/sn/kernel/msi_sn.c +++ b/arch/ia64/sn/kernel/msi_sn.c @@ -145,7 +145,7 @@ int sn_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *entry) msg.data = 0x100 + irq; irq_set_msi_desc(irq, entry); - write_msi_msg(irq, &msg); + pci_write_msi_msg(irq, &msg); irq_set_chip_and_handler(irq, &sn_msi_chip, handle_edge_irq); return 0; @@ -205,7 +205,7 @@ static int sn_set_msi_irq_affinity(struct irq_data *data, msg.address_hi = (u32)(bus_addr >> 32); msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff); - write_msi_msg(irq, &msg); + pci_write_msi_msg(irq, &msg); cpumask_copy(data->affinity, cpu_mask); return 0; @@ -228,8 +228,8 @@ static int sn_msi_retrigger_irq(struct irq_data *data) static struct irq_chip sn_msi_chip = { .name = "PCI-MSI", - .irq_mask = mask_msi_irq, - .irq_unmask = unmask_msi_irq, + .irq_mask = pci_msi_mask_irq, + .irq_unmask = pci_msi_unmask_irq, .irq_ack = sn_ack_msi_irq, #ifdef CONFIG_SMP .irq_set_affinity = sn_set_msi_irq_affinity, diff --git a/arch/m32r/include/asm/Kbuild b/arch/m32r/include/asm/Kbuild index 3796801d6e0c..2edc793372fc 100644 --- a/arch/m32r/include/asm/Kbuild +++ b/arch/m32r/include/asm/Kbuild @@ -2,7 +2,6 @@ generic-y += clkdev.h generic-y += cputime.h generic-y += exec.h -generic-y += hash.h generic-y += irq_work.h generic-y += mcs_spinlock.h generic-y += module.h diff --git a/arch/m32r/include/uapi/asm/socket.h b/arch/m32r/include/uapi/asm/socket.h index 6c9a24b3aefa..7bc4cb273856 100644 --- a/arch/m32r/include/uapi/asm/socket.h +++ b/arch/m32r/include/uapi/asm/socket.h @@ -80,4 +80,9 @@ #define SO_BPF_EXTENSIONS 48 +#define SO_INCOMING_CPU 49 + +#define SO_ATTACH_BPF 50 +#define SO_DETACH_BPF SO_DETACH_FILTER + #endif /* _ASM_M32R_SOCKET_H */ diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild index dbaf9f3065e8..9b6c691874bd 100644 --- a/arch/m68k/include/asm/Kbuild +++ b/arch/m68k/include/asm/Kbuild @@ -6,7 +6,6 @@ generic-y += device.h generic-y += emergency-restart.h generic-y += errno.h generic-y += exec.h -generic-y += hash.h generic-y += hw_irq.h generic-y += ioctl.h generic-y += ipcbuf.h diff --git a/arch/metag/include/asm/Kbuild b/arch/metag/include/asm/Kbuild index 7b8111c8f937..0bf5d525b945 100644 --- a/arch/metag/include/asm/Kbuild +++ b/arch/metag/include/asm/Kbuild @@ -13,7 +13,6 @@ generic-y += fb.h generic-y += fcntl.h generic-y += futex.h generic-y += hardirq.h -generic-y += hash.h generic-y += hw_irq.h generic-y += ioctl.h generic-y += ioctls.h diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h index c7591e80067c..d703d8e26a65 100644 --- a/arch/metag/include/asm/barrier.h +++ b/arch/metag/include/asm/barrier.h @@ -4,8 +4,6 @@ #include <asm/metag_mem.h> #define nop() asm volatile ("NOP") -#define mb() wmb() -#define rmb() barrier() #ifdef CONFIG_METAG_META21 @@ -41,13 +39,13 @@ static inline void wr_fence(void) #endif /* !CONFIG_METAG_META21 */ -static inline void wmb(void) -{ - /* flush writes through the write 
combiner */ - wr_fence(); -} +/* flush writes through the write combiner */ +#define mb() wr_fence() +#define rmb() barrier() +#define wmb() mb() -#define read_barrier_depends() do { } while (0) +#define dma_rmb() rmb() +#define dma_wmb() wmb() #ifndef CONFIG_SMP #define fence() do { } while (0) @@ -82,7 +80,10 @@ static inline void fence(void) #define smp_wmb() barrier() #endif #endif -#define smp_read_barrier_depends() do { } while (0) + +#define read_barrier_depends() do { } while (0) +#define smp_read_barrier_depends() do { } while (0) + #define set_mb(var, value) do { var = value; smp_mb(); } while (0) #define smp_store_release(p, v) \ diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index a7736fa0580c..0bce820428fc 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig @@ -1,5 +1,6 @@ config MICROBLAZE def_bool y + select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_MIGHT_HAVE_PC_PARPORT select ARCH_WANT_IPC_PARSE_VERSION select ARCH_WANT_OPTIONAL_GPIOLIB diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild index 448143b8cabd..ab564a6db5c3 100644 --- a/arch/microblaze/include/asm/Kbuild +++ b/arch/microblaze/include/asm/Kbuild @@ -4,7 +4,6 @@ generic-y += clkdev.h generic-y += cputime.h generic-y += device.h generic-y += exec.h -generic-y += hash.h generic-y += irq_work.h generic-y += mcs_spinlock.h generic-y += preempt.h diff --git a/arch/mips/Kbuild.platforms b/arch/mips/Kbuild.platforms index f5e18bf3275e..e5fc463b36d0 100644 --- a/arch/mips/Kbuild.platforms +++ b/arch/mips/Kbuild.platforms @@ -2,7 +2,9 @@ platforms += alchemy platforms += ar7 +platforms += ath25 platforms += ath79 +platforms += bcm3384 platforms += bcm47xx platforms += bcm63xx platforms += cavium-octeon diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 9536ef912f59..3289969ee423 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -53,6 +53,7 @@ config MIPS select HAVE_CC_STACKPROTECTOR select CPU_PM if CPU_IDLE select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST + select ARCH_BINFMT_ELF_STATE menu "Machine selection" @@ -62,7 +63,7 @@ choice config MIPS_ALCHEMY bool "Alchemy processor based machines" - select 64BIT_PHYS_ADDR + select ARCH_PHYS_ADDR_T_64BIT select CEVT_R4K select CSRC_R4K select IRQ_CPU @@ -96,6 +97,20 @@ config AR7 Support for the Texas Instruments AR7 System-on-a-Chip family: TNETD7100, 7200 and 7300. +config ATH25 + bool "Atheros AR231x/AR531x SoC support" + select CEVT_R4K + select CSRC_R4K + select DMA_NONCOHERENT + select IRQ_CPU + select IRQ_DOMAIN + select SYS_HAS_CPU_MIPS32_R1 + select SYS_SUPPORTS_BIG_ENDIAN + select SYS_SUPPORTS_32BIT_KERNEL + select SYS_HAS_EARLY_PRINTK + help + Support for Atheros AR231x and Atheros AR531x based boards + config ATH79 bool "Atheros AR71XX/AR724X/AR913X based boards" select ARCH_REQUIRE_GPIOLIB @@ -115,6 +130,32 @@ config ATH79 help Support for the Atheros AR71XX/AR724X/AR913X SoCs. +config BCM3384 + bool "Broadcom BCM3384 based boards" + select BOOT_RAW + select NO_EXCEPT_FILL + select USE_OF + select CEVT_R4K + select CSRC_R4K + select SYNC_R4K + select COMMON_CLK + select DMA_NONCOHERENT + select IRQ_CPU + select SYS_SUPPORTS_32BIT_KERNEL + select SYS_SUPPORTS_BIG_ENDIAN + select SYS_SUPPORTS_HIGHMEM + select SYS_HAS_CPU_BMIPS5000 + select SWAP_IO_SPACE + select USB_EHCI_BIG_ENDIAN_DESC + select USB_EHCI_BIG_ENDIAN_MMIO + select USB_OHCI_BIG_ENDIAN_DESC + select USB_OHCI_BIG_ENDIAN_MMIO + help + Support for BCM3384 based boards. 
BCM3384/BCM33843 is a cable modem + chipset with a Linux application processor that is often used to + provide Samba services, a CUPS print server, and/or advanced routing + features. + config BCM47XX bool "Broadcom BCM47XX based boards" select ARCH_WANT_OPTIONAL_GPIOLIB @@ -269,6 +310,8 @@ config LANTIQ select USE_OF select PINCTRL select PINCTRL_LANTIQ + select ARCH_HAS_RESET_CONTROLLER + select RESET_CONTROLLER config LASAT bool "LASAT Networks platforms" @@ -315,17 +358,18 @@ config MIPS_MALTA select BOOT_RAW select CEVT_R4K select CSRC_R4K - select CSRC_GIC + select CLKSRC_MIPS_GIC select DMA_MAYBE_COHERENT select GENERIC_ISA_DMA select HAVE_PCSPKR_PLATFORM select IRQ_CPU - select IRQ_GIC + select MIPS_GIC select HW_HAS_PCI select I8253 select I8259 select MIPS_BONITO64 select MIPS_CPU_SCACHE + select MIPS_L1_CACHE_SHIFT_6 select PCI_GT64XXX_PCI0 select MIPS_MSC select SWAP_IO_SPACE @@ -340,6 +384,7 @@ config MIPS_MALTA select SYS_SUPPORTS_64BIT_KERNEL select SYS_SUPPORTS_BIG_ENDIAN select SYS_SUPPORTS_LITTLE_ENDIAN + select SYS_SUPPORTS_MICROMIPS select SYS_SUPPORTS_MIPS_CMP select SYS_SUPPORTS_MIPS_CPS select SYS_SUPPORTS_MIPS16 @@ -357,12 +402,12 @@ config MIPS_SEAD3 select BUILTIN_DTB select CEVT_R4K select CSRC_R4K - select CSRC_GIC + select CLKSRC_MIPS_GIC select CPU_MIPSR2_IRQ_VI select CPU_MIPSR2_IRQ_EI select DMA_NONCOHERENT select IRQ_CPU - select IRQ_GIC + select MIPS_GIC select LIBFDT select MIPS_MSC select SYS_HAS_CPU_MIPS32_R1 @@ -726,7 +771,7 @@ config MIKROTIK_RB532 config CAVIUM_OCTEON_SOC bool "Cavium Networks Octeon SoC based boards" select CEVT_R4K - select 64BIT_PHYS_ADDR + select ARCH_PHYS_ADDR_T_64BIT select DMA_COHERENT select SYS_SUPPORTS_64BIT_KERNEL select SYS_SUPPORTS_BIG_ENDIAN @@ -768,7 +813,7 @@ config NLM_XLR_BOARD select SWAP_IO_SPACE select SYS_SUPPORTS_32BIT_KERNEL select SYS_SUPPORTS_64BIT_KERNEL - select 64BIT_PHYS_ADDR + select ARCH_PHYS_ADDR_T_64BIT select SYS_SUPPORTS_BIG_ENDIAN select SYS_SUPPORTS_HIGHMEM select DMA_COHERENT @@ -794,7 +839,7 @@ config NLM_XLP_BOARD select HW_HAS_PCI select SYS_SUPPORTS_32BIT_KERNEL select SYS_SUPPORTS_64BIT_KERNEL - select 64BIT_PHYS_ADDR + select ARCH_PHYS_ADDR_T_64BIT select SYS_SUPPORTS_BIG_ENDIAN select SYS_SUPPORTS_LITTLE_ENDIAN select SYS_SUPPORTS_HIGHMEM @@ -835,6 +880,7 @@ config MIPS_PARAVIRT endchoice source "arch/mips/alchemy/Kconfig" +source "arch/mips/ath25/Kconfig" source "arch/mips/ath79/Kconfig" source "arch/mips/bcm47xx/Kconfig" source "arch/mips/bcm63xx/Kconfig" @@ -907,10 +953,6 @@ config CEVT_GT641XX config CEVT_R4K bool -config CEVT_GIC - select MIPS_CM - bool - config CEVT_SB1250 bool @@ -926,10 +968,6 @@ config CSRC_IOASIC config CSRC_R4K bool -config CSRC_GIC - select MIPS_CM - bool - config CSRC_SB1250 bool @@ -941,7 +979,7 @@ config FW_CFE bool config ARCH_DMA_ADDR_T_64BIT - def_bool (HIGHMEM && 64BIT_PHYS_ADDR) || 64BIT + def_bool (HIGHMEM && ARCH_PHYS_ADDR_T_64BIT) || 64BIT config DMA_MAYBE_COHERENT select DMA_NONCOHERENT @@ -975,6 +1013,7 @@ config SYS_SUPPORTS_HOTPLUG_CPU config I8259 bool + select IRQ_DOMAIN config MIPS_BONITO64 bool @@ -1055,6 +1094,7 @@ config MIPS_HUGE_TLB_SUPPORT config IRQ_CPU bool + select IRQ_DOMAIN config IRQ_CPU_RM7K bool @@ -1071,10 +1111,6 @@ config IRQ_TXX9 config IRQ_GT641XX bool -config IRQ_GIC - select MIPS_CM - bool - config PCI_GT64XXX_PCI0 bool @@ -1574,6 +1610,7 @@ config CPU_LOONGSON1 select CPU_HAS_PREFETCH select CPU_SUPPORTS_32BIT_KERNEL select CPU_SUPPORTS_HIGHMEM + select CPU_SUPPORTS_CPUFREQ config CPU_BMIPS32_3300 select SMP_UP if SMP @@ 
-1586,12 +1623,14 @@ config CPU_BMIPS4350 config CPU_BMIPS4380 bool + select MIPS_L1_CACHE_SHIFT_6 select SYS_SUPPORTS_SMP select SYS_SUPPORTS_HOTPLUG_CPU config CPU_BMIPS5000 bool select MIPS_CPU_SCACHE + select MIPS_L1_CACHE_SHIFT_7 select SYS_SUPPORTS_SMP select SYS_SUPPORTS_HOTPLUG_CPU @@ -1886,15 +1925,6 @@ config FORCE_MAX_ZONEORDER The page size is not necessarily 4KB. Keep this in mind when choosing a value for this option. -config CEVT_GIC - bool "Use GIC global counter for clock events" - depends on IRQ_GIC && !MIPS_SEAD3 - help - Use the GIC global counter for the clock events. The R4K clock - event driver is always present, so if the platform ends up not - detecting a GIC, it will fall back to the R4K timer for the - generation of clock events. - config BOARD_SCACHE bool @@ -1908,7 +1938,6 @@ config IP22_CPU_SCACHE config MIPS_CPU_SCACHE bool select BOARD_SCACHE - select MIPS_L1_CACHE_SHIFT_6 config R5000_CPU_SCACHE bool @@ -2095,11 +2124,8 @@ config SB1_PASS_2_1_WORKAROUNDS default y -config 64BIT_PHYS_ADDR - bool - config ARCH_PHYS_ADDR_T_64BIT - def_bool 64BIT_PHYS_ADDR + bool choice prompt "SmartMIPS or microMIPS ASE support" @@ -2122,7 +2148,7 @@ config CPU_HAS_SMARTMIPS here. config CPU_MICROMIPS - depends on SYS_SUPPORTS_MICROMIPS + depends on 32BIT && SYS_SUPPORTS_MICROMIPS bool "microMIPS" help When this option is enabled the kernel will be built using the diff --git a/arch/mips/Kconfig.debug b/arch/mips/Kconfig.debug index 3a2b775e8458..88a9f433f6fc 100644 --- a/arch/mips/Kconfig.debug +++ b/arch/mips/Kconfig.debug @@ -122,4 +122,17 @@ config SPINLOCK_TEST help Add several files to the debugfs to test spinlock speed. +config FP32XX_HYBRID_FPRS + bool "Run FP32 & FPXX code with hybrid FPRs" + depends on MIPS_O32_FP64_SUPPORT + help + The hybrid FPR scheme is normally used only when a program needs to + execute a mix of FP32 & FP64A code, since the trapping & emulation + that it entails is expensive. When enabled, this option will lead + to the kernel running programs which use the FP32 & FPXX FP ABIs + using the hybrid FPR scheme, which can be useful for debugging + purposes. + + If unsure, say N. + endmenu diff --git a/arch/mips/Makefile b/arch/mips/Makefile index 58076472bdd8..2563a088d3b8 100644 --- a/arch/mips/Makefile +++ b/arch/mips/Makefile @@ -380,6 +380,7 @@ define archhelp echo ' vmlinux.ecoff - ECOFF boot image' echo ' vmlinux.bin - Raw binary boot image' echo ' vmlinux.srec - SREC boot image' + echo ' vmlinux.32 - 64-bit boot image wrapped in 32bits (IP22/IP32)' echo ' vmlinuz - Compressed boot(zboot) image' echo ' vmlinuz.ecoff - ECOFF zboot image' echo ' vmlinuz.bin - Raw binary zboot image' diff --git a/arch/mips/alchemy/common/clock.c b/arch/mips/alchemy/common/clock.c index d7557cde271a..203e4403c366 100644 --- a/arch/mips/alchemy/common/clock.c +++ b/arch/mips/alchemy/common/clock.c @@ -37,7 +37,6 @@ #include <linux/io.h> #include <linux/clk-provider.h> #include <linux/clkdev.h> -#include <linux/clk-private.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/types.h> @@ -397,10 +396,10 @@ static long alchemy_clk_fgcs_detr(struct clk_hw *hw, unsigned long rate, break; /* if this parent is currently unused, remember it. - * XXX: I know it's a layering violation, but it works - * so well.. (if (!clk_has_active_children(pc)) ) + * XXX: we would actually want clk_has_active_children() + * but this is a good-enough approximation for now. 
*/ - if (pc->prepare_count == 0) { + if (!__clk_is_prepared(pc)) { if (!free) free = pc; } diff --git a/arch/mips/alchemy/common/setup.c b/arch/mips/alchemy/common/setup.c index ea8f41869e56..4e72daf12c32 100644 --- a/arch/mips/alchemy/common/setup.c +++ b/arch/mips/alchemy/common/setup.c @@ -70,9 +70,9 @@ void __init plat_mem_setup(void) iomem_resource.end = IOMEM_RESOURCE_END; } -#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_PCI) +#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_PCI) /* This routine should be valid for all Au1x based boards */ -phys_t __fixup_bigphys_addr(phys_t phys_addr, phys_t size) +phys_addr_t __fixup_bigphys_addr(phys_addr_t phys_addr, phys_addr_t size) { unsigned long start = ALCHEMY_PCI_MEMWIN_START; unsigned long end = ALCHEMY_PCI_MEMWIN_END; @@ -83,7 +83,7 @@ phys_t __fixup_bigphys_addr(phys_t phys_addr, phys_t size) /* Check for PCI memory window */ if (phys_addr >= start && (phys_addr + size - 1) <= end) - return (phys_t)(AU1500_PCI_MEM_PHYS_ADDR + phys_addr); + return (phys_addr_t)(AU1500_PCI_MEM_PHYS_ADDR + phys_addr); /* default nop */ return phys_addr; diff --git a/arch/mips/ar7/platform.c b/arch/mips/ar7/platform.c index 7e2356fd5fd6..af2441dbfc12 100644 --- a/arch/mips/ar7/platform.c +++ b/arch/mips/ar7/platform.c @@ -311,8 +311,7 @@ static void __init cpmac_get_mac(int instance, unsigned char *dev_addr) &dev_addr[0], &dev_addr[1], &dev_addr[2], &dev_addr[3], &dev_addr[4], &dev_addr[5]) != 6) { - pr_warning("cannot parse mac address, " - "using random address\n"); + pr_warn("cannot parse mac address, using random address\n"); eth_random_addr(dev_addr); } } else @@ -665,7 +664,7 @@ static int __init ar7_register_devices(void) res = platform_device_register(&physmap_flash); if (res) - pr_warning("unable to register physmap-flash: %d\n", res); + pr_warn("unable to register physmap-flash: %d\n", res); if (ar7_is_titan()) titan_fixup_devices(); @@ -673,13 +672,13 @@ static int __init ar7_register_devices(void) ar7_device_disable(vlynq_low_data.reset_bit); res = platform_device_register(&vlynq_low); if (res) - pr_warning("unable to register vlynq-low: %d\n", res); + pr_warn("unable to register vlynq-low: %d\n", res); if (ar7_has_high_vlynq()) { ar7_device_disable(vlynq_high_data.reset_bit); res = platform_device_register(&vlynq_high); if (res) - pr_warning("unable to register vlynq-high: %d\n", res); + pr_warn("unable to register vlynq-high: %d\n", res); } if (ar7_has_high_cpmac()) { @@ -689,9 +688,10 @@ static int __init ar7_register_devices(void) res = platform_device_register(&cpmac_high); if (res) - pr_warning("unable to register cpmac-high: %d\n", res); + pr_warn("unable to register cpmac-high: %d\n", + res); } else - pr_warning("unable to add cpmac-high phy: %d\n", res); + pr_warn("unable to add cpmac-high phy: %d\n", res); } else cpmac_low_data.phy_mask = 0xffffffff; @@ -700,18 +700,18 @@ static int __init ar7_register_devices(void) cpmac_get_mac(0, cpmac_low_data.dev_addr); res = platform_device_register(&cpmac_low); if (res) - pr_warning("unable to register cpmac-low: %d\n", res); + pr_warn("unable to register cpmac-low: %d\n", res); } else - pr_warning("unable to add cpmac-low phy: %d\n", res); + pr_warn("unable to add cpmac-low phy: %d\n", res); detect_leds(); res = platform_device_register(&ar7_gpio_leds); if (res) - pr_warning("unable to register leds: %d\n", res); + pr_warn("unable to register leds: %d\n", res); res = platform_device_register(&ar7_udc); if (res) - pr_warning("unable to register usb slave: %d\n", res); + 
pr_warn("unable to register usb slave: %d\n", res); /* Register watchdog only if enabled in hardware */ bootcr = ioremap_nocache(AR7_REGS_DCL, 4); @@ -726,7 +726,7 @@ static int __init ar7_register_devices(void) ar7_wdt_res.end = ar7_wdt_res.start + 0x20; res = platform_device_register(&ar7_wdt); if (res) - pr_warning("unable to register watchdog: %d\n", res); + pr_warn("unable to register watchdog: %d\n", res); } return 0; diff --git a/arch/mips/ath25/Kconfig b/arch/mips/ath25/Kconfig new file mode 100644 index 000000000000..fc19dd57e42d --- /dev/null +++ b/arch/mips/ath25/Kconfig @@ -0,0 +1,16 @@ +config SOC_AR5312 + bool "Atheros AR5312/AR2312+ SoC support" + depends on ATH25 + default y + +config SOC_AR2315 + bool "Atheros AR2315+ SoC support" + depends on ATH25 + default y + +config PCI_AR2315 + bool "Atheros AR2315 PCI controller support" + depends on SOC_AR2315 + select HW_HAS_PCI + select PCI + default y diff --git a/arch/mips/ath25/Makefile b/arch/mips/ath25/Makefile new file mode 100644 index 000000000000..eabad7da446a --- /dev/null +++ b/arch/mips/ath25/Makefile @@ -0,0 +1,16 @@ +# +# This file is subject to the terms and conditions of the GNU General Public +# License. See the file "COPYING" in the main directory of this archive +# for more details. +# +# Copyright (C) 2006 FON Technology, SL. +# Copyright (C) 2006 Imre Kaloz <kaloz@openwrt.org> +# Copyright (C) 2006-2009 Felix Fietkau <nbd@openwrt.org> +# + +obj-y += board.o prom.o devices.o + +obj-$(CONFIG_EARLY_PRINTK) += early_printk.o + +obj-$(CONFIG_SOC_AR5312) += ar5312.o +obj-$(CONFIG_SOC_AR2315) += ar2315.o diff --git a/arch/mips/ath25/Platform b/arch/mips/ath25/Platform new file mode 100644 index 000000000000..ef3f81fa080b --- /dev/null +++ b/arch/mips/ath25/Platform @@ -0,0 +1,6 @@ +# +# Atheros AR531X/AR231X WiSoC +# +platform-$(CONFIG_ATH25) += ath25/ +cflags-$(CONFIG_ATH25) += -I$(srctree)/arch/mips/include/asm/mach-ath25 +load-$(CONFIG_ATH25) += 0xffffffff80041000 diff --git a/arch/mips/ath25/ar2315.c b/arch/mips/ath25/ar2315.c new file mode 100644 index 000000000000..2befa7d766a6 --- /dev/null +++ b/arch/mips/ath25/ar2315.c @@ -0,0 +1,364 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2003 Atheros Communications, Inc., All Rights Reserved. + * Copyright (C) 2006 FON Technology, SL. + * Copyright (C) 2006 Imre Kaloz <kaloz@openwrt.org> + * Copyright (C) 2006 Felix Fietkau <nbd@openwrt.org> + * Copyright (C) 2012 Alexandros C. 
Couloumbis <alex@ozo.com> + */ + +/* + * Platform devices for Atheros AR2315 SoCs + */ + +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/bitops.h> +#include <linux/irqdomain.h> +#include <linux/interrupt.h> +#include <linux/platform_device.h> +#include <linux/reboot.h> +#include <asm/bootinfo.h> +#include <asm/reboot.h> +#include <asm/time.h> + +#include <ath25_platform.h> + +#include "devices.h" +#include "ar2315.h" +#include "ar2315_regs.h" + +static void __iomem *ar2315_rst_base; +static struct irq_domain *ar2315_misc_irq_domain; + +static inline u32 ar2315_rst_reg_read(u32 reg) +{ + return __raw_readl(ar2315_rst_base + reg); +} + +static inline void ar2315_rst_reg_write(u32 reg, u32 val) +{ + __raw_writel(val, ar2315_rst_base + reg); +} + +static inline void ar2315_rst_reg_mask(u32 reg, u32 mask, u32 val) +{ + u32 ret = ar2315_rst_reg_read(reg); + + ret &= ~mask; + ret |= val; + ar2315_rst_reg_write(reg, ret); +} + +static irqreturn_t ar2315_ahb_err_handler(int cpl, void *dev_id) +{ + ar2315_rst_reg_write(AR2315_AHB_ERR0, AR2315_AHB_ERROR_DET); + ar2315_rst_reg_read(AR2315_AHB_ERR1); + + pr_emerg("AHB fatal error\n"); + machine_restart("AHB error"); /* Catastrophic failure */ + + return IRQ_HANDLED; +} + +static struct irqaction ar2315_ahb_err_interrupt = { + .handler = ar2315_ahb_err_handler, + .name = "ar2315-ahb-error", +}; + +static void ar2315_misc_irq_handler(unsigned irq, struct irq_desc *desc) +{ + u32 pending = ar2315_rst_reg_read(AR2315_ISR) & + ar2315_rst_reg_read(AR2315_IMR); + unsigned nr, misc_irq = 0; + + if (pending) { + struct irq_domain *domain = irq_get_handler_data(irq); + + nr = __ffs(pending); + misc_irq = irq_find_mapping(domain, nr); + } + + if (misc_irq) { + if (nr == AR2315_MISC_IRQ_GPIO) + ar2315_rst_reg_write(AR2315_ISR, AR2315_ISR_GPIO); + else if (nr == AR2315_MISC_IRQ_WATCHDOG) + ar2315_rst_reg_write(AR2315_ISR, AR2315_ISR_WD); + generic_handle_irq(misc_irq); + } else { + spurious_interrupt(); + } +} + +static void ar2315_misc_irq_unmask(struct irq_data *d) +{ + ar2315_rst_reg_mask(AR2315_IMR, 0, BIT(d->hwirq)); +} + +static void ar2315_misc_irq_mask(struct irq_data *d) +{ + ar2315_rst_reg_mask(AR2315_IMR, BIT(d->hwirq), 0); +} + +static struct irq_chip ar2315_misc_irq_chip = { + .name = "ar2315-misc", + .irq_unmask = ar2315_misc_irq_unmask, + .irq_mask = ar2315_misc_irq_mask, +}; + +static int ar2315_misc_irq_map(struct irq_domain *d, unsigned irq, + irq_hw_number_t hw) +{ + irq_set_chip_and_handler(irq, &ar2315_misc_irq_chip, handle_level_irq); + return 0; +} + +static struct irq_domain_ops ar2315_misc_irq_domain_ops = { + .map = ar2315_misc_irq_map, +}; + +/* + * Called when an interrupt is received, this function + * determines exactly which interrupt it was, and it + * invokes the appropriate handler. + * + * Implicitly, we also define interrupt priority by + * choosing which to dispatch first. 
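For illustration only (hypothetical consumer, not in the patch): once this MISC interrupt domain is registered, a driver for one of the demultiplexed sources obtains its Linux IRQ number through the domain and requests it in the usual way, much as ar2315_arch_init_irq()/ar2315_arch_init() below do for the AHB error and UART0 lines. Assuming access to ar2315_misc_irq_domain, the GPIO summary line could be hooked like this:

static irqreturn_t ar2315_gpio_summary(int irq, void *dev_id)
{
	/* demultiplex per-GPIO interrupts here */
	return IRQ_HANDLED;
}

static int __init ar2315_gpio_irq_sketch(void)
{
	unsigned int virq = irq_create_mapping(ar2315_misc_irq_domain,
					       AR2315_MISC_IRQ_GPIO);

	if (!virq)
		return -ENODEV;
	return request_irq(virq, ar2315_gpio_summary, 0, "ar2315-gpio", NULL);
}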
+ */ +static void ar2315_irq_dispatch(void) +{ + u32 pending = read_c0_status() & read_c0_cause(); + + if (pending & CAUSEF_IP3) + do_IRQ(AR2315_IRQ_WLAN0); +#ifdef CONFIG_PCI_AR2315 + else if (pending & CAUSEF_IP5) + do_IRQ(AR2315_IRQ_LCBUS_PCI); +#endif + else if (pending & CAUSEF_IP2) + do_IRQ(AR2315_IRQ_MISC); + else if (pending & CAUSEF_IP7) + do_IRQ(ATH25_IRQ_CPU_CLOCK); + else + spurious_interrupt(); +} + +void __init ar2315_arch_init_irq(void) +{ + struct irq_domain *domain; + unsigned irq; + + ath25_irq_dispatch = ar2315_irq_dispatch; + + domain = irq_domain_add_linear(NULL, AR2315_MISC_IRQ_COUNT, + &ar2315_misc_irq_domain_ops, NULL); + if (!domain) + panic("Failed to add IRQ domain"); + + irq = irq_create_mapping(domain, AR2315_MISC_IRQ_AHB); + setup_irq(irq, &ar2315_ahb_err_interrupt); + + irq_set_chained_handler(AR2315_IRQ_MISC, ar2315_misc_irq_handler); + irq_set_handler_data(AR2315_IRQ_MISC, domain); + + ar2315_misc_irq_domain = domain; +} + +void __init ar2315_init_devices(void) +{ + /* Find board configuration */ + ath25_find_config(AR2315_SPI_READ_BASE, AR2315_SPI_READ_SIZE); + + ath25_add_wmac(0, AR2315_WLAN0_BASE, AR2315_IRQ_WLAN0); +} + +static void ar2315_restart(char *command) +{ + void (*mips_reset_vec)(void) = (void *)0xbfc00000; + + local_irq_disable(); + + /* try to reset the system via reset control */ + ar2315_rst_reg_write(AR2315_COLD_RESET, AR2317_RESET_SYSTEM); + + /* Cold reset does not work on the AR2315/6, use the GPIO reset bits + * as a workaround. Give it some time to attempt a gpio based hardware + * reset (atheros reference design workaround) */ + + /* TODO: implement the GPIO reset workaround */ + + /* Some boards (e.g. Senao EOC-2610) don't implement the reset logic + * workaround. Attempt to jump to the mips reset location - + * the boot loader itself might be able to recover the system */ + mips_reset_vec(); +} + +/* + * This table is indexed by bits 5..4 of the CLOCKCTL1 register + * to determine the predivisor value.
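As a worked example of the derivation implemented by ar2315_sys_clk() below (illustrative register field values, not measurements): a predivider index of 2 selects divide-by-4 from clockctl1_predivide_table, and a feedback divider of 36 with an add-feedback field of 0 (extra factor 2 * 1) gives

	pllc_out = (40 MHz / 4) * (2 * 1) * 36 = 720 MHz

With a clkm divider index of 0 (divide-by-2 from pllc_divide_table) and a CPU clock divider field of 1 (so cpu_div = 2), the CPU clock comes out at 720 / (2 * 2) = 180 MHz, and ar2315_plat_time_init() then sets mips_hpt_frequency to half of that, 90 MHz.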
+ */ +static int clockctl1_predivide_table[4] __initdata = { 1, 2, 4, 5 }; +static int pllc_divide_table[5] __initdata = { 2, 3, 4, 6, 3 }; + +static unsigned __init ar2315_sys_clk(u32 clock_ctl) +{ + unsigned int pllc_ctrl, cpu_div; + unsigned int pllc_out, refdiv, fdiv, divby2; + unsigned int clk_div; + + pllc_ctrl = ar2315_rst_reg_read(AR2315_PLLC_CTL); + refdiv = ATH25_REG_MS(pllc_ctrl, AR2315_PLLC_REF_DIV); + refdiv = clockctl1_predivide_table[refdiv]; + fdiv = ATH25_REG_MS(pllc_ctrl, AR2315_PLLC_FDBACK_DIV); + divby2 = ATH25_REG_MS(pllc_ctrl, AR2315_PLLC_ADD_FDBACK_DIV) + 1; + pllc_out = (40000000 / refdiv) * (2 * divby2) * fdiv; + + /* clkm input selected */ + switch (clock_ctl & AR2315_CPUCLK_CLK_SEL_M) { + case 0: + case 1: + clk_div = ATH25_REG_MS(pllc_ctrl, AR2315_PLLC_CLKM_DIV); + clk_div = pllc_divide_table[clk_div]; + break; + case 2: + clk_div = ATH25_REG_MS(pllc_ctrl, AR2315_PLLC_CLKC_DIV); + clk_div = pllc_divide_table[clk_div]; + break; + default: + pllc_out = 40000000; + clk_div = 1; + break; + } + + cpu_div = ATH25_REG_MS(clock_ctl, AR2315_CPUCLK_CLK_DIV); + cpu_div = cpu_div * 2 ?: 1; + + return pllc_out / (clk_div * cpu_div); +} + +static inline unsigned ar2315_cpu_frequency(void) +{ + return ar2315_sys_clk(ar2315_rst_reg_read(AR2315_CPUCLK)); +} + +static inline unsigned ar2315_apb_frequency(void) +{ + return ar2315_sys_clk(ar2315_rst_reg_read(AR2315_AMBACLK)); +} + +void __init ar2315_plat_time_init(void) +{ + mips_hpt_frequency = ar2315_cpu_frequency() / 2; +} + +void __init ar2315_plat_mem_setup(void) +{ + void __iomem *sdram_base; + u32 memsize, memcfg; + u32 devid; + u32 config; + + /* Detect memory size */ + sdram_base = ioremap_nocache(AR2315_SDRAMCTL_BASE, + AR2315_SDRAMCTL_SIZE); + memcfg = __raw_readl(sdram_base + AR2315_MEM_CFG); + memsize = 1 + ATH25_REG_MS(memcfg, AR2315_MEM_CFG_DATA_WIDTH); + memsize <<= 1 + ATH25_REG_MS(memcfg, AR2315_MEM_CFG_COL_WIDTH); + memsize <<= 1 + ATH25_REG_MS(memcfg, AR2315_MEM_CFG_ROW_WIDTH); + memsize <<= 3; + add_memory_region(0, memsize, BOOT_MEM_RAM); + iounmap(sdram_base); + + ar2315_rst_base = ioremap_nocache(AR2315_RST_BASE, AR2315_RST_SIZE); + + /* Detect the hardware based on the device ID */ + devid = ar2315_rst_reg_read(AR2315_SREV) & AR2315_REV_CHIP; + switch (devid) { + case 0x91: /* Need to check */ + ath25_soc = ATH25_SOC_AR2318; + break; + case 0x90: + ath25_soc = ATH25_SOC_AR2317; + break; + case 0x87: + ath25_soc = ATH25_SOC_AR2316; + break; + case 0x86: + default: + ath25_soc = ATH25_SOC_AR2315; + break; + } + ath25_board.devid = devid; + + /* Clear any lingering AHB errors */ + config = read_c0_config(); + write_c0_config(config & ~0x3); + ar2315_rst_reg_write(AR2315_AHB_ERR0, AR2315_AHB_ERROR_DET); + ar2315_rst_reg_read(AR2315_AHB_ERR1); + ar2315_rst_reg_write(AR2315_WDT_CTRL, AR2315_WDT_CTRL_IGNORE); + + _machine_restart = ar2315_restart; +} + +#ifdef CONFIG_PCI_AR2315 +static struct resource ar2315_pci_res[] = { + { + .name = "ar2315-pci-ctrl", + .flags = IORESOURCE_MEM, + .start = AR2315_PCI_BASE, + .end = AR2315_PCI_BASE + AR2315_PCI_SIZE - 1, + }, + { + .name = "ar2315-pci-ext", + .flags = IORESOURCE_MEM, + .start = AR2315_PCI_EXT_BASE, + .end = AR2315_PCI_EXT_BASE + AR2315_PCI_EXT_SIZE - 1, + }, + { + .name = "ar2315-pci", + .flags = IORESOURCE_IRQ, + .start = AR2315_IRQ_LCBUS_PCI, + .end = AR2315_IRQ_LCBUS_PCI, + }, +}; +#endif + +void __init ar2315_arch_init(void) +{ + unsigned irq = irq_create_mapping(ar2315_misc_irq_domain, + AR2315_MISC_IRQ_UART0); + + ath25_serial_setup(AR2315_UART0_BASE, irq, 
ar2315_apb_frequency()); + +#ifdef CONFIG_PCI_AR2315 + if (ath25_soc == ATH25_SOC_AR2315) { + /* Reset PCI DMA logic */ + ar2315_rst_reg_mask(AR2315_RESET, 0, AR2315_RESET_PCIDMA); + msleep(20); + ar2315_rst_reg_mask(AR2315_RESET, AR2315_RESET_PCIDMA, 0); + msleep(20); + + /* Configure endians */ + ar2315_rst_reg_mask(AR2315_ENDIAN_CTL, 0, AR2315_CONFIG_PCIAHB | + AR2315_CONFIG_PCIAHB_BRIDGE); + + /* Configure as PCI host with DMA */ + ar2315_rst_reg_write(AR2315_PCICLK, AR2315_PCICLK_PLLC_CLKM | + (AR2315_PCICLK_IN_FREQ_DIV_6 << + AR2315_PCICLK_DIV_S)); + ar2315_rst_reg_mask(AR2315_AHB_ARB_CTL, 0, AR2315_ARB_PCI); + ar2315_rst_reg_mask(AR2315_IF_CTL, AR2315_IF_PCI_CLK_MASK | + AR2315_IF_MASK, AR2315_IF_PCI | + AR2315_IF_PCI_HOST | AR2315_IF_PCI_INTR | + (AR2315_IF_PCI_CLK_OUTPUT_CLK << + AR2315_IF_PCI_CLK_SHIFT)); + + platform_device_register_simple("ar2315-pci", -1, + ar2315_pci_res, + ARRAY_SIZE(ar2315_pci_res)); + } +#endif +} diff --git a/arch/mips/ath25/ar2315.h b/arch/mips/ath25/ar2315.h new file mode 100644 index 000000000000..877afe63eed5 --- /dev/null +++ b/arch/mips/ath25/ar2315.h @@ -0,0 +1,22 @@ +#ifndef __AR2315_H +#define __AR2315_H + +#ifdef CONFIG_SOC_AR2315 + +void ar2315_arch_init_irq(void); +void ar2315_init_devices(void); +void ar2315_plat_time_init(void); +void ar2315_plat_mem_setup(void); +void ar2315_arch_init(void); + +#else + +static inline void ar2315_arch_init_irq(void) {} +static inline void ar2315_init_devices(void) {} +static inline void ar2315_plat_time_init(void) {} +static inline void ar2315_plat_mem_setup(void) {} +static inline void ar2315_arch_init(void) {} + +#endif + +#endif /* __AR2315_H */ diff --git a/arch/mips/ath25/ar2315_regs.h b/arch/mips/ath25/ar2315_regs.h new file mode 100644 index 000000000000..16e86149cb74 --- /dev/null +++ b/arch/mips/ath25/ar2315_regs.h @@ -0,0 +1,410 @@ +/* + * Register definitions for AR2315+ + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2003 Atheros Communications, Inc., All Rights Reserved. + * Copyright (C) 2006 FON Technology, SL. + * Copyright (C) 2006 Imre Kaloz <kaloz@openwrt.org> + * Copyright (C) 2006-2008 Felix Fietkau <nbd@openwrt.org> + */ + +#ifndef __ASM_MACH_ATH25_AR2315_REGS_H +#define __ASM_MACH_ATH25_AR2315_REGS_H + +/* + * IRQs + */ +#define AR2315_IRQ_MISC (MIPS_CPU_IRQ_BASE + 2) /* C0_CAUSE: 0x0400 */ +#define AR2315_IRQ_WLAN0 (MIPS_CPU_IRQ_BASE + 3) /* C0_CAUSE: 0x0800 */ +#define AR2315_IRQ_ENET0 (MIPS_CPU_IRQ_BASE + 4) /* C0_CAUSE: 0x1000 */ +#define AR2315_IRQ_LCBUS_PCI (MIPS_CPU_IRQ_BASE + 5) /* C0_CAUSE: 0x2000 */ +#define AR2315_IRQ_WLAN0_POLL (MIPS_CPU_IRQ_BASE + 6) /* C0_CAUSE: 0x4000 */ + +/* + * Miscellaneous interrupts, which share IP2. 
+ */ +#define AR2315_MISC_IRQ_UART0 0 +#define AR2315_MISC_IRQ_I2C_RSVD 1 +#define AR2315_MISC_IRQ_SPI 2 +#define AR2315_MISC_IRQ_AHB 3 +#define AR2315_MISC_IRQ_APB 4 +#define AR2315_MISC_IRQ_TIMER 5 +#define AR2315_MISC_IRQ_GPIO 6 +#define AR2315_MISC_IRQ_WATCHDOG 7 +#define AR2315_MISC_IRQ_IR_RSVD 8 +#define AR2315_MISC_IRQ_COUNT 9 + +/* + * Address map + */ +#define AR2315_SPI_READ_BASE 0x08000000 /* SPI flash */ +#define AR2315_SPI_READ_SIZE 0x01000000 +#define AR2315_WLAN0_BASE 0x10000000 /* Wireless MMR */ +#define AR2315_PCI_BASE 0x10100000 /* PCI MMR */ +#define AR2315_PCI_SIZE 0x00001000 +#define AR2315_SDRAMCTL_BASE 0x10300000 /* SDRAM MMR */ +#define AR2315_SDRAMCTL_SIZE 0x00000020 +#define AR2315_LOCAL_BASE 0x10400000 /* Local bus MMR */ +#define AR2315_ENET0_BASE 0x10500000 /* Ethernet MMR */ +#define AR2315_RST_BASE 0x11000000 /* Reset control MMR */ +#define AR2315_RST_SIZE 0x00000100 +#define AR2315_UART0_BASE 0x11100000 /* UART MMR */ +#define AR2315_SPI_MMR_BASE 0x11300000 /* SPI flash MMR */ +#define AR2315_SPI_MMR_SIZE 0x00000010 +#define AR2315_PCI_EXT_BASE 0x80000000 /* PCI external */ +#define AR2315_PCI_EXT_SIZE 0x40000000 + +/* + * Configuration registers + */ + +/* Cold reset register */ +#define AR2315_COLD_RESET 0x0000 + +#define AR2315_RESET_COLD_AHB 0x00000001 +#define AR2315_RESET_COLD_APB 0x00000002 +#define AR2315_RESET_COLD_CPU 0x00000004 +#define AR2315_RESET_COLD_CPUWARM 0x00000008 +#define AR2315_RESET_SYSTEM (RESET_COLD_CPU |\ + RESET_COLD_APB |\ + RESET_COLD_AHB) /* full system */ +#define AR2317_RESET_SYSTEM 0x00000010 + +/* Reset register */ +#define AR2315_RESET 0x0004 + +#define AR2315_RESET_WARM_WLAN0_MAC 0x00000001 /* warm reset WLAN0 MAC */ +#define AR2315_RESET_WARM_WLAN0_BB 0x00000002 /* warm reset WLAN0 BB */ +#define AR2315_RESET_MPEGTS_RSVD 0x00000004 /* warm reset MPEG-TS */ +#define AR2315_RESET_PCIDMA 0x00000008 /* warm reset PCI ahb/dma */ +#define AR2315_RESET_MEMCTL 0x00000010 /* warm reset mem control */ +#define AR2315_RESET_LOCAL 0x00000020 /* warm reset local bus */ +#define AR2315_RESET_I2C_RSVD 0x00000040 /* warm reset I2C bus */ +#define AR2315_RESET_SPI 0x00000080 /* warm reset SPI iface */ +#define AR2315_RESET_UART0 0x00000100 /* warm reset UART0 */ +#define AR2315_RESET_IR_RSVD 0x00000200 /* warm reset IR iface */ +#define AR2315_RESET_EPHY0 0x00000400 /* cold reset ENET0 phy */ +#define AR2315_RESET_ENET0 0x00000800 /* cold reset ENET0 MAC */ + +/* AHB master arbitration control */ +#define AR2315_AHB_ARB_CTL 0x0008 + +#define AR2315_ARB_CPU 0x00000001 /* CPU, default */ +#define AR2315_ARB_WLAN 0x00000002 /* WLAN */ +#define AR2315_ARB_MPEGTS_RSVD 0x00000004 /* MPEG-TS */ +#define AR2315_ARB_LOCAL 0x00000008 /* Local bus */ +#define AR2315_ARB_PCI 0x00000010 /* PCI bus */ +#define AR2315_ARB_ETHERNET 0x00000020 /* Ethernet */ +#define AR2315_ARB_RETRY 0x00000100 /* Retry policy (debug) */ + +/* Config Register */ +#define AR2315_ENDIAN_CTL 0x000c + +#define AR2315_CONFIG_AHB 0x00000001 /* EC-AHB bridge endian */ +#define AR2315_CONFIG_WLAN 0x00000002 /* WLAN byteswap */ +#define AR2315_CONFIG_MPEGTS_RSVD 0x00000004 /* MPEG-TS byteswap */ +#define AR2315_CONFIG_PCI 0x00000008 /* PCI byteswap */ +#define AR2315_CONFIG_MEMCTL 0x00000010 /* Mem controller endian */ +#define AR2315_CONFIG_LOCAL 0x00000020 /* Local bus byteswap */ +#define AR2315_CONFIG_ETHERNET 0x00000040 /* Ethernet byteswap */ +#define AR2315_CONFIG_MERGE 0x00000200 /* CPU write buffer merge */ +#define AR2315_CONFIG_CPU 0x00000400 /* CPU big endian */ 
+#define AR2315_CONFIG_BIG 0x00000400 +#define AR2315_CONFIG_PCIAHB 0x00000800 +#define AR2315_CONFIG_PCIAHB_BRIDGE 0x00001000 +#define AR2315_CONFIG_SPI 0x00008000 /* SPI byteswap */ +#define AR2315_CONFIG_CPU_DRAM 0x00010000 +#define AR2315_CONFIG_CPU_PCI 0x00020000 +#define AR2315_CONFIG_CPU_MMR 0x00040000 + +/* NMI control */ +#define AR2315_NMI_CTL 0x0010 + +#define AR2315_NMI_EN 1 + +/* Revision Register - Initial value is 0x3010 (WMAC 3.0, AR231X 1.0). */ +#define AR2315_SREV 0x0014 + +#define AR2315_REV_MAJ 0x000000f0 +#define AR2315_REV_MAJ_S 4 +#define AR2315_REV_MIN 0x0000000f +#define AR2315_REV_MIN_S 0 +#define AR2315_REV_CHIP (AR2315_REV_MAJ | AR2315_REV_MIN) + +/* Interface Enable */ +#define AR2315_IF_CTL 0x0018 + +#define AR2315_IF_MASK 0x00000007 +#define AR2315_IF_DISABLED 0 /* Disable all */ +#define AR2315_IF_PCI 1 /* PCI */ +#define AR2315_IF_TS_LOCAL 2 /* Local bus */ +#define AR2315_IF_ALL 3 /* Emulation only */ +#define AR2315_IF_LOCAL_HOST 0x00000008 +#define AR2315_IF_PCI_HOST 0x00000010 +#define AR2315_IF_PCI_INTR 0x00000020 +#define AR2315_IF_PCI_CLK_MASK 0x00030000 +#define AR2315_IF_PCI_CLK_INPUT 0 +#define AR2315_IF_PCI_CLK_OUTPUT_LOW 1 +#define AR2315_IF_PCI_CLK_OUTPUT_CLK 2 +#define AR2315_IF_PCI_CLK_OUTPUT_HIGH 3 +#define AR2315_IF_PCI_CLK_SHIFT 16 + +/* APB Interrupt control */ +#define AR2315_ISR 0x0020 +#define AR2315_IMR 0x0024 +#define AR2315_GISR 0x0028 + +#define AR2315_ISR_UART0 0x00000001 /* high speed UART */ +#define AR2315_ISR_I2C_RSVD 0x00000002 /* I2C bus */ +#define AR2315_ISR_SPI 0x00000004 /* SPI bus */ +#define AR2315_ISR_AHB 0x00000008 /* AHB error */ +#define AR2315_ISR_APB 0x00000010 /* APB error */ +#define AR2315_ISR_TIMER 0x00000020 /* Timer */ +#define AR2315_ISR_GPIO 0x00000040 /* GPIO */ +#define AR2315_ISR_WD 0x00000080 /* Watchdog */ +#define AR2315_ISR_IR_RSVD 0x00000100 /* IR */ + +#define AR2315_GISR_MISC 0x00000001 /* Misc */ +#define AR2315_GISR_WLAN0 0x00000002 /* WLAN0 */ +#define AR2315_GISR_MPEGTS_RSVD 0x00000004 /* MPEG-TS */ +#define AR2315_GISR_LOCALPCI 0x00000008 /* Local/PCI bus */ +#define AR2315_GISR_WMACPOLL 0x00000010 +#define AR2315_GISR_TIMER 0x00000020 +#define AR2315_GISR_ETHERNET 0x00000040 /* Ethernet */ + +/* Generic timer */ +#define AR2315_TIMER 0x0030 +#define AR2315_RELOAD 0x0034 + +/* Watchdog timer */ +#define AR2315_WDT_TIMER 0x0038 +#define AR2315_WDT_CTRL 0x003c + +#define AR2315_WDT_CTRL_IGNORE 0x00000000 /* ignore expiration */ +#define AR2315_WDT_CTRL_NMI 0x00000001 /* NMI on watchdog */ +#define AR2315_WDT_CTRL_RESET 0x00000002 /* reset on watchdog */ + +/* CPU Performance Counters */ +#define AR2315_PERFCNT0 0x0048 +#define AR2315_PERFCNT1 0x004c + +#define AR2315_PERF0_DATAHIT 0x00000001 /* Count Data Cache Hits */ +#define AR2315_PERF0_DATAMISS 0x00000002 /* Count Data Cache Misses */ +#define AR2315_PERF0_INSTHIT 0x00000004 /* Count Instruction Cache Hits */ +#define AR2315_PERF0_INSTMISS 0x00000008 /* Count Instruction Cache Misses */ +#define AR2315_PERF0_ACTIVE 0x00000010 /* Count Active Processor Cycles */ +#define AR2315_PERF0_WBHIT 0x00000020 /* Count CPU Write Buffer Hits */ +#define AR2315_PERF0_WBMISS 0x00000040 /* Count CPU Write Buffer Misses */ + +#define AR2315_PERF1_EB_ARDY 0x00000001 /* Count EB_ARdy signal */ +#define AR2315_PERF1_EB_AVALID 0x00000002 /* Count EB_AValid signal */ +#define AR2315_PERF1_EB_WDRDY 0x00000004 /* Count EB_WDRdy signal */ +#define AR2315_PERF1_EB_RDVAL 0x00000008 /* Count EB_RdVal signal */ +#define AR2315_PERF1_VRADDR 0x00000010 /* Count 
valid read address cycles*/ +#define AR2315_PERF1_VWADDR 0x00000020 /* Count valid write address cycl.*/ +#define AR2315_PERF1_VWDATA 0x00000040 /* Count valid write data cycles */ + +/* AHB Error Reporting */ +#define AR2315_AHB_ERR0 0x0050 /* error */ +#define AR2315_AHB_ERR1 0x0054 /* haddr */ +#define AR2315_AHB_ERR2 0x0058 /* hwdata */ +#define AR2315_AHB_ERR3 0x005c /* hrdata */ +#define AR2315_AHB_ERR4 0x0060 /* status */ + +#define AR2315_AHB_ERROR_DET 1 /* AHB Error has been detected, */ + /* write 1 to clear all bits in ERR0 */ +#define AR2315_AHB_ERROR_OVR 2 /* AHB Error overflow has been detected */ +#define AR2315_AHB_ERROR_WDT 4 /* AHB Error due to wdt instead of hresp */ + +#define AR2315_PROCERR_HMAST 0x0000000f +#define AR2315_PROCERR_HMAST_DFLT 0 +#define AR2315_PROCERR_HMAST_WMAC 1 +#define AR2315_PROCERR_HMAST_ENET 2 +#define AR2315_PROCERR_HMAST_PCIENDPT 3 +#define AR2315_PROCERR_HMAST_LOCAL 4 +#define AR2315_PROCERR_HMAST_CPU 5 +#define AR2315_PROCERR_HMAST_PCITGT 6 +#define AR2315_PROCERR_HMAST_S 0 +#define AR2315_PROCERR_HWRITE 0x00000010 +#define AR2315_PROCERR_HSIZE 0x00000060 +#define AR2315_PROCERR_HSIZE_S 5 +#define AR2315_PROCERR_HTRANS 0x00000180 +#define AR2315_PROCERR_HTRANS_S 7 +#define AR2315_PROCERR_HBURST 0x00000e00 +#define AR2315_PROCERR_HBURST_S 9 + +/* Clock Control */ +#define AR2315_PLLC_CTL 0x0064 +#define AR2315_PLLV_CTL 0x0068 +#define AR2315_CPUCLK 0x006c +#define AR2315_AMBACLK 0x0070 +#define AR2315_SYNCCLK 0x0074 +#define AR2315_DSL_SLEEP_CTL 0x0080 +#define AR2315_DSL_SLEEP_DUR 0x0084 + +/* PLLc Control fields */ +#define AR2315_PLLC_REF_DIV_M 0x00000003 +#define AR2315_PLLC_REF_DIV_S 0 +#define AR2315_PLLC_FDBACK_DIV_M 0x0000007c +#define AR2315_PLLC_FDBACK_DIV_S 2 +#define AR2315_PLLC_ADD_FDBACK_DIV_M 0x00000080 +#define AR2315_PLLC_ADD_FDBACK_DIV_S 7 +#define AR2315_PLLC_CLKC_DIV_M 0x0001c000 +#define AR2315_PLLC_CLKC_DIV_S 14 +#define AR2315_PLLC_CLKM_DIV_M 0x00700000 +#define AR2315_PLLC_CLKM_DIV_S 20 + +/* CPU CLK Control fields */ +#define AR2315_CPUCLK_CLK_SEL_M 0x00000003 +#define AR2315_CPUCLK_CLK_SEL_S 0 +#define AR2315_CPUCLK_CLK_DIV_M 0x0000000c +#define AR2315_CPUCLK_CLK_DIV_S 2 + +/* AMBA CLK Control fields */ +#define AR2315_AMBACLK_CLK_SEL_M 0x00000003 +#define AR2315_AMBACLK_CLK_SEL_S 0 +#define AR2315_AMBACLK_CLK_DIV_M 0x0000000c +#define AR2315_AMBACLK_CLK_DIV_S 2 + +/* PCI Clock Control */ +#define AR2315_PCICLK 0x00a4 + +#define AR2315_PCICLK_INPUT_M 0x00000003 +#define AR2315_PCICLK_INPUT_S 0 +#define AR2315_PCICLK_PLLC_CLKM 0 +#define AR2315_PCICLK_PLLC_CLKM1 1 +#define AR2315_PCICLK_PLLC_CLKC 2 +#define AR2315_PCICLK_REF_CLK 3 +#define AR2315_PCICLK_DIV_M 0x0000000c +#define AR2315_PCICLK_DIV_S 2 +#define AR2315_PCICLK_IN_FREQ 0 +#define AR2315_PCICLK_IN_FREQ_DIV_6 1 +#define AR2315_PCICLK_IN_FREQ_DIV_8 2 +#define AR2315_PCICLK_IN_FREQ_DIV_10 3 + +/* Observation Control Register */ +#define AR2315_OCR 0x00b0 + +#define AR2315_OCR_GPIO0_IRIN 0x00000040 +#define AR2315_OCR_GPIO1_IROUT 0x00000080 +#define AR2315_OCR_GPIO3_RXCLR 0x00000200 + +/* General Clock Control */ +#define AR2315_MISCCLK 0x00b4 + +#define AR2315_MISCCLK_PLLBYPASS_EN 0x00000001 +#define AR2315_MISCCLK_PROCREFCLK 0x00000002 + +/* + * SDRAM Controller + * - No read or write buffers are included. 
+ */ +#define AR2315_MEM_CFG 0x0000 +#define AR2315_MEM_CTRL 0x000c +#define AR2315_MEM_REF 0x0010 + +#define AR2315_MEM_CFG_DATA_WIDTH_M 0x00006000 +#define AR2315_MEM_CFG_DATA_WIDTH_S 13 +#define AR2315_MEM_CFG_COL_WIDTH_M 0x00001e00 +#define AR2315_MEM_CFG_COL_WIDTH_S 9 +#define AR2315_MEM_CFG_ROW_WIDTH_M 0x000001e0 +#define AR2315_MEM_CFG_ROW_WIDTH_S 5 +#define AR2315_MEM_CFG_BANKADDR_BITS_M 0x00000018 +#define AR2315_MEM_CFG_BANKADDR_BITS_S 3 + +/* + * Local Bus Interface Registers + */ +#define AR2315_LB_CONFIG 0x0000 + +#define AR2315_LBCONF_OE 0x00000001 /* =1 OE is low-true */ +#define AR2315_LBCONF_CS0 0x00000002 /* =1 first CS is low-true */ +#define AR2315_LBCONF_CS1 0x00000004 /* =1 2nd CS is low-true */ +#define AR2315_LBCONF_RDY 0x00000008 /* =1 RDY is low-true */ +#define AR2315_LBCONF_WE 0x00000010 /* =1 Write En is low-true */ +#define AR2315_LBCONF_WAIT 0x00000020 /* =1 WAIT is low-true */ +#define AR2315_LBCONF_ADS 0x00000040 /* =1 Adr Strobe is low-true */ +#define AR2315_LBCONF_MOT 0x00000080 /* =0 Intel, =1 Motorola */ +#define AR2315_LBCONF_8CS 0x00000100 /* =1 8 bits CS, 0= 16bits */ +#define AR2315_LBCONF_8DS 0x00000200 /* =1 8 bits Data S, 0=16bits */ +#define AR2315_LBCONF_ADS_EN 0x00000400 /* =1 Enable ADS */ +#define AR2315_LBCONF_ADR_OE 0x00000800 /* =1 Adr cap on OE, WE or DS */ +#define AR2315_LBCONF_ADDT_MUX 0x00001000 /* =1 Adr and Data share bus */ +#define AR2315_LBCONF_DATA_OE 0x00002000 /* =1 Data cap on OE, WE, DS */ +#define AR2315_LBCONF_16DATA 0x00004000 /* =1 Data is 16 bits wide */ +#define AR2315_LBCONF_SWAPDT 0x00008000 /* =1 Byte swap data */ +#define AR2315_LBCONF_SYNC 0x00010000 /* =1 Bus synchronous to clk */ +#define AR2315_LBCONF_INT 0x00020000 /* =1 Intr is low true */ +#define AR2315_LBCONF_INT_CTR0 0x00000000 /* GND high-Z, Vdd is high-Z */ +#define AR2315_LBCONF_INT_CTR1 0x00040000 /* GND drive, Vdd is high-Z */ +#define AR2315_LBCONF_INT_CTR2 0x00080000 /* GND high-Z, Vdd drive */ +#define AR2315_LBCONF_INT_CTR3 0x000c0000 /* GND drive, Vdd drive */ +#define AR2315_LBCONF_RDY_WAIT 0x00100000 /* =1 RDY is negative of WAIT */ +#define AR2315_LBCONF_INT_PULSE 0x00200000 /* =1 Interrupt is a pulse */ +#define AR2315_LBCONF_ENABLE 0x00400000 /* =1 Falcon respond to LB */ + +#define AR2315_LB_CLKSEL 0x0004 + +#define AR2315_LBCLK_EXT 0x00000001 /* use external clk for lb */ + +#define AR2315_LB_1MS 0x0008 + +#define AR2315_LB1MS_MASK 0x0003ffff /* # of AHB clk cycles in 1ms */ + +#define AR2315_LB_MISCCFG 0x000c + +#define AR2315_LBM_TXD_EN 0x00000001 /* Enable TXD for fragments */ +#define AR2315_LBM_RX_INTEN 0x00000002 /* Enable LB ints on RX ready */ +#define AR2315_LBM_MBOXWR_INTEN 0x00000004 /* Enable LB ints on mbox wr */ +#define AR2315_LBM_MBOXRD_INTEN 0x00000008 /* Enable LB ints on mbox rd */ +#define AR2315_LMB_DESCSWAP_EN 0x00000010 /* Byte swap desc enable */ +#define AR2315_LBM_TIMEOUT_M 0x00ffff80 +#define AR2315_LBM_TIMEOUT_S 7 +#define AR2315_LBM_PORTMUX 0x07000000 + +#define AR2315_LB_RXTSOFF 0x0010 + +#define AR2315_LB_TX_CHAIN_EN 0x0100 + +#define AR2315_LB_TXEN_0 0x00000001 +#define AR2315_LB_TXEN_1 0x00000002 +#define AR2315_LB_TXEN_2 0x00000004 +#define AR2315_LB_TXEN_3 0x00000008 + +#define AR2315_LB_TX_CHAIN_DIS 0x0104 +#define AR2315_LB_TX_DESC_PTR 0x0200 + +#define AR2315_LB_RX_CHAIN_EN 0x0400 + +#define AR2315_LB_RXEN 0x00000001 + +#define AR2315_LB_RX_CHAIN_DIS 0x0404 +#define AR2315_LB_RX_DESC_PTR 0x0408 + +#define AR2315_LB_INT_STATUS 0x0500 + +#define AR2315_LB_INT_TX_DESC 0x00000001 +#define 
AR2315_LB_INT_TX_OK 0x00000002 +#define AR2315_LB_INT_TX_ERR 0x00000004 +#define AR2315_LB_INT_TX_EOF 0x00000008 +#define AR2315_LB_INT_RX_DESC 0x00000010 +#define AR2315_LB_INT_RX_OK 0x00000020 +#define AR2315_LB_INT_RX_ERR 0x00000040 +#define AR2315_LB_INT_RX_EOF 0x00000080 +#define AR2315_LB_INT_TX_TRUNC 0x00000100 +#define AR2315_LB_INT_TX_STARVE 0x00000200 +#define AR2315_LB_INT_LB_TIMEOUT 0x00000400 +#define AR2315_LB_INT_LB_ERR 0x00000800 +#define AR2315_LB_INT_MBOX_WR 0x00001000 +#define AR2315_LB_INT_MBOX_RD 0x00002000 + +/* Bit definitions for INT MASK are the same as INT_STATUS */ +#define AR2315_LB_INT_MASK 0x0504 + +#define AR2315_LB_INT_EN 0x0508 +#define AR2315_LB_MBOX 0x0600 + +#endif /* __ASM_MACH_ATH25_AR2315_REGS_H */ diff --git a/arch/mips/ath25/ar5312.c b/arch/mips/ath25/ar5312.c new file mode 100644 index 000000000000..b6887f75144c --- /dev/null +++ b/arch/mips/ath25/ar5312.c @@ -0,0 +1,393 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2003 Atheros Communications, Inc., All Rights Reserved. + * Copyright (C) 2006 FON Technology, SL. + * Copyright (C) 2006 Imre Kaloz <kaloz@openwrt.org> + * Copyright (C) 2006-2009 Felix Fietkau <nbd@openwrt.org> + * Copyright (C) 2012 Alexandros C. Couloumbis <alex@ozo.com> + */ + +/* + * Platform devices for Atheros AR5312 SoCs + */ + +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/bitops.h> +#include <linux/irqdomain.h> +#include <linux/interrupt.h> +#include <linux/platform_device.h> +#include <linux/mtd/physmap.h> +#include <linux/reboot.h> +#include <asm/bootinfo.h> +#include <asm/reboot.h> +#include <asm/time.h> + +#include <ath25_platform.h> + +#include "devices.h" +#include "ar5312.h" +#include "ar5312_regs.h" + +static void __iomem *ar5312_rst_base; +static struct irq_domain *ar5312_misc_irq_domain; + +static inline u32 ar5312_rst_reg_read(u32 reg) +{ + return __raw_readl(ar5312_rst_base + reg); +} + +static inline void ar5312_rst_reg_write(u32 reg, u32 val) +{ + __raw_writel(val, ar5312_rst_base + reg); +} + +static inline void ar5312_rst_reg_mask(u32 reg, u32 mask, u32 val) +{ + u32 ret = ar5312_rst_reg_read(reg); + + ret &= ~mask; + ret |= val; + ar5312_rst_reg_write(reg, ret); +} + +static irqreturn_t ar5312_ahb_err_handler(int cpl, void *dev_id) +{ + u32 proc1 = ar5312_rst_reg_read(AR5312_PROC1); + u32 proc_addr = ar5312_rst_reg_read(AR5312_PROCADDR); /* clears error */ + u32 dma1 = ar5312_rst_reg_read(AR5312_DMA1); + u32 dma_addr = ar5312_rst_reg_read(AR5312_DMAADDR); /* clears error */ + + pr_emerg("AHB interrupt: PROCADDR=0x%8.8x PROC1=0x%8.8x DMAADDR=0x%8.8x DMA1=0x%8.8x\n", + proc_addr, proc1, dma_addr, dma1); + + machine_restart("AHB error"); /* Catastrophic failure */ + return IRQ_HANDLED; +} + +static struct irqaction ar5312_ahb_err_interrupt = { + .handler = ar5312_ahb_err_handler, + .name = "ar5312-ahb-error", +}; + +static void ar5312_misc_irq_handler(unsigned irq, struct irq_desc *desc) +{ + u32 pending = ar5312_rst_reg_read(AR5312_ISR) & + ar5312_rst_reg_read(AR5312_IMR); + unsigned nr, misc_irq = 0; + + if (pending) { + struct irq_domain *domain = irq_get_handler_data(irq); + + nr = __ffs(pending); + misc_irq = irq_find_mapping(domain, nr); + } + + if (misc_irq) { + generic_handle_irq(misc_irq); + if (nr == AR5312_MISC_IRQ_TIMER) + ar5312_rst_reg_read(AR5312_TIMER); + } else { + spurious_interrupt(); + } +} + +/* Enable the 
specified AR5312_MISC_IRQ interrupt */ +static void ar5312_misc_irq_unmask(struct irq_data *d) +{ + ar5312_rst_reg_mask(AR5312_IMR, 0, BIT(d->hwirq)); +} + +/* Disable the specified AR5312_MISC_IRQ interrupt */ +static void ar5312_misc_irq_mask(struct irq_data *d) +{ + ar5312_rst_reg_mask(AR5312_IMR, BIT(d->hwirq), 0); + ar5312_rst_reg_read(AR5312_IMR); /* flush write buffer */ +} + +static struct irq_chip ar5312_misc_irq_chip = { + .name = "ar5312-misc", + .irq_unmask = ar5312_misc_irq_unmask, + .irq_mask = ar5312_misc_irq_mask, +}; + +static int ar5312_misc_irq_map(struct irq_domain *d, unsigned irq, + irq_hw_number_t hw) +{ + irq_set_chip_and_handler(irq, &ar5312_misc_irq_chip, handle_level_irq); + return 0; +} + +static struct irq_domain_ops ar5312_misc_irq_domain_ops = { + .map = ar5312_misc_irq_map, +}; + +static void ar5312_irq_dispatch(void) +{ + u32 pending = read_c0_status() & read_c0_cause(); + + if (pending & CAUSEF_IP2) + do_IRQ(AR5312_IRQ_WLAN0); + else if (pending & CAUSEF_IP5) + do_IRQ(AR5312_IRQ_WLAN1); + else if (pending & CAUSEF_IP6) + do_IRQ(AR5312_IRQ_MISC); + else if (pending & CAUSEF_IP7) + do_IRQ(ATH25_IRQ_CPU_CLOCK); + else + spurious_interrupt(); +} + +void __init ar5312_arch_init_irq(void) +{ + struct irq_domain *domain; + unsigned irq; + + ath25_irq_dispatch = ar5312_irq_dispatch; + + domain = irq_domain_add_linear(NULL, AR5312_MISC_IRQ_COUNT, + &ar5312_misc_irq_domain_ops, NULL); + if (!domain) + panic("Failed to add IRQ domain"); + + irq = irq_create_mapping(domain, AR5312_MISC_IRQ_AHB_PROC); + setup_irq(irq, &ar5312_ahb_err_interrupt); + + irq_set_chained_handler(AR5312_IRQ_MISC, ar5312_misc_irq_handler); + irq_set_handler_data(AR5312_IRQ_MISC, domain); + + ar5312_misc_irq_domain = domain; +} + +static struct physmap_flash_data ar5312_flash_data = { + .width = 2, +}; + +static struct resource ar5312_flash_resource = { + .start = AR5312_FLASH_BASE, + .end = AR5312_FLASH_BASE + AR5312_FLASH_SIZE - 1, + .flags = IORESOURCE_MEM, +}; + +static struct platform_device ar5312_physmap_flash = { + .name = "physmap-flash", + .id = 0, + .dev.platform_data = &ar5312_flash_data, + .resource = &ar5312_flash_resource, + .num_resources = 1, +}; + +static void __init ar5312_flash_init(void) +{ + void __iomem *flashctl_base; + u32 ctl; + + flashctl_base = ioremap_nocache(AR5312_FLASHCTL_BASE, + AR5312_FLASHCTL_SIZE); + + ctl = __raw_readl(flashctl_base + AR5312_FLASHCTL0); + ctl &= AR5312_FLASHCTL_MW; + + /* fixup flash width */ + switch (ctl) { + case AR5312_FLASHCTL_MW16: + ar5312_flash_data.width = 2; + break; + case AR5312_FLASHCTL_MW8: + default: + ar5312_flash_data.width = 1; + break; + } + + /* + * Configure flash bank 0. + * Assume 8M window size. 
Flash will be aliased if it's smaller + */ + ctl |= AR5312_FLASHCTL_E | AR5312_FLASHCTL_AC_8M | AR5312_FLASHCTL_RBLE; + ctl |= 0x01 << AR5312_FLASHCTL_IDCY_S; + ctl |= 0x07 << AR5312_FLASHCTL_WST1_S; + ctl |= 0x07 << AR5312_FLASHCTL_WST2_S; + __raw_writel(ctl, flashctl_base + AR5312_FLASHCTL0); + + /* Disable other flash banks */ + ctl = __raw_readl(flashctl_base + AR5312_FLASHCTL1); + ctl &= ~(AR5312_FLASHCTL_E | AR5312_FLASHCTL_AC); + __raw_writel(ctl, flashctl_base + AR5312_FLASHCTL1); + ctl = __raw_readl(flashctl_base + AR5312_FLASHCTL2); + ctl &= ~(AR5312_FLASHCTL_E | AR5312_FLASHCTL_AC); + __raw_writel(ctl, flashctl_base + AR5312_FLASHCTL2); + + iounmap(flashctl_base); +} + +void __init ar5312_init_devices(void) +{ + struct ath25_boarddata *config; + + ar5312_flash_init(); + + /* Locate board/radio config data */ + ath25_find_config(AR5312_FLASH_BASE, AR5312_FLASH_SIZE); + config = ath25_board.config; + + /* AR2313 has CPU minor rev. 10 */ + if ((current_cpu_data.processor_id & 0xff) == 0x0a) + ath25_soc = ATH25_SOC_AR2313; + + /* AR2312 shares the same Silicon ID as AR5312 */ + else if (config->flags & BD_ISCASPER) + ath25_soc = ATH25_SOC_AR2312; + + /* Everything else is probably AR5312 or compatible */ + else + ath25_soc = ATH25_SOC_AR5312; + + platform_device_register(&ar5312_physmap_flash); + + switch (ath25_soc) { + case ATH25_SOC_AR5312: + if (!ath25_board.radio) + return; + + if (!(config->flags & BD_WLAN0)) + break; + + ath25_add_wmac(0, AR5312_WLAN0_BASE, AR5312_IRQ_WLAN0); + break; + case ATH25_SOC_AR2312: + case ATH25_SOC_AR2313: + if (!ath25_board.radio) + return; + break; + default: + break; + } + + if (config->flags & BD_WLAN1) + ath25_add_wmac(1, AR5312_WLAN1_BASE, AR5312_IRQ_WLAN1); +} + +static void ar5312_restart(char *command) +{ + /* reset the system */ + local_irq_disable(); + while (1) + ar5312_rst_reg_write(AR5312_RESET, AR5312_RESET_SYSTEM); +} + +/* + * This table is indexed by bits 5..4 of the CLOCKCTL1 register + * to determine the predivisor value. + */ +static unsigned clockctl1_predivide_table[4] __initdata = { 1, 2, 4, 5 }; + +static unsigned __init ar5312_cpu_frequency(void) +{ + u32 scratch, devid, clock_ctl1; + u32 predivide_mask, multiplier_mask, doubler_mask; + unsigned predivide_shift, multiplier_shift; + unsigned predivide_select, predivisor, multiplier; + + /* Trust the bootrom's idea of cpu frequency. */ + scratch = ar5312_rst_reg_read(AR5312_SCRATCH); + if (scratch) + return scratch; + + devid = ar5312_rst_reg_read(AR5312_REV); + devid = (devid & AR5312_REV_MAJ) >> AR5312_REV_MAJ_S; + if (devid == AR5312_REV_MAJ_AR2313) { + predivide_mask = AR2313_CLOCKCTL1_PREDIVIDE_MASK; + predivide_shift = AR2313_CLOCKCTL1_PREDIVIDE_SHIFT; + multiplier_mask = AR2313_CLOCKCTL1_MULTIPLIER_MASK; + multiplier_shift = AR2313_CLOCKCTL1_MULTIPLIER_SHIFT; + doubler_mask = AR2313_CLOCKCTL1_DOUBLER_MASK; + } else { /* AR5312 and AR2312 */ + predivide_mask = AR5312_CLOCKCTL1_PREDIVIDE_MASK; + predivide_shift = AR5312_CLOCKCTL1_PREDIVIDE_SHIFT; + multiplier_mask = AR5312_CLOCKCTL1_MULTIPLIER_MASK; + multiplier_shift = AR5312_CLOCKCTL1_MULTIPLIER_SHIFT; + doubler_mask = AR5312_CLOCKCTL1_DOUBLER_MASK; + } + + /* + * Clocking is derived from a fixed 40MHz input clock. 
+ * + * cpu_freq = input_clock * MULT (where MULT is PLL multiplier) + * sys_freq = cpu_freq / 4 (used for APB clock, serial, + * flash, Timer, Watchdog Timer) + * + * cnt_freq = cpu_freq / 2 (use for CPU count/compare) + * + * So, for example, with a PLL multiplier of 5, we have + * + * cpu_freq = 200MHz + * sys_freq = 50MHz + * cnt_freq = 100MHz + * + * We compute the CPU frequency, based on PLL settings. + */ + + clock_ctl1 = ar5312_rst_reg_read(AR5312_CLOCKCTL1); + predivide_select = (clock_ctl1 & predivide_mask) >> predivide_shift; + predivisor = clockctl1_predivide_table[predivide_select]; + multiplier = (clock_ctl1 & multiplier_mask) >> multiplier_shift; + + if (clock_ctl1 & doubler_mask) + multiplier <<= 1; + + return (40000000 / predivisor) * multiplier; +} + +static inline unsigned ar5312_sys_frequency(void) +{ + return ar5312_cpu_frequency() / 4; +} + +void __init ar5312_plat_time_init(void) +{ + mips_hpt_frequency = ar5312_cpu_frequency() / 2; +} + +void __init ar5312_plat_mem_setup(void) +{ + void __iomem *sdram_base; + u32 memsize, memcfg, bank0_ac, bank1_ac; + u32 devid; + + /* Detect memory size */ + sdram_base = ioremap_nocache(AR5312_SDRAMCTL_BASE, + AR5312_SDRAMCTL_SIZE); + memcfg = __raw_readl(sdram_base + AR5312_MEM_CFG1); + bank0_ac = ATH25_REG_MS(memcfg, AR5312_MEM_CFG1_AC0); + bank1_ac = ATH25_REG_MS(memcfg, AR5312_MEM_CFG1_AC1); + memsize = (bank0_ac ? (1 << (bank0_ac + 1)) : 0) + + (bank1_ac ? (1 << (bank1_ac + 1)) : 0); + memsize <<= 20; + add_memory_region(0, memsize, BOOT_MEM_RAM); + iounmap(sdram_base); + + ar5312_rst_base = ioremap_nocache(AR5312_RST_BASE, AR5312_RST_SIZE); + + devid = ar5312_rst_reg_read(AR5312_REV); + devid >>= AR5312_REV_WMAC_MIN_S; + devid &= AR5312_REV_CHIP; + ath25_board.devid = (u16)devid; + + /* Clear any lingering AHB errors */ + ar5312_rst_reg_read(AR5312_PROCADDR); + ar5312_rst_reg_read(AR5312_DMAADDR); + ar5312_rst_reg_write(AR5312_WDT_CTRL, AR5312_WDT_CTRL_IGNORE); + + _machine_restart = ar5312_restart; +} + +void __init ar5312_arch_init(void) +{ + unsigned irq = irq_create_mapping(ar5312_misc_irq_domain, + AR5312_MISC_IRQ_UART0); + + ath25_serial_setup(AR5312_UART0_BASE, irq, ar5312_sys_frequency()); +} diff --git a/arch/mips/ath25/ar5312.h b/arch/mips/ath25/ar5312.h new file mode 100644 index 000000000000..470abb0052bd --- /dev/null +++ b/arch/mips/ath25/ar5312.h @@ -0,0 +1,22 @@ +#ifndef __AR5312_H +#define __AR5312_H + +#ifdef CONFIG_SOC_AR5312 + +void ar5312_arch_init_irq(void); +void ar5312_init_devices(void); +void ar5312_plat_time_init(void); +void ar5312_plat_mem_setup(void); +void ar5312_arch_init(void); + +#else + +static inline void ar5312_arch_init_irq(void) {} +static inline void ar5312_init_devices(void) {} +static inline void ar5312_plat_time_init(void) {} +static inline void ar5312_plat_mem_setup(void) {} +static inline void ar5312_arch_init(void) {} + +#endif + +#endif /* __AR5312_H */ diff --git a/arch/mips/ath25/ar5312_regs.h b/arch/mips/ath25/ar5312_regs.h new file mode 100644 index 000000000000..4b947f967439 --- /dev/null +++ b/arch/mips/ath25/ar5312_regs.h @@ -0,0 +1,224 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2003 Atheros Communications, Inc., All Rights Reserved. 
+ * Copyright (C) 2006 Imre Kaloz <kaloz@openwrt.org> + * Copyright (C) 2006 Felix Fietkau <nbd@openwrt.org> + */ + +#ifndef __ASM_MACH_ATH25_AR5312_REGS_H +#define __ASM_MACH_ATH25_AR5312_REGS_H + +/* + * IRQs + */ +#define AR5312_IRQ_WLAN0 (MIPS_CPU_IRQ_BASE + 2) /* C0_CAUSE: 0x0400 */ +#define AR5312_IRQ_ENET0 (MIPS_CPU_IRQ_BASE + 3) /* C0_CAUSE: 0x0800 */ +#define AR5312_IRQ_ENET1 (MIPS_CPU_IRQ_BASE + 4) /* C0_CAUSE: 0x1000 */ +#define AR5312_IRQ_WLAN1 (MIPS_CPU_IRQ_BASE + 5) /* C0_CAUSE: 0x2000 */ +#define AR5312_IRQ_MISC (MIPS_CPU_IRQ_BASE + 6) /* C0_CAUSE: 0x4000 */ + +/* + * Miscellaneous interrupts, which share IP6. + */ +#define AR5312_MISC_IRQ_TIMER 0 +#define AR5312_MISC_IRQ_AHB_PROC 1 +#define AR5312_MISC_IRQ_AHB_DMA 2 +#define AR5312_MISC_IRQ_GPIO 3 +#define AR5312_MISC_IRQ_UART0 4 +#define AR5312_MISC_IRQ_UART0_DMA 5 +#define AR5312_MISC_IRQ_WATCHDOG 6 +#define AR5312_MISC_IRQ_LOCAL 7 +#define AR5312_MISC_IRQ_SPI 8 +#define AR5312_MISC_IRQ_COUNT 9 + +/* + * Address Map + * + * The AR5312 supports 2 enet MACS, even though many reference boards only + * actually use 1 of them (i.e. Only MAC 0 is actually connected to an enet + * PHY or PHY switch. The AR2312 supports 1 enet MAC. + */ +#define AR5312_WLAN0_BASE 0x18000000 +#define AR5312_ENET0_BASE 0x18100000 +#define AR5312_ENET1_BASE 0x18200000 +#define AR5312_SDRAMCTL_BASE 0x18300000 +#define AR5312_SDRAMCTL_SIZE 0x00000010 +#define AR5312_FLASHCTL_BASE 0x18400000 +#define AR5312_FLASHCTL_SIZE 0x00000010 +#define AR5312_WLAN1_BASE 0x18500000 +#define AR5312_UART0_BASE 0x1c000000 /* UART MMR */ +#define AR5312_GPIO_BASE 0x1c002000 +#define AR5312_GPIO_SIZE 0x00000010 +#define AR5312_RST_BASE 0x1c003000 +#define AR5312_RST_SIZE 0x00000100 +#define AR5312_FLASH_BASE 0x1e000000 +#define AR5312_FLASH_SIZE 0x00800000 + +/* + * Need these defines to determine true number of ethernet MACs + */ +#define AR5312_AR5312_REV2 0x0052 /* AR5312 WMAC (AP31) */ +#define AR5312_AR5312_REV7 0x0057 /* AR5312 WMAC (AP30-040) */ +#define AR5312_AR2313_REV8 0x0058 /* AR2313 WMAC (AP43-030) */ + +/* Reset/Timer Block Address Map */ +#define AR5312_TIMER 0x0000 /* countdown timer */ +#define AR5312_RELOAD 0x0004 /* timer reload value */ +#define AR5312_WDT_CTRL 0x0008 /* watchdog cntrl */ +#define AR5312_WDT_TIMER 0x000c /* watchdog timer */ +#define AR5312_ISR 0x0010 /* Intr Status Reg */ +#define AR5312_IMR 0x0014 /* Intr Mask Reg */ +#define AR5312_RESET 0x0020 +#define AR5312_CLOCKCTL1 0x0064 +#define AR5312_SCRATCH 0x006c +#define AR5312_PROCADDR 0x0070 +#define AR5312_PROC1 0x0074 +#define AR5312_DMAADDR 0x0078 +#define AR5312_DMA1 0x007c +#define AR5312_ENABLE 0x0080 /* interface enb */ +#define AR5312_REV 0x0090 /* revision */ + +/* AR5312_WDT_CTRL register bit field definitions */ +#define AR5312_WDT_CTRL_IGNORE 0x00000000 /* ignore expiration */ +#define AR5312_WDT_CTRL_NMI 0x00000001 +#define AR5312_WDT_CTRL_RESET 0x00000002 + +/* AR5312_ISR register bit field definitions */ +#define AR5312_ISR_TIMER 0x00000001 +#define AR5312_ISR_AHBPROC 0x00000002 +#define AR5312_ISR_AHBDMA 0x00000004 +#define AR5312_ISR_GPIO 0x00000008 +#define AR5312_ISR_UART0 0x00000010 +#define AR5312_ISR_UART0DMA 0x00000020 +#define AR5312_ISR_WD 0x00000040 +#define AR5312_ISR_LOCAL 0x00000080 + +/* AR5312_RESET register bit field definitions */ +#define AR5312_RESET_SYSTEM 0x00000001 /* cold reset full system */ +#define AR5312_RESET_PROC 0x00000002 /* cold reset MIPS core */ +#define AR5312_RESET_WLAN0 0x00000004 /* cold reset WLAN MAC/BB */ +#define 
AR5312_RESET_EPHY0 0x00000008 /* cold reset ENET0 phy */ +#define AR5312_RESET_EPHY1 0x00000010 /* cold reset ENET1 phy */ +#define AR5312_RESET_ENET0 0x00000020 /* cold reset ENET0 MAC */ +#define AR5312_RESET_ENET1 0x00000040 /* cold reset ENET1 MAC */ +#define AR5312_RESET_UART0 0x00000100 /* cold reset UART0 */ +#define AR5312_RESET_WLAN1 0x00000200 /* cold reset WLAN MAC/BB */ +#define AR5312_RESET_APB 0x00000400 /* cold reset APB ar5312 */ +#define AR5312_RESET_WARM_PROC 0x00001000 /* warm reset MIPS core */ +#define AR5312_RESET_WARM_WLAN0_MAC 0x00002000 /* warm reset WLAN0 MAC */ +#define AR5312_RESET_WARM_WLAN0_BB 0x00004000 /* warm reset WLAN0 BB */ +#define AR5312_RESET_NMI 0x00010000 /* send an NMI to the CPU */ +#define AR5312_RESET_WARM_WLAN1_MAC 0x00020000 /* warm reset WLAN1 MAC */ +#define AR5312_RESET_WARM_WLAN1_BB 0x00040000 /* warm reset WLAN1 BB */ +#define AR5312_RESET_LOCAL_BUS 0x00080000 /* reset local bus */ +#define AR5312_RESET_WDOG 0x00100000 /* last reset was a wdt */ + +#define AR5312_RESET_WMAC0_BITS (AR5312_RESET_WLAN0 |\ + AR5312_RESET_WARM_WLAN0_MAC |\ + AR5312_RESET_WARM_WLAN0_BB) + +#define AR5312_RESET_WMAC1_BITS (AR5312_RESET_WLAN1 |\ + AR5312_RESET_WARM_WLAN1_MAC |\ + AR5312_RESET_WARM_WLAN1_BB) + +/* AR5312_CLOCKCTL1 register bit field definitions */ +#define AR5312_CLOCKCTL1_PREDIVIDE_MASK 0x00000030 +#define AR5312_CLOCKCTL1_PREDIVIDE_SHIFT 4 +#define AR5312_CLOCKCTL1_MULTIPLIER_MASK 0x00001f00 +#define AR5312_CLOCKCTL1_MULTIPLIER_SHIFT 8 +#define AR5312_CLOCKCTL1_DOUBLER_MASK 0x00010000 + +/* Valid for AR5312 and AR2312 */ +#define AR5312_CLOCKCTL1_PREDIVIDE_MASK 0x00000030 +#define AR5312_CLOCKCTL1_PREDIVIDE_SHIFT 4 +#define AR5312_CLOCKCTL1_MULTIPLIER_MASK 0x00001f00 +#define AR5312_CLOCKCTL1_MULTIPLIER_SHIFT 8 +#define AR5312_CLOCKCTL1_DOUBLER_MASK 0x00010000 + +/* Valid for AR2313 */ +#define AR2313_CLOCKCTL1_PREDIVIDE_MASK 0x00003000 +#define AR2313_CLOCKCTL1_PREDIVIDE_SHIFT 12 +#define AR2313_CLOCKCTL1_MULTIPLIER_MASK 0x001f0000 +#define AR2313_CLOCKCTL1_MULTIPLIER_SHIFT 16 +#define AR2313_CLOCKCTL1_DOUBLER_MASK 0x00000000 + +/* AR5312_ENABLE register bit field definitions */ +#define AR5312_ENABLE_WLAN0 0x00000001 +#define AR5312_ENABLE_ENET0 0x00000002 +#define AR5312_ENABLE_ENET1 0x00000004 +#define AR5312_ENABLE_UART_AND_WLAN1_PIO 0x00000008/* UART & WLAN1 PIO */ +#define AR5312_ENABLE_WLAN1_DMA 0x00000010/* WLAN1 DMAs */ +#define AR5312_ENABLE_WLAN1 (AR5312_ENABLE_UART_AND_WLAN1_PIO |\ + AR5312_ENABLE_WLAN1_DMA) + +/* AR5312_REV register bit field definitions */ +#define AR5312_REV_WMAC_MAJ 0x0000f000 +#define AR5312_REV_WMAC_MAJ_S 12 +#define AR5312_REV_WMAC_MIN 0x00000f00 +#define AR5312_REV_WMAC_MIN_S 8 +#define AR5312_REV_MAJ 0x000000f0 +#define AR5312_REV_MAJ_S 4 +#define AR5312_REV_MIN 0x0000000f +#define AR5312_REV_MIN_S 0 +#define AR5312_REV_CHIP (AR5312_REV_MAJ|AR5312_REV_MIN) + +/* Major revision numbers, bits 7..4 of Revision ID register */ +#define AR5312_REV_MAJ_AR5312 0x4 +#define AR5312_REV_MAJ_AR2313 0x5 + +/* Minor revision numbers, bits 3..0 of Revision ID register */ +#define AR5312_REV_MIN_DUAL 0x0 /* Dual WLAN version */ +#define AR5312_REV_MIN_SINGLE 0x1 /* Single WLAN version */ + +/* + * ARM Flash Controller -- 3 flash banks with either x8 or x16 devices + */ +#define AR5312_FLASHCTL0 0x0000 +#define AR5312_FLASHCTL1 0x0004 +#define AR5312_FLASHCTL2 0x0008 + +/* AR5312_FLASHCTL register bit field definitions */ +#define AR5312_FLASHCTL_IDCY 0x0000000f /* Idle cycle turnaround time */ +#define 
AR5312_FLASHCTL_IDCY_S 0 +#define AR5312_FLASHCTL_WST1 0x000003e0 /* Wait state 1 */ +#define AR5312_FLASHCTL_WST1_S 5 +#define AR5312_FLASHCTL_RBLE 0x00000400 /* Read byte lane enable */ +#define AR5312_FLASHCTL_WST2 0x0000f800 /* Wait state 2 */ +#define AR5312_FLASHCTL_WST2_S 11 +#define AR5312_FLASHCTL_AC 0x00070000 /* Flash addr check (added) */ +#define AR5312_FLASHCTL_AC_S 16 +#define AR5312_FLASHCTL_AC_128K 0x00000000 +#define AR5312_FLASHCTL_AC_256K 0x00010000 +#define AR5312_FLASHCTL_AC_512K 0x00020000 +#define AR5312_FLASHCTL_AC_1M 0x00030000 +#define AR5312_FLASHCTL_AC_2M 0x00040000 +#define AR5312_FLASHCTL_AC_4M 0x00050000 +#define AR5312_FLASHCTL_AC_8M 0x00060000 +#define AR5312_FLASHCTL_AC_RES 0x00070000 /* 16MB is not supported */ +#define AR5312_FLASHCTL_E 0x00080000 /* Flash bank enable (added) */ +#define AR5312_FLASHCTL_BUSERR 0x01000000 /* Bus transfer error flag */ +#define AR5312_FLASHCTL_WPERR 0x02000000 /* Write protect error flag */ +#define AR5312_FLASHCTL_WP 0x04000000 /* Write protect */ +#define AR5312_FLASHCTL_BM 0x08000000 /* Burst mode */ +#define AR5312_FLASHCTL_MW 0x30000000 /* Mem width */ +#define AR5312_FLASHCTL_MW8 0x00000000 /* Mem width x8 */ +#define AR5312_FLASHCTL_MW16 0x10000000 /* Mem width x16 */ +#define AR5312_FLASHCTL_MW32 0x20000000 /* Mem width x32 (not supp) */ +#define AR5312_FLASHCTL_ATNR 0x00000000 /* Access == no retry */ +#define AR5312_FLASHCTL_ATR 0x80000000 /* Access == retry every */ +#define AR5312_FLASHCTL_ATR4 0xc0000000 /* Access == retry every 4 */ + +/* + * ARM SDRAM Controller -- just enough to determine memory size + */ +#define AR5312_MEM_CFG1 0x0004 + +#define AR5312_MEM_CFG1_AC0_M 0x00000700 /* bank 0: SDRAM addr check */ +#define AR5312_MEM_CFG1_AC0_S 8 +#define AR5312_MEM_CFG1_AC1_M 0x00007000 /* bank 1: SDRAM addr check */ +#define AR5312_MEM_CFG1_AC1_S 12 + +#endif /* __ASM_MACH_ATH25_AR5312_REGS_H */ diff --git a/arch/mips/ath25/board.c b/arch/mips/ath25/board.c new file mode 100644 index 000000000000..b8bb78282d6a --- /dev/null +++ b/arch/mips/ath25/board.c @@ -0,0 +1,234 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2003 Atheros Communications, Inc., All Rights Reserved. + * Copyright (C) 2006 FON Technology, SL. 
+ * Copyright (C) 2006 Imre Kaloz <kaloz@openwrt.org> + * Copyright (C) 2006-2009 Felix Fietkau <nbd@openwrt.org> + */ + +#include <linux/init.h> +#include <linux/interrupt.h> +#include <asm/irq_cpu.h> +#include <asm/reboot.h> +#include <asm/bootinfo.h> +#include <asm/time.h> + +#include <ath25_platform.h> +#include "devices.h" +#include "ar5312.h" +#include "ar2315.h" + +void (*ath25_irq_dispatch)(void); + +static inline bool check_radio_magic(const void __iomem *addr) +{ + addr += 0x7a; /* offset for flash magic */ + return (__raw_readb(addr) == 0x5a) && (__raw_readb(addr + 1) == 0xa5); +} + +static inline bool check_notempty(const void __iomem *addr) +{ + return __raw_readl(addr) != 0xffffffff; +} + +static inline bool check_board_data(const void __iomem *addr, bool broken) +{ + /* config magic found */ + if (__raw_readl(addr) == ATH25_BD_MAGIC) + return true; + + if (!broken) + return false; + + /* broken board data detected, use radio data to find the + * offset, user will fix this */ + + if (check_radio_magic(addr + 0x1000)) + return true; + if (check_radio_magic(addr + 0xf8)) + return true; + + return false; +} + +static const void __iomem * __init find_board_config(const void __iomem *limit, + const bool broken) +{ + const void __iomem *addr; + const void __iomem *begin = limit - 0x1000; + const void __iomem *end = limit - 0x30000; + + for (addr = begin; addr >= end; addr -= 0x1000) + if (check_board_data(addr, broken)) + return addr; + + return NULL; +} + +static const void __iomem * __init find_radio_config(const void __iomem *limit, + const void __iomem *bcfg) +{ + const void __iomem *rcfg, *begin, *end; + + /* + * Now find the start of Radio Configuration data, using heuristics: + * Search forward from Board Configuration data by 0x1000 bytes + * at a time until we find non-0xffffffff. + */ + begin = bcfg + 0x1000; + end = limit; + for (rcfg = begin; rcfg < end; rcfg += 0x1000) + if (check_notempty(rcfg) && check_radio_magic(rcfg)) + return rcfg; + + /* AR2316 relocates radio config to new location */ + begin = bcfg + 0xf8; + end = limit - 0x1000 + 0xf8; + for (rcfg = begin; rcfg < end; rcfg += 0x1000) + if (check_notempty(rcfg) && check_radio_magic(rcfg)) + return rcfg; + + return NULL; +} + +/* + * NB: Search region size could be larger than the actual flash size, + * but this shouldn't be a problem here, because the flash + * will simply be mapped multiple times. 
+ */ +int __init ath25_find_config(phys_addr_t base, unsigned long size) +{ + const void __iomem *flash_base, *flash_limit; + struct ath25_boarddata *config; + unsigned int rcfg_size; + int broken_boarddata = 0; + const void __iomem *bcfg, *rcfg; + u8 *board_data; + u8 *radio_data; + u8 *mac_addr; + u32 offset; + + flash_base = ioremap_nocache(base, size); + flash_limit = flash_base + size; + + ath25_board.config = NULL; + ath25_board.radio = NULL; + + /* Copy the board and radio data to RAM, because accessing the mapped + * memory of the flash directly after booting is not safe */ + + /* Try to find valid board and radio data */ + bcfg = find_board_config(flash_limit, false); + + /* If that fails, try to at least find valid radio data */ + if (!bcfg) { + bcfg = find_board_config(flash_limit, true); + broken_boarddata = 1; + } + + if (!bcfg) { + pr_warn("WARNING: No board configuration data found!\n"); + goto error; + } + + board_data = kzalloc(BOARD_CONFIG_BUFSZ, GFP_KERNEL); + ath25_board.config = (struct ath25_boarddata *)board_data; + memcpy_fromio(board_data, bcfg, 0x100); + if (broken_boarddata) { + pr_warn("WARNING: broken board data detected\n"); + config = ath25_board.config; + if (is_zero_ether_addr(config->enet0_mac)) { + pr_info("Fixing up empty mac addresses\n"); + config->reset_config_gpio = 0xffff; + config->sys_led_gpio = 0xffff; + random_ether_addr(config->wlan0_mac); + config->wlan0_mac[0] &= ~0x06; + random_ether_addr(config->enet0_mac); + random_ether_addr(config->enet1_mac); + } + } + + /* Radio config starts 0x100 bytes after board config, regardless + * of what the physical layout on the flash chip looks like */ + + rcfg = find_radio_config(flash_limit, bcfg); + if (!rcfg) { + pr_warn("WARNING: Could not find Radio Configuration data\n"); + goto error; + } + + radio_data = board_data + 0x100 + ((rcfg - bcfg) & 0xfff); + ath25_board.radio = radio_data; + offset = radio_data - board_data; + pr_info("Radio config found at offset 0x%x (0x%x)\n", rcfg - bcfg, + offset); + rcfg_size = BOARD_CONFIG_BUFSZ - offset; + memcpy_fromio(radio_data, rcfg, rcfg_size); + + mac_addr = &radio_data[0x1d * 2]; + if (is_broadcast_ether_addr(mac_addr)) { + pr_info("Radio MAC is blank; using board-data\n"); + ether_addr_copy(mac_addr, ath25_board.config->wlan0_mac); + } + + iounmap(flash_base); + + return 0; + +error: + iounmap(flash_base); + return -ENODEV; +} + +static void ath25_halt(void) +{ + local_irq_disable(); + unreachable(); +} + +void __init plat_mem_setup(void) +{ + _machine_halt = ath25_halt; + pm_power_off = ath25_halt; + + if (is_ar5312()) + ar5312_plat_mem_setup(); + else + ar2315_plat_mem_setup(); + + /* Disable data watchpoints */ + write_c0_watchlo0(0); +} + +asmlinkage void plat_irq_dispatch(void) +{ + ath25_irq_dispatch(); +} + +void __init plat_time_init(void) +{ + if (is_ar5312()) + ar5312_plat_time_init(); + else + ar2315_plat_time_init(); +} + +unsigned int __cpuinit get_c0_compare_int(void) +{ + return CP0_LEGACY_COMPARE_IRQ; +} + +void __init arch_init_irq(void) +{ + clear_c0_status(ST0_IM); + mips_cpu_irq_init(); + + /* Initialize interrupt controllers */ + if (is_ar5312()) + ar5312_arch_init_irq(); + else + ar2315_arch_init_irq(); +} diff --git a/arch/mips/ath25/devices.c b/arch/mips/ath25/devices.c new file mode 100644 index 000000000000..7a64567d1ac3 --- /dev/null +++ b/arch/mips/ath25/devices.c @@ -0,0 +1,125 @@ +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/serial_8250.h> +#include <linux/platform_device.h> +#include <asm/bootinfo.h> + 
+#include <ath25_platform.h> +#include "devices.h" +#include "ar5312.h" +#include "ar2315.h" + +struct ar231x_board_config ath25_board; +enum ath25_soc_type ath25_soc = ATH25_SOC_UNKNOWN; + +static struct resource ath25_wmac0_res[] = { + { + .name = "wmac0_membase", + .flags = IORESOURCE_MEM, + }, + { + .name = "wmac0_irq", + .flags = IORESOURCE_IRQ, + } +}; + +static struct resource ath25_wmac1_res[] = { + { + .name = "wmac1_membase", + .flags = IORESOURCE_MEM, + }, + { + .name = "wmac1_irq", + .flags = IORESOURCE_IRQ, + } +}; + +static struct platform_device ath25_wmac[] = { + { + .id = 0, + .name = "ar231x-wmac", + .resource = ath25_wmac0_res, + .num_resources = ARRAY_SIZE(ath25_wmac0_res), + .dev.platform_data = &ath25_board, + }, + { + .id = 1, + .name = "ar231x-wmac", + .resource = ath25_wmac1_res, + .num_resources = ARRAY_SIZE(ath25_wmac1_res), + .dev.platform_data = &ath25_board, + }, +}; + +static const char * const soc_type_strings[] = { + [ATH25_SOC_AR5312] = "Atheros AR5312", + [ATH25_SOC_AR2312] = "Atheros AR2312", + [ATH25_SOC_AR2313] = "Atheros AR2313", + [ATH25_SOC_AR2315] = "Atheros AR2315", + [ATH25_SOC_AR2316] = "Atheros AR2316", + [ATH25_SOC_AR2317] = "Atheros AR2317", + [ATH25_SOC_AR2318] = "Atheros AR2318", + [ATH25_SOC_UNKNOWN] = "Atheros (unknown)", +}; + +const char *get_system_type(void) +{ + if ((ath25_soc >= ARRAY_SIZE(soc_type_strings)) || + !soc_type_strings[ath25_soc]) + return soc_type_strings[ATH25_SOC_UNKNOWN]; + return soc_type_strings[ath25_soc]; +} + +void __init ath25_serial_setup(u32 mapbase, int irq, unsigned int uartclk) +{ + struct uart_port s; + + memset(&s, 0, sizeof(s)); + + s.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP; + s.iotype = UPIO_MEM32; + s.irq = irq; + s.regshift = 2; + s.mapbase = mapbase; + s.uartclk = uartclk; + + early_serial_setup(&s); +} + +int __init ath25_add_wmac(int nr, u32 base, int irq) +{ + struct resource *res; + + ath25_wmac[nr].dev.platform_data = &ath25_board; + res = &ath25_wmac[nr].resource[0]; + res->start = base; + res->end = base + 0x10000 - 1; + res++; + res->start = irq; + res->end = irq; + return platform_device_register(&ath25_wmac[nr]); +} + +static int __init ath25_register_devices(void) +{ + if (is_ar5312()) + ar5312_init_devices(); + else + ar2315_init_devices(); + + return 0; +} + +device_initcall(ath25_register_devices); + +static int __init ath25_arch_init(void) +{ + if (is_ar5312()) + ar5312_arch_init(); + else + ar2315_arch_init(); + + return 0; +} + +arch_initcall(ath25_arch_init); diff --git a/arch/mips/ath25/devices.h b/arch/mips/ath25/devices.h new file mode 100644 index 000000000000..04d414115356 --- /dev/null +++ b/arch/mips/ath25/devices.h @@ -0,0 +1,43 @@ +#ifndef __ATH25_DEVICES_H +#define __ATH25_DEVICES_H + +#include <linux/cpu.h> + +#define ATH25_REG_MS(_val, _field) (((_val) & _field##_M) >> _field##_S) + +#define ATH25_IRQ_CPU_CLOCK (MIPS_CPU_IRQ_BASE + 7) /* C0_CAUSE: 0x8000 */ + +enum ath25_soc_type { + /* handled by ar5312.c */ + ATH25_SOC_AR2312, + ATH25_SOC_AR2313, + ATH25_SOC_AR5312, + + /* handled by ar2315.c */ + ATH25_SOC_AR2315, + ATH25_SOC_AR2316, + ATH25_SOC_AR2317, + ATH25_SOC_AR2318, + + ATH25_SOC_UNKNOWN +}; + +extern enum ath25_soc_type ath25_soc; +extern struct ar231x_board_config ath25_board; +extern void (*ath25_irq_dispatch)(void); + +int ath25_find_config(phys_addr_t offset, unsigned long size); +void ath25_serial_setup(u32 mapbase, int irq, unsigned int uartclk); +int ath25_add_wmac(int nr, u32 base, int irq); + +static inline bool is_ar2315(void) +{ + 
return (current_cpu_data.cputype == CPU_4KEC); +} + +static inline bool is_ar5312(void) +{ + return !is_ar2315(); +} + +#endif diff --git a/arch/mips/ath25/early_printk.c b/arch/mips/ath25/early_printk.c new file mode 100644 index 000000000000..36035b628161 --- /dev/null +++ b/arch/mips/ath25/early_printk.c @@ -0,0 +1,44 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2010 Gabor Juhos <juhosg@openwrt.org> + */ + +#include <linux/mm.h> +#include <linux/io.h> +#include <linux/serial_reg.h> + +#include "devices.h" +#include "ar2315_regs.h" +#include "ar5312_regs.h" + +static inline void prom_uart_wr(void __iomem *base, unsigned reg, + unsigned char ch) +{ + __raw_writel(ch, base + 4 * reg); +} + +static inline unsigned char prom_uart_rr(void __iomem *base, unsigned reg) +{ + return __raw_readl(base + 4 * reg); +} + +void prom_putchar(unsigned char ch) +{ + static void __iomem *base; + + if (unlikely(base == NULL)) { + if (is_ar2315()) + base = (void __iomem *)(KSEG1ADDR(AR2315_UART0_BASE)); + else + base = (void __iomem *)(KSEG1ADDR(AR5312_UART0_BASE)); + } + + while ((prom_uart_rr(base, UART_LSR) & UART_LSR_THRE) == 0) + ; + prom_uart_wr(base, UART_TX, ch); + while ((prom_uart_rr(base, UART_LSR) & UART_LSR_THRE) == 0) + ; +} diff --git a/arch/mips/ath25/prom.c b/arch/mips/ath25/prom.c new file mode 100644 index 000000000000..edf82be8870d --- /dev/null +++ b/arch/mips/ath25/prom.c @@ -0,0 +1,26 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright MontaVista Software Inc + * Copyright (C) 2003 Atheros Communications, Inc., All Rights Reserved. + * Copyright (C) 2006 FON Technology, SL. 
+ * Copyright (C) 2006 Imre Kaloz <kaloz@openwrt.org> + * Copyright (C) 2006 Felix Fietkau <nbd@openwrt.org> + */ + +/* + * Prom setup file for AR5312/AR231x SoCs + */ + +#include <linux/init.h> +#include <asm/bootinfo.h> + +void __init prom_init(void) +{ +} + +void __init prom_free_prom_memory(void) +{ +} diff --git a/arch/mips/ath79/irq.c b/arch/mips/ath79/irq.c index 9c0e1761773f..6adae366f11a 100644 --- a/arch/mips/ath79/irq.c +++ b/arch/mips/ath79/irq.c @@ -359,7 +359,6 @@ void __init arch_init_irq(void) BUG(); } - cp0_perfcount_irq = ATH79_MISC_IRQ(5); mips_cpu_irq_init(); ath79_misc_irq_init(); diff --git a/arch/mips/ath79/prom.c b/arch/mips/ath79/prom.c index e9cbd7c2918f..e1fe63051136 100644 --- a/arch/mips/ath79/prom.c +++ b/arch/mips/ath79/prom.c @@ -13,42 +13,24 @@ #include <linux/init.h> #include <linux/io.h> #include <linux/string.h> +#include <linux/initrd.h> #include <asm/bootinfo.h> #include <asm/addrspace.h> +#include <asm/fw/fw.h> #include "common.h" -static inline int is_valid_ram_addr(void *addr) -{ - if (((u32) addr > KSEG0) && - ((u32) addr < (KSEG0 + ATH79_MEM_SIZE_MAX))) - return 1; - - if (((u32) addr > KSEG1) && - ((u32) addr < (KSEG1 + ATH79_MEM_SIZE_MAX))) - return 1; - - return 0; -} - -static __init void ath79_prom_init_cmdline(int argc, char **argv) -{ - int i; - - if (!is_valid_ram_addr(argv)) - return; - - for (i = 0; i < argc; i++) - if (is_valid_ram_addr(argv[i])) { - strlcat(arcs_cmdline, " ", sizeof(arcs_cmdline)); - strlcat(arcs_cmdline, argv[i], sizeof(arcs_cmdline)); - } -} - void __init prom_init(void) { - ath79_prom_init_cmdline(fw_arg0, (char **)fw_arg1); + fw_init_cmdline(); + + /* Read the initrd address from the firmware environment */ + initrd_start = fw_getenvl("initrd_start"); + if (initrd_start) { + initrd_start = KSEG0ADDR(initrd_start); + initrd_end = initrd_start + fw_getenvl("initrd_size"); + } } void __init prom_free_prom_memory(void) diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c index 64807a4809d0..a73c93c3d44a 100644 --- a/arch/mips/ath79/setup.c +++ b/arch/mips/ath79/setup.c @@ -182,6 +182,11 @@ const char *get_system_type(void) return ath79_sys_type; } +int get_c0_perfcount_int(void) +{ + return ATH79_MISC_IRQ(5); +} + unsigned int get_c0_compare_int(void) { return CP0_LEGACY_COMPARE_IRQ; diff --git a/arch/mips/bcm3384/Makefile b/arch/mips/bcm3384/Makefile new file mode 100644 index 000000000000..a393955cba08 --- /dev/null +++ b/arch/mips/bcm3384/Makefile @@ -0,0 +1 @@ +obj-y += setup.o irq.o dma.o diff --git a/arch/mips/bcm3384/Platform b/arch/mips/bcm3384/Platform new file mode 100644 index 000000000000..8e1ca0819e1b --- /dev/null +++ b/arch/mips/bcm3384/Platform @@ -0,0 +1,7 @@ +# +# Broadcom BCM3384 boards +# +platform-$(CONFIG_BCM3384) += bcm3384/ +cflags-$(CONFIG_BCM3384) += \ + -I$(srctree)/arch/mips/include/asm/mach-bcm3384/ +load-$(CONFIG_BCM3384) := 0xffffffff80010000 diff --git a/arch/mips/bcm3384/dma.c b/arch/mips/bcm3384/dma.c new file mode 100644 index 000000000000..ea42012fd4f5 --- /dev/null +++ b/arch/mips/bcm3384/dma.c @@ -0,0 +1,81 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ * + * Copyright (C) 2014 Kevin Cernekee <cernekee@gmail.com> + */ + +#include <linux/device.h> +#include <linux/dma-direction.h> +#include <linux/dma-mapping.h> +#include <linux/init.h> +#include <linux/mm.h> +#include <linux/of.h> +#include <linux/pci.h> +#include <linux/types.h> +#include <dma-coherence.h> + +/* + * BCM3384 has configurable address translation windows which allow the + * peripherals' DMA addresses to be different from the Zephyr-visible + * physical addresses. e.g. usb_dma_addr = zephyr_pa ^ 0x08000000 + * + * If our DT "memory" node has a "dma-xor-mask" property we will enable this + * translation using the provided offset. + */ +static u32 bcm3384_dma_xor_mask; +static u32 bcm3384_dma_xor_limit = 0xffffffff; + +/* + * PCI collapses the memory hole at 0x10000000 - 0x1fffffff. + * On systems with a dma-xor-mask, this range is guaranteed to live above + * the dma-xor-limit. + */ +#define BCM3384_MEM_HOLE_PA 0x10000000 +#define BCM3384_MEM_HOLE_SIZE 0x10000000 + +static dma_addr_t bcm3384_phys_to_dma(struct device *dev, phys_addr_t pa) +{ + if (dev && dev_is_pci(dev) && + pa >= (BCM3384_MEM_HOLE_PA + BCM3384_MEM_HOLE_SIZE)) + return pa - BCM3384_MEM_HOLE_SIZE; + if (pa <= bcm3384_dma_xor_limit) + return pa ^ bcm3384_dma_xor_mask; + return pa; +} + +dma_addr_t plat_map_dma_mem(struct device *dev, void *addr, size_t size) +{ + return bcm3384_phys_to_dma(dev, virt_to_phys(addr)); +} + +dma_addr_t plat_map_dma_mem_page(struct device *dev, struct page *page) +{ + return bcm3384_phys_to_dma(dev, page_to_phys(page)); +} + +unsigned long plat_dma_addr_to_phys(struct device *dev, dma_addr_t dma_addr) +{ + if (dev && dev_is_pci(dev) && + dma_addr >= BCM3384_MEM_HOLE_PA) + return dma_addr + BCM3384_MEM_HOLE_SIZE; + if ((dma_addr ^ bcm3384_dma_xor_mask) <= bcm3384_dma_xor_limit) + return dma_addr ^ bcm3384_dma_xor_mask; + return dma_addr; +} + +static int __init bcm3384_init_dma_xor(void) +{ + struct device_node *np = of_find_node_by_type(NULL, "memory"); + + if (!np) + return 0; + + of_property_read_u32(np, "dma-xor-mask", &bcm3384_dma_xor_mask); + of_property_read_u32(np, "dma-xor-limit", &bcm3384_dma_xor_limit); + + of_node_put(np); + return 0; +} +arch_initcall(bcm3384_init_dma_xor); diff --git a/arch/mips/bcm3384/irq.c b/arch/mips/bcm3384/irq.c new file mode 100644 index 000000000000..0fb5134fb832 --- /dev/null +++ b/arch/mips/bcm3384/irq.c @@ -0,0 +1,193 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
+ * + * Partially based on arch/mips/ralink/irq.c + * + * Copyright (C) 2009 Gabor Juhos <juhosg@openwrt.org> + * Copyright (C) 2013 John Crispin <blogic@openwrt.org> + * Copyright (C) 2014 Kevin Cernekee <cernekee@gmail.com> + */ + +#include <linux/io.h> +#include <linux/bitops.h> +#include <linux/of_platform.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/irqdomain.h> +#include <linux/interrupt.h> +#include <linux/slab.h> +#include <linux/spinlock.h> + +#include <asm/bmips.h> +#include <asm/irq_cpu.h> +#include <asm/mipsregs.h> + +/* INTC register offsets */ +#define INTC_REG_ENABLE 0x00 +#define INTC_REG_STATUS 0x04 + +#define MAX_WORDS 2 +#define IRQS_PER_WORD 32 + +struct bcm3384_intc { + int n_words; + void __iomem *reg[MAX_WORDS]; + u32 enable[MAX_WORDS]; + spinlock_t lock; +}; + +static void bcm3384_intc_irq_unmask(struct irq_data *d) +{ + struct bcm3384_intc *priv = d->domain->host_data; + unsigned long flags; + int idx = d->hwirq / IRQS_PER_WORD; + int bit = d->hwirq % IRQS_PER_WORD; + + spin_lock_irqsave(&priv->lock, flags); + priv->enable[idx] |= BIT(bit); + __raw_writel(priv->enable[idx], priv->reg[idx] + INTC_REG_ENABLE); + spin_unlock_irqrestore(&priv->lock, flags); +} + +static void bcm3384_intc_irq_mask(struct irq_data *d) +{ + struct bcm3384_intc *priv = d->domain->host_data; + unsigned long flags; + int idx = d->hwirq / IRQS_PER_WORD; + int bit = d->hwirq % IRQS_PER_WORD; + + spin_lock_irqsave(&priv->lock, flags); + priv->enable[idx] &= ~BIT(bit); + __raw_writel(priv->enable[idx], priv->reg[idx] + INTC_REG_ENABLE); + spin_unlock_irqrestore(&priv->lock, flags); +} + +static struct irq_chip bcm3384_intc_irq_chip = { + .name = "INTC", + .irq_unmask = bcm3384_intc_irq_unmask, + .irq_mask = bcm3384_intc_irq_mask, + .irq_mask_ack = bcm3384_intc_irq_mask, +}; + +unsigned int get_c0_compare_int(void) +{ + return CP0_LEGACY_COMPARE_IRQ; +} + +static void bcm3384_intc_irq_handler(unsigned int irq, struct irq_desc *desc) +{ + struct irq_domain *domain = irq_get_handler_data(irq); + struct bcm3384_intc *priv = domain->host_data; + unsigned long flags; + unsigned int idx; + + for (idx = 0; idx < priv->n_words; idx++) { + unsigned long pending; + int hwirq; + + spin_lock_irqsave(&priv->lock, flags); + pending = __raw_readl(priv->reg[idx] + INTC_REG_STATUS) & + priv->enable[idx]; + spin_unlock_irqrestore(&priv->lock, flags); + + for_each_set_bit(hwirq, &pending, IRQS_PER_WORD) { + generic_handle_irq(irq_find_mapping(domain, + hwirq + idx * IRQS_PER_WORD)); + } + } +} + +asmlinkage void plat_irq_dispatch(void) +{ + unsigned long pending = + (read_c0_status() & read_c0_cause() & ST0_IM) >> STATUSB_IP0; + int bit; + + for_each_set_bit(bit, &pending, 8) + do_IRQ(MIPS_CPU_IRQ_BASE + bit); +} + +static int intc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) +{ + irq_set_chip_and_handler(irq, &bcm3384_intc_irq_chip, handle_level_irq); + return 0; +} + +static const struct irq_domain_ops irq_domain_ops = { + .xlate = irq_domain_xlate_onecell, + .map = intc_map, +}; + +static int __init ioremap_one_pair(struct bcm3384_intc *priv, + struct device_node *node, + int idx) +{ + struct resource res; + + if (of_address_to_resource(node, idx, &res)) + return 0; + + if (request_mem_region(res.start, resource_size(&res), + res.name) < 0) + pr_err("Failed to request INTC register region\n"); + + priv->reg[idx] = ioremap_nocache(res.start, resource_size(&res)); + if (!priv->reg[idx]) + panic("Failed to ioremap INTC register range"); + + /* start up with 
everything masked before we hook the parent IRQ */ + __raw_writel(0, priv->reg[idx] + INTC_REG_ENABLE); + priv->enable[idx] = 0; + + return IRQS_PER_WORD; +} + +static int __init intc_of_init(struct device_node *node, + struct device_node *parent) +{ + struct irq_domain *domain; + unsigned int parent_irq, n_irqs = 0; + struct bcm3384_intc *priv; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + panic("Failed to allocate bcm3384_intc struct"); + + spin_lock_init(&priv->lock); + + parent_irq = irq_of_parse_and_map(node, 0); + if (!parent_irq) + panic("Failed to get INTC IRQ"); + + n_irqs += ioremap_one_pair(priv, node, 0); + n_irqs += ioremap_one_pair(priv, node, 1); + + if (!n_irqs) + panic("Failed to map INTC registers"); + + priv->n_words = n_irqs / IRQS_PER_WORD; + domain = irq_domain_add_linear(node, n_irqs, &irq_domain_ops, priv); + if (!domain) + panic("Failed to add irqdomain"); + + irq_set_chained_handler(parent_irq, bcm3384_intc_irq_handler); + irq_set_handler_data(parent_irq, domain); + + return 0; +} + +static struct of_device_id of_irq_ids[] __initdata = { + { .compatible = "mti,cpu-interrupt-controller", + .data = mips_cpu_intc_init }, + { .compatible = "brcm,bcm3384-intc", + .data = intc_of_init }, + {}, +}; + +void __init arch_init_irq(void) +{ + bmips_tp1_irqs = 0; + of_irq_init(of_irq_ids); +} diff --git a/arch/mips/bcm3384/setup.c b/arch/mips/bcm3384/setup.c new file mode 100644 index 000000000000..d84b8400b874 --- /dev/null +++ b/arch/mips/bcm3384/setup.c @@ -0,0 +1,97 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr> + * Copyright (C) 2014 Kevin Cernekee <cernekee@gmail.com> + */ + +#include <linux/init.h> +#include <linux/bootmem.h> +#include <linux/clk-provider.h> +#include <linux/ioport.h> +#include <linux/of.h> +#include <linux/of_fdt.h> +#include <linux/of_platform.h> +#include <linux/smp.h> +#include <asm/addrspace.h> +#include <asm/bmips.h> +#include <asm/bootinfo.h> +#include <asm/prom.h> +#include <asm/smp-ops.h> +#include <asm/time.h> + +void __init prom_init(void) +{ + register_bmips_smp_ops(); +} + +void __init prom_free_prom_memory(void) +{ +} + +const char *get_system_type(void) +{ + return "BCM3384"; +} + +void __init plat_time_init(void) +{ + struct device_node *np; + u32 freq; + + np = of_find_node_by_name(NULL, "cpus"); + if (!np) + panic("missing 'cpus' DT node"); + if (of_property_read_u32(np, "mips-hpt-frequency", &freq) < 0) + panic("missing 'mips-hpt-frequency' property"); + of_node_put(np); + + mips_hpt_frequency = freq; +} + +void __init plat_mem_setup(void) +{ + void *dtb = __dtb_start; + + set_io_port_base(0); + ioport_resource.start = 0; + ioport_resource.end = ~0; + + /* intended to somewhat resemble ARM; see Documentation/arm/Booting */ + if (fw_arg0 == 0 && fw_arg1 == 0xffffffff) + dtb = phys_to_virt(fw_arg2); + + __dt_setup_arch(dtb); + + strlcpy(arcs_cmdline, boot_command_line, COMMAND_LINE_SIZE); +} + +void __init device_tree_init(void) +{ + struct device_node *np; + + unflatten_and_copy_device_tree(); + + /* Disable SMP boot unless both CPUs are listed in DT and !disabled */ + np = of_find_node_by_name(NULL, "cpus"); + if (np && of_get_available_child_count(np) <= 1) + bmips_smp_enabled = 0; + of_node_put(np); +} + +int __init plat_of_setup(void) +{ + return __dt_register_buses("brcm,bcm3384", "simple-bus"); +} + 
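[Editor's note] Each INTC instance handled by the bcm3384 irq driver above packs its lines into one or two 32-bit ENABLE/STATUS register pairs, so a hwirq is located by dividing by IRQS_PER_WORD for the word index and taking the remainder for the bit. A small stand-alone sketch of that split, using as an example the EHCI line (41) wired to periph_intc in the bcm3384.dtsi added later in this patch:

    #include <stdint.h>
    #include <stdio.h>

    #define IRQS_PER_WORD 32    /* matches the driver above */

    int main(void)
    {
        unsigned int hwirq = 41;                    /* EHCI line in bcm3384.dtsi */
        unsigned int idx = hwirq / IRQS_PER_WORD;   /* which ENABLE/STATUS pair */
        unsigned int bit = hwirq % IRQS_PER_WORD;   /* bit within that word */
        uint32_t enable = 1u << bit;                /* what irq_unmask() ORs in */

        /* Prints: hwirq 41 -> word 1, bit 9, enable mask 0x00000200 */
        printf("hwirq %u -> word %u, bit %u, enable mask 0x%08x\n",
               hwirq, idx, bit, enable);
        return 0;
    }

The chained handler reverses the mapping: for each word it reads STATUS, masks it with the software enable copy, and calls generic_handle_irq() for each set bit at hwirq = idx * 32 + bit.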
+arch_initcall(plat_of_setup); + +static int __init plat_dev_init(void) +{ + of_clk_init(NULL); + return 0; +} + +device_initcall(plat_dev_init); diff --git a/arch/mips/bcm47xx/bcm47xx_private.h b/arch/mips/bcm47xx/bcm47xx_private.h index f1cc9d0495d8..ea909a56a3ee 100644 --- a/arch/mips/bcm47xx/bcm47xx_private.h +++ b/arch/mips/bcm47xx/bcm47xx_private.h @@ -6,12 +6,18 @@ /* prom.c */ void __init bcm47xx_prom_highmem_init(void); +/* sprom.c */ +void bcm47xx_sprom_register_fallbacks(void); + /* buttons.c */ int __init bcm47xx_buttons_register(void); /* leds.c */ void __init bcm47xx_leds_register(void); +/* setup.c */ +void __init bcm47xx_bus_setup(void); + /* workarounds.c */ void __init bcm47xx_workarounds(void); diff --git a/arch/mips/bcm47xx/irq.c b/arch/mips/bcm47xx/irq.c index e0585b76ec19..21b4497f09be 100644 --- a/arch/mips/bcm47xx/irq.c +++ b/arch/mips/bcm47xx/irq.c @@ -22,6 +22,8 @@ * 675 Mass Ave, Cambridge, MA 02139, USA. */ +#include "bcm47xx_private.h" + #include <linux/types.h> #include <linux/interrupt.h> #include <linux/irq.h> @@ -65,6 +67,12 @@ DEFINE_HWx_IRQDISPATCH(7) void __init arch_init_irq(void) { + /* + * This is the first arch callback after mm_init (we can use kmalloc), + * so let's finish bus initialization now. + */ + bcm47xx_bus_setup(); + #ifdef CONFIG_BCM47XX_BCMA if (bcm47xx_bus_type == BCM47XX_BUS_TYPE_BCMA) { bcma_write32(bcm47xx_bus.bcma.bus.drv_mips.core, diff --git a/arch/mips/bcm47xx/nvram.c b/arch/mips/bcm47xx/nvram.c index 2bed73a684ae..c5c381c43f17 100644 --- a/arch/mips/bcm47xx/nvram.c +++ b/arch/mips/bcm47xx/nvram.c @@ -13,24 +13,35 @@ #include <linux/types.h> #include <linux/module.h> -#include <linux/ssb/ssb.h> #include <linux/kernel.h> #include <linux/string.h> -#include <asm/addrspace.h> +#include <linux/mtd/mtd.h> #include <bcm47xx_nvram.h> -#include <asm/mach-bcm47xx/bcm47xx.h> + +#define NVRAM_MAGIC 0x48534C46 /* 'FLSH' */ +#define NVRAM_SPACE 0x8000 + +#define FLASH_MIN 0x00020000 /* Minimum flash size */ + +struct nvram_header { + u32 magic; + u32 len; + u32 crc_ver_init; /* 0:7 crc, 8:15 ver, 16:31 sdram_init */ + u32 config_refresh; /* 0:15 sdram_config, 16:31 sdram_refresh */ + u32 config_ncdl; /* ncdl values for memc */ +}; static char nvram_buf[NVRAM_SPACE]; static const u32 nvram_sizes[] = {0x8000, 0xF000, 0x10000}; -static u32 find_nvram_size(u32 end) +static u32 find_nvram_size(void __iomem *end) { - struct nvram_header *header; + struct nvram_header __iomem *header; int i; for (i = 0; i < ARRAY_SIZE(nvram_sizes); i++) { - header = (struct nvram_header *)KSEG1ADDR(end - nvram_sizes[i]); - if (header->magic == NVRAM_HEADER) + header = (struct nvram_header *)(end - nvram_sizes[i]); + if (header->magic == NVRAM_MAGIC) return nvram_sizes[i]; } @@ -38,36 +49,40 @@ static u32 find_nvram_size(u32 end) } /* Probe for NVRAM header */ -static int nvram_find_and_copy(u32 base, u32 lim) +static int nvram_find_and_copy(void __iomem *iobase, u32 lim) { - struct nvram_header *header; + struct nvram_header __iomem *header; int i; u32 off; u32 *src, *dst; u32 size; + if (nvram_buf[0]) { + pr_warn("nvram already initialized\n"); + return -EEXIST; + } + /* TODO: when nvram is on nand flash check for bad blocks first. 
*/ off = FLASH_MIN; while (off <= lim) { /* Windowed flash access */ - size = find_nvram_size(base + off); + size = find_nvram_size(iobase + off); if (size) { - header = (struct nvram_header *)KSEG1ADDR(base + off - - size); + header = (struct nvram_header *)(iobase + off - size); goto found; } off <<= 1; } /* Try embedded NVRAM at 4 KB and 1 KB as last resorts */ - header = (struct nvram_header *) KSEG1ADDR(base + 4096); - if (header->magic == NVRAM_HEADER) { + header = (struct nvram_header *)(iobase + 4096); + if (header->magic == NVRAM_MAGIC) { size = NVRAM_SPACE; goto found; } - header = (struct nvram_header *) KSEG1ADDR(base + 1024); - if (header->magic == NVRAM_HEADER) { + header = (struct nvram_header *)(iobase + 1024); + if (header->magic == NVRAM_MAGIC) { size = NVRAM_SPACE; goto found; } @@ -94,71 +109,73 @@ found: return 0; } -#ifdef CONFIG_BCM47XX_SSB -static int nvram_init_ssb(void) +/* + * On bcm47xx we need access to the NVRAM very early, so we can't use mtd + * subsystem to access flash. We can't even use platform device / driver to + * store memory offset. + * To handle this we provide following symbol. It's supposed to be called as + * soon as we get info about flash device, before any NVRAM entry is needed. + */ +int bcm47xx_nvram_init_from_mem(u32 base, u32 lim) { - struct ssb_mipscore *mcore = &bcm47xx_bus.ssb.mipscore; - u32 base; - u32 lim; - - if (mcore->pflash.present) { - base = mcore->pflash.window; - lim = mcore->pflash.window_size; - } else { - pr_err("Couldn't find supported flash memory\n"); - return -ENXIO; - } + void __iomem *iobase; + int err; - return nvram_find_and_copy(base, lim); -} -#endif + iobase = ioremap_nocache(base, lim); + if (!iobase) + return -ENOMEM; -#ifdef CONFIG_BCM47XX_BCMA -static int nvram_init_bcma(void) -{ - struct bcma_drv_cc *cc = &bcm47xx_bus.bcma.bus.drv_cc; - u32 base; - u32 lim; - -#ifdef CONFIG_BCMA_NFLASH - if (cc->nflash.boot) { - base = BCMA_SOC_FLASH1; - lim = BCMA_SOC_FLASH1_SZ; - } else -#endif - if (cc->pflash.present) { - base = cc->pflash.window; - lim = cc->pflash.window_size; -#ifdef CONFIG_BCMA_SFLASH - } else if (cc->sflash.present) { - base = cc->sflash.window; - lim = cc->sflash.size; -#endif - } else { - pr_err("Couldn't find supported flash memory\n"); - return -ENXIO; - } + err = nvram_find_and_copy(iobase, lim); + + iounmap(iobase); - return nvram_find_and_copy(base, lim); + return err; } -#endif static int nvram_init(void) { - switch (bcm47xx_bus_type) { -#ifdef CONFIG_BCM47XX_SSB - case BCM47XX_BUS_TYPE_SSB: - return nvram_init_ssb(); -#endif -#ifdef CONFIG_BCM47XX_BCMA - case BCM47XX_BUS_TYPE_BCMA: - return nvram_init_bcma(); -#endif +#ifdef CONFIG_MTD + struct mtd_info *mtd; + struct nvram_header header; + size_t bytes_read; + int err, i; + + mtd = get_mtd_device_nm("nvram"); + if (IS_ERR(mtd)) + return -ENODEV; + + for (i = 0; i < ARRAY_SIZE(nvram_sizes); i++) { + loff_t from = mtd->size - nvram_sizes[i]; + + if (from < 0) + continue; + + err = mtd_read(mtd, from, sizeof(header), &bytes_read, + (uint8_t *)&header); + if (!err && header.magic == NVRAM_MAGIC) { + u8 *dst = (uint8_t *)nvram_buf; + size_t len = header.len; + + if (header.len > NVRAM_SPACE) { + pr_err("nvram on flash (%i bytes) is bigger than the reserved space in memory, will just copy the first %i bytes\n", + header.len, NVRAM_SPACE); + len = NVRAM_SPACE; + } + + err = mtd_read(mtd, from, len, &bytes_read, dst); + if (err) + return err; + memset(dst + bytes_read, 0x0, NVRAM_SPACE - bytes_read); + + return 0; + } } +#endif + return -ENXIO; 
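[Editor's note] The bcm47xx NVRAM probe above scans candidate flash windows starting at FLASH_MIN and doubling the offset, and at each window end checks for the 'FLSH' magic at end-minus-size for every supported NVRAM size. A self-contained sketch of that probe order, with a fake flash window and a purely hypothetical header location:

    #include <stdint.h>
    #include <stdio.h>

    #define NVRAM_MAGIC 0x48534C46  /* 'FLSH', as in the header above */
    #define FLASH_MIN   0x00020000

    static const uint32_t nvram_sizes[] = { 0x8000, 0xF000, 0x10000 };

    /* Stand-in for the mapped flash window; pretend the NVRAM header sits
     * 32 KB below the 8 MB boundary (a purely hypothetical layout). */
    static uint32_t flash_read32(uint32_t offset)
    {
        return offset == 0x7f8000 ? NVRAM_MAGIC : 0;
    }

    int main(void)
    {
        uint32_t off, lim = 0x800000;   /* hypothetical 8 MB window */
        size_t i;

        for (off = FLASH_MIN; off <= lim; off <<= 1) {
            for (i = 0; i < sizeof(nvram_sizes) / sizeof(nvram_sizes[0]); i++) {
                if (flash_read32(off - nvram_sizes[i]) == NVRAM_MAGIC) {
                    /* Prints: NVRAM: 32768 bytes at offset 0x7f8000 */
                    printf("NVRAM: %u bytes at offset 0x%06x\n",
                           (unsigned int)nvram_sizes[i],
                           (unsigned int)(off - nvram_sizes[i]));
                    return 0;
                }
            }
        }
        printf("no NVRAM header found\n");
        return 0;
    }

The kernel code falls back to checking embedded NVRAM at offsets 4096 and 1024 if no header is found by this scan, and the new bcm47xx_nvram_init_from_mem() simply ioremaps the window before running the same search.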
} -int bcm47xx_nvram_getenv(char *name, char *val, size_t val_len) +int bcm47xx_nvram_getenv(const char *name, char *val, size_t val_len) { char *var, *value, *end, *eq; int err; diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c index c00585d915bc..e43b5046cb30 100644 --- a/arch/mips/bcm47xx/setup.c +++ b/arch/mips/bcm47xx/setup.c @@ -102,23 +102,6 @@ static void bcm47xx_machine_halt(void) } #ifdef CONFIG_BCM47XX_SSB -static int bcm47xx_get_sprom_ssb(struct ssb_bus *bus, struct ssb_sprom *out) -{ - char prefix[10]; - - if (bus->bustype == SSB_BUSTYPE_PCI) { - memset(out, 0, sizeof(struct ssb_sprom)); - snprintf(prefix, sizeof(prefix), "pci/%u/%u/", - bus->host_pci->bus->number + 1, - PCI_SLOT(bus->host_pci->devfn)); - bcm47xx_fill_sprom(out, prefix, false); - return 0; - } else { - printk(KERN_WARNING "bcm47xx: unable to fill SPROM for given bustype.\n"); - return -EINVAL; - } -} - static int bcm47xx_get_invariants(struct ssb_bus *bus, struct ssb_init_invariants *iv) { @@ -144,11 +127,6 @@ static void __init bcm47xx_register_ssb(void) char buf[100]; struct ssb_mipscore *mcore; - err = ssb_arch_register_fallback_sprom(&bcm47xx_get_sprom_ssb); - if (err) - printk(KERN_WARNING "bcm47xx: someone else already registered" - " a ssb SPROM callback handler (err %d)\n", err); - err = ssb_bus_ssbbus_register(&(bcm47xx_bus.ssb), SSB_ENUM_BASE, bcm47xx_get_invariants); if (err) @@ -171,56 +149,21 @@ static void __init bcm47xx_register_ssb(void) #endif #ifdef CONFIG_BCM47XX_BCMA -static int bcm47xx_get_sprom_bcma(struct bcma_bus *bus, struct ssb_sprom *out) -{ - char prefix[10]; - struct bcma_device *core; - - switch (bus->hosttype) { - case BCMA_HOSTTYPE_PCI: - memset(out, 0, sizeof(struct ssb_sprom)); - snprintf(prefix, sizeof(prefix), "pci/%u/%u/", - bus->host_pci->bus->number + 1, - PCI_SLOT(bus->host_pci->devfn)); - bcm47xx_fill_sprom(out, prefix, false); - return 0; - case BCMA_HOSTTYPE_SOC: - memset(out, 0, sizeof(struct ssb_sprom)); - core = bcma_find_core(bus, BCMA_CORE_80211); - if (core) { - snprintf(prefix, sizeof(prefix), "sb/%u/", - core->core_index); - bcm47xx_fill_sprom(out, prefix, true); - } else { - bcm47xx_fill_sprom(out, NULL, false); - } - return 0; - default: - pr_warn("bcm47xx: unable to fill SPROM for given bustype.\n"); - return -EINVAL; - } -} - static void __init bcm47xx_register_bcma(void) { int err; - err = bcma_arch_register_fallback_sprom(&bcm47xx_get_sprom_bcma); - if (err) - pr_warn("bcm47xx: someone else already registered a bcma SPROM callback handler (err %d)\n", err); - err = bcma_host_soc_register(&bcm47xx_bus.bcma); if (err) panic("Failed to register BCMA bus (err %d)", err); - - err = bcma_host_soc_init(&bcm47xx_bus.bcma); - if (err) - panic("Failed to initialize BCMA bus (err %d)", err); - - bcm47xx_fill_bcma_boardinfo(&bcm47xx_bus.bcma.bus.boardinfo, NULL); } #endif +/* + * Memory setup is done in the early part of MIPS's arch_mem_init. It's supposed + * to detect memory and record it with add_memory_region. + * Any extra initializaion performed here must not use kmalloc or bootmem. 
+ */ void __init plat_mem_setup(void) { struct cpuinfo_mips *c = ¤t_cpu_data; @@ -229,6 +172,7 @@ void __init plat_mem_setup(void) printk(KERN_INFO "bcm47xx: using bcma bus\n"); #ifdef CONFIG_BCM47XX_BCMA bcm47xx_bus_type = BCM47XX_BUS_TYPE_BCMA; + bcm47xx_sprom_register_fallbacks(); bcm47xx_register_bcma(); bcm47xx_set_system_type(bcm47xx_bus.bcma.bus.chipinfo.id); #ifdef CONFIG_HIGHMEM @@ -239,6 +183,7 @@ void __init plat_mem_setup(void) printk(KERN_INFO "bcm47xx: using ssb bus\n"); #ifdef CONFIG_BCM47XX_SSB bcm47xx_bus_type = BCM47XX_BUS_TYPE_SSB; + bcm47xx_sprom_register_fallbacks(); bcm47xx_register_ssb(); bcm47xx_set_system_type(bcm47xx_bus.ssb.chip_id); #endif @@ -247,6 +192,28 @@ void __init plat_mem_setup(void) _machine_restart = bcm47xx_machine_restart; _machine_halt = bcm47xx_machine_halt; pm_power_off = bcm47xx_machine_halt; +} + +/* + * This finishes bus initialization doing things that were not possible without + * kmalloc. Make sure to call it late enough (after mm_init). + */ +void __init bcm47xx_bus_setup(void) +{ +#ifdef CONFIG_BCM47XX_BCMA + if (bcm47xx_bus_type == BCM47XX_BUS_TYPE_BCMA) { + int err; + + err = bcma_host_soc_init(&bcm47xx_bus.bcma); + if (err) + panic("Failed to initialize BCMA bus (err %d)", err); + + bcm47xx_fill_bcma_boardinfo(&bcm47xx_bus.bcma.bus.boardinfo, + NULL); + } +#endif + + /* With bus initialized we can access NVRAM and detect the board */ bcm47xx_board_detect(); mips_set_machine_name(bcm47xx_board_get_name()); } diff --git a/arch/mips/bcm47xx/sprom.c b/arch/mips/bcm47xx/sprom.c index 41226b68de3d..2eff7fe99c6b 100644 --- a/arch/mips/bcm47xx/sprom.c +++ b/arch/mips/bcm47xx/sprom.c @@ -136,6 +136,20 @@ static void nvram_read_leddc(const char *prefix, const char *name, *leddc_off_time = (val >> 16) & 0xff; } +static void bcm47xx_nvram_parse_macaddr(char *buf, u8 macaddr[6]) +{ + if (strchr(buf, ':')) + sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &macaddr[0], + &macaddr[1], &macaddr[2], &macaddr[3], &macaddr[4], + &macaddr[5]); + else if (strchr(buf, '-')) + sscanf(buf, "%hhx-%hhx-%hhx-%hhx-%hhx-%hhx", &macaddr[0], + &macaddr[1], &macaddr[2], &macaddr[3], &macaddr[4], + &macaddr[5]); + else + pr_warn("Can not parse mac address: %s\n", buf); +} + static void nvram_read_macaddr(const char *prefix, const char *name, u8 val[6], bool fallback) { @@ -801,3 +815,71 @@ void bcm47xx_fill_bcma_boardinfo(struct bcma_boardinfo *boardinfo, nvram_read_u16(prefix, NULL, "boardtype", &boardinfo->type, 0, true); } #endif + +#if defined(CONFIG_BCM47XX_SSB) +static int bcm47xx_get_sprom_ssb(struct ssb_bus *bus, struct ssb_sprom *out) +{ + char prefix[10]; + + if (bus->bustype == SSB_BUSTYPE_PCI) { + memset(out, 0, sizeof(struct ssb_sprom)); + snprintf(prefix, sizeof(prefix), "pci/%u/%u/", + bus->host_pci->bus->number + 1, + PCI_SLOT(bus->host_pci->devfn)); + bcm47xx_fill_sprom(out, prefix, false); + return 0; + } else { + pr_warn("bcm47xx: unable to fill SPROM for given bustype.\n"); + return -EINVAL; + } +} +#endif + +#if defined(CONFIG_BCM47XX_BCMA) +static int bcm47xx_get_sprom_bcma(struct bcma_bus *bus, struct ssb_sprom *out) +{ + char prefix[10]; + struct bcma_device *core; + + switch (bus->hosttype) { + case BCMA_HOSTTYPE_PCI: + memset(out, 0, sizeof(struct ssb_sprom)); + snprintf(prefix, sizeof(prefix), "pci/%u/%u/", + bus->host_pci->bus->number + 1, + PCI_SLOT(bus->host_pci->devfn)); + bcm47xx_fill_sprom(out, prefix, false); + return 0; + case BCMA_HOSTTYPE_SOC: + memset(out, 0, sizeof(struct ssb_sprom)); + core = bcma_find_core(bus, BCMA_CORE_80211); + 
if (core) { + snprintf(prefix, sizeof(prefix), "sb/%u/", + core->core_index); + bcm47xx_fill_sprom(out, prefix, true); + } else { + bcm47xx_fill_sprom(out, NULL, false); + } + return 0; + default: + pr_warn("bcm47xx: unable to fill SPROM for given bustype.\n"); + return -EINVAL; + } +} +#endif + +/* + * On bcm47xx we need to register SPROM fallback handler very early, so we can't + * use anything like platform device / driver for this. + */ +void bcm47xx_sprom_register_fallbacks(void) +{ +#if defined(CONFIG_BCM47XX_SSB) + if (ssb_arch_register_fallback_sprom(&bcm47xx_get_sprom_ssb)) + pr_warn("Failed to registered ssb SPROM handler\n"); +#endif + +#if defined(CONFIG_BCM47XX_BCMA) + if (bcma_arch_register_fallback_sprom(&bcm47xx_get_sprom_bcma)) + pr_warn("Failed to registered bcma SPROM handler\n"); +#endif +} diff --git a/arch/mips/bcm63xx/cpu.c b/arch/mips/bcm63xx/cpu.c index 536f64443031..307ec8b8e41c 100644 --- a/arch/mips/bcm63xx/cpu.c +++ b/arch/mips/bcm63xx/cpu.c @@ -263,7 +263,7 @@ static unsigned int detect_memory_size(void) if (BCMCPU_IS_6345()) { val = bcm_sdram_readl(SDRAM_MBASE_REG); - return (val * 8 * 1024 * 1024); + return val * 8 * 1024 * 1024; } if (BCMCPU_IS_6338() || BCMCPU_IS_6348()) { diff --git a/arch/mips/boot/dts/Makefile b/arch/mips/boot/dts/Makefile index ca9c90e2cabf..4f49fa477f14 100644 --- a/arch/mips/boot/dts/Makefile +++ b/arch/mips/boot/dts/Makefile @@ -1,3 +1,4 @@ +dtb-$(CONFIG_BCM3384) += bcm93384wvg.dtb dtb-$(CONFIG_CAVIUM_OCTEON_SOC) += octeon_3xxx.dtb octeon_68xx.dtb dtb-$(CONFIG_DT_EASY50712) += easy50712.dtb dtb-$(CONFIG_DT_XLP_EVP) += xlp_evp.dtb diff --git a/arch/mips/boot/dts/bcm3384.dtsi b/arch/mips/boot/dts/bcm3384.dtsi new file mode 100644 index 000000000000..21b074a99c94 --- /dev/null +++ b/arch/mips/boot/dts/bcm3384.dtsi @@ -0,0 +1,109 @@ +/ { + #address-cells = <1>; + #size-cells = <1>; + compatible = "brcm,bcm3384", "brcm,bcm33843"; + + cpus { + #address-cells = <1>; + #size-cells = <0>; + + /* On BMIPS5000 this is 1/8th of the CPU core clock */ + mips-hpt-frequency = <100000000>; + + cpu@0 { + compatible = "brcm,bmips5000"; + device_type = "cpu"; + reg = <0>; + }; + + cpu@1 { + compatible = "brcm,bmips5000"; + device_type = "cpu"; + reg = <1>; + }; + }; + + clocks { + #address-cells = <1>; + #size-cells = <0>; + + periph_clk: periph_clk@0 { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <54000000>; + }; + }; + + aliases { + uart0 = &uart0; + }; + + cpu_intc: cpu_intc@0 { + #address-cells = <0>; + compatible = "mti,cpu-interrupt-controller"; + + interrupt-controller; + #interrupt-cells = <1>; + }; + + periph_intc: periph_intc@14e00038 { + compatible = "brcm,bcm3384-intc"; + reg = <0x14e00038 0x8 0x14e00340 0x8>; + + interrupt-controller; + #interrupt-cells = <1>; + + interrupt-parent = <&cpu_intc>; + interrupts = <4>; + }; + + zmips_intc: zmips_intc@104b0060 { + compatible = "brcm,bcm3384-intc"; + reg = <0x104b0060 0x8>; + + interrupt-controller; + #interrupt-cells = <1>; + + interrupt-parent = <&periph_intc>; + interrupts = <29>; + }; + + iop_intc: iop_intc@14e00058 { + compatible = "brcm,bcm3384-intc"; + reg = <0x14e00058 0x8>; + + interrupt-controller; + #interrupt-cells = <1>; + + interrupt-parent = <&cpu_intc>; + interrupts = <6>; + }; + + uart0: serial@14e00520 { + compatible = "brcm,bcm6345-uart"; + reg = <0x14e00520 0x18>; + interrupt-parent = <&periph_intc>; + interrupts = <2>; + clocks = <&periph_clk>; + status = "disabled"; + }; + + ehci0: usb@15400300 { + compatible = "brcm,bcm3384-ehci", "generic-ehci"; 
+ reg = <0x15400300 0x100>; + big-endian; + interrupt-parent = <&periph_intc>; + interrupts = <41>; + status = "disabled"; + }; + + ohci0: usb@15400400 { + compatible = "brcm,bcm3384-ohci", "generic-ohci"; + reg = <0x15400400 0x100>; + big-endian; + no-big-frame-no; + interrupt-parent = <&periph_intc>; + interrupts = <40>; + status = "disabled"; + }; +}; diff --git a/arch/mips/boot/dts/bcm93384wvg.dts b/arch/mips/boot/dts/bcm93384wvg.dts new file mode 100644 index 000000000000..831741179212 --- /dev/null +++ b/arch/mips/boot/dts/bcm93384wvg.dts @@ -0,0 +1,32 @@ +/dts-v1/; + +/include/ "bcm3384.dtsi" + +/ { + compatible = "brcm,bcm93384wvg", "brcm,bcm3384"; + model = "Broadcom BCM93384WVG"; + + chosen { + bootargs = "console=ttyS0,115200"; + stdout-path = &uart0; + }; + + memory@0 { + device_type = "memory"; + reg = <0x0 0x04000000>; + dma-xor-mask = <0x08000000>; + dma-xor-limit = <0x0fffffff>; + }; +}; + +&uart0 { + status = "okay"; +}; + +&ehci0 { + status = "okay"; +}; + +&ohci0 { + status = "okay"; +}; diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c index 02f244475207..3778655c4a37 100644 --- a/arch/mips/cavium-octeon/dma-octeon.c +++ b/arch/mips/cavium-octeon/dma-octeon.c @@ -262,8 +262,8 @@ char *octeon_swiotlb; void __init plat_swiotlb_setup(void) { int i; - phys_t max_addr; - phys_t addr_size; + phys_addr_t max_addr; + phys_addr_t addr_size; size_t swiotlbsize; unsigned long swiotlb_nslabs; diff --git a/arch/mips/cavium-octeon/executive/octeon-model.c b/arch/mips/cavium-octeon/executive/octeon-model.c index f4c1b36fdf65..e15b049b3bd7 100644 --- a/arch/mips/cavium-octeon/executive/octeon-model.c +++ b/arch/mips/cavium-octeon/executive/octeon-model.c @@ -28,22 +28,23 @@ #include <asm/octeon/octeon.h> /** - * Given the chip processor ID from COP0, this function returns a - * string representing the chip model number. The string is of the - * form CNXXXXpX.X-FREQ-SUFFIX. - * - XXXX = The chip model number - * - X.X = Chip pass number - * - FREQ = Current frequency in Mhz - * - SUFFIX = NSP, EXP, SCP, SSP, or CP - * - * @chip_id: Chip ID + * Read a byte of fuse data + * @byte_addr: address to read * - * Returns Model string + * Returns fuse value: 0 or 1 */ -const char *octeon_model_get_string(uint32_t chip_id) +static uint8_t __init cvmx_fuse_read_byte(int byte_addr) { - static char buffer[32]; - return octeon_model_get_string_buffer(chip_id, buffer); + union cvmx_mio_fus_rcmd read_cmd; + + read_cmd.u64 = 0; + read_cmd.s.addr = byte_addr; + read_cmd.s.pend = 1; + cvmx_write_csr(CVMX_MIO_FUS_RCMD, read_cmd.u64); + while ((read_cmd.u64 = cvmx_read_csr(CVMX_MIO_FUS_RCMD)) + && read_cmd.s.pend) + ; + return read_cmd.s.dat; } /* @@ -51,7 +52,8 @@ const char *octeon_model_get_string(uint32_t chip_id) * as running early in u-boot static/global variables don't work when * running from flash. */ -const char *octeon_model_get_string_buffer(uint32_t chip_id, char *buffer) +static const char *__init octeon_model_get_string_buffer(uint32_t chip_id, + char *buffer) { const char *family; const char *core_model; @@ -407,3 +409,22 @@ const char *octeon_model_get_string_buffer(uint32_t chip_id, char *buffer) sprintf(buffer, "CN%s%sp%s-%d-%s", family, core_model, pass, clock_mhz, suffix); return buffer; } + +/** + * Given the chip processor ID from COP0, this function returns a + * string representing the chip model number. The string is of the + * form CNXXXXpX.X-FREQ-SUFFIX. 
+ * - XXXX = The chip model number + * - X.X = Chip pass number + * - FREQ = Current frequency in Mhz + * - SUFFIX = NSP, EXP, SCP, SSP, or CP + * + * @chip_id: Chip ID + * + * Returns Model string + */ +const char *__init octeon_model_get_string(uint32_t chip_id) +{ + static char buffer[32]; + return octeon_model_get_string_buffer(chip_id, buffer); +} diff --git a/arch/mips/cavium-octeon/octeon-platform.c b/arch/mips/cavium-octeon/octeon-platform.c index 6df0f4d8f197..b67ddf0f8bcd 100644 --- a/arch/mips/cavium-octeon/octeon-platform.c +++ b/arch/mips/cavium-octeon/octeon-platform.c @@ -7,22 +7,27 @@ * Copyright (C) 2008 Wind River Systems */ +#include <linux/delay.h> #include <linux/init.h> #include <linux/irq.h> #include <linux/i2c.h> #include <linux/usb.h> #include <linux/dma-mapping.h> #include <linux/module.h> +#include <linux/mutex.h> #include <linux/slab.h> #include <linux/platform_device.h> #include <linux/of_platform.h> #include <linux/of_fdt.h> #include <linux/libfdt.h> +#include <linux/usb/ehci_pdriver.h> +#include <linux/usb/ohci_pdriver.h> #include <asm/octeon/octeon.h> #include <asm/octeon/cvmx-rnm-defs.h> #include <asm/octeon/cvmx-helper.h> #include <asm/octeon/cvmx-helper-board.h> +#include <asm/octeon/cvmx-uctlx-defs.h> /* Octeon Random Number Generator. */ static int __init octeon_rng_device_init(void) @@ -68,6 +73,229 @@ device_initcall(octeon_rng_device_init); #ifdef CONFIG_USB +static DEFINE_MUTEX(octeon2_usb_clocks_mutex); + +static int octeon2_usb_clock_start_cnt; + +static void octeon2_usb_clocks_start(void) +{ + u64 div; + union cvmx_uctlx_if_ena if_ena; + union cvmx_uctlx_clk_rst_ctl clk_rst_ctl; + union cvmx_uctlx_uphy_ctl_status uphy_ctl_status; + union cvmx_uctlx_uphy_portx_ctl_status port_ctl_status; + int i; + unsigned long io_clk_64_to_ns; + + + mutex_lock(&octeon2_usb_clocks_mutex); + + octeon2_usb_clock_start_cnt++; + if (octeon2_usb_clock_start_cnt != 1) + goto exit; + + io_clk_64_to_ns = 64000000000ull / octeon_get_io_clock_rate(); + + /* + * Step 1: Wait for voltages stable. That surely happened + * before starting the kernel. + * + * Step 2: Enable SCLK of UCTL by writing UCTL0_IF_ENA[EN] = 1 + */ + if_ena.u64 = 0; + if_ena.s.en = 1; + cvmx_write_csr(CVMX_UCTLX_IF_ENA(0), if_ena.u64); + + /* Step 3: Configure the reference clock, PHY, and HCLK */ + clk_rst_ctl.u64 = cvmx_read_csr(CVMX_UCTLX_CLK_RST_CTL(0)); + + /* + * If the UCTL looks like it has already been started, skip + * the initialization, otherwise bus errors are obtained. + */ + if (clk_rst_ctl.s.hrst) + goto end_clock; + /* 3a */ + clk_rst_ctl.s.p_por = 1; + clk_rst_ctl.s.hrst = 0; + clk_rst_ctl.s.p_prst = 0; + clk_rst_ctl.s.h_clkdiv_rst = 0; + clk_rst_ctl.s.o_clkdiv_rst = 0; + clk_rst_ctl.s.h_clkdiv_en = 0; + clk_rst_ctl.s.o_clkdiv_en = 0; + cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64); + + /* 3b */ + /* 12MHz crystal. 
*/ + clk_rst_ctl.s.p_refclk_sel = 0; + clk_rst_ctl.s.p_refclk_div = 0; + cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64); + + /* 3c */ + div = octeon_get_io_clock_rate() / 130000000ull; + + switch (div) { + case 0: + div = 1; + break; + case 1: + case 2: + case 3: + case 4: + break; + case 5: + div = 4; + break; + case 6: + case 7: + div = 6; + break; + case 8: + case 9: + case 10: + case 11: + div = 8; + break; + default: + div = 12; + break; + } + clk_rst_ctl.s.h_div = div; + cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64); + /* Read it back, */ + clk_rst_ctl.u64 = cvmx_read_csr(CVMX_UCTLX_CLK_RST_CTL(0)); + clk_rst_ctl.s.h_clkdiv_en = 1; + cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64); + /* 3d */ + clk_rst_ctl.s.h_clkdiv_rst = 1; + cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64); + + /* 3e: delay 64 io clocks */ + ndelay(io_clk_64_to_ns); + + /* + * Step 4: Program the power-on reset field in the UCTL + * clock-reset-control register. + */ + clk_rst_ctl.s.p_por = 0; + cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64); + + /* Step 5: Wait 1 ms for the PHY clock to start. */ + mdelay(1); + + /* + * Step 6: Program the reset input from automatic test + * equipment field in the UPHY CSR + */ + uphy_ctl_status.u64 = cvmx_read_csr(CVMX_UCTLX_UPHY_CTL_STATUS(0)); + uphy_ctl_status.s.ate_reset = 1; + cvmx_write_csr(CVMX_UCTLX_UPHY_CTL_STATUS(0), uphy_ctl_status.u64); + + /* Step 7: Wait for at least 10ns. */ + ndelay(10); + + /* Step 8: Clear the ATE_RESET field in the UPHY CSR. */ + uphy_ctl_status.s.ate_reset = 0; + cvmx_write_csr(CVMX_UCTLX_UPHY_CTL_STATUS(0), uphy_ctl_status.u64); + + /* + * Step 9: Wait for at least 20ns for UPHY to output PHY clock + * signals and OHCI_CLK48 + */ + ndelay(20); + + /* Step 10: Configure the OHCI_CLK48 and OHCI_CLK12 clocks. */ + /* 10a */ + clk_rst_ctl.s.o_clkdiv_rst = 1; + cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64); + + /* 10b */ + clk_rst_ctl.s.o_clkdiv_en = 1; + cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64); + + /* 10c */ + ndelay(io_clk_64_to_ns); + + /* + * Step 11: Program the PHY reset field: + * UCTL0_CLK_RST_CTL[P_PRST] = 1 + */ + clk_rst_ctl.s.p_prst = 1; + cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64); + + /* Step 12: Wait 1 uS. */ + udelay(1); + + /* Step 13: Program the HRESET_N field: UCTL0_CLK_RST_CTL[HRST] = 1 */ + clk_rst_ctl.s.hrst = 1; + cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64); + +end_clock: + /* Now we can set some other registers. */ + + for (i = 0; i <= 1; i++) { + port_ctl_status.u64 = + cvmx_read_csr(CVMX_UCTLX_UPHY_PORTX_CTL_STATUS(i, 0)); + /* Set txvreftune to 15 to obtain compliant 'eye' diagram. */ + port_ctl_status.s.txvreftune = 15; + port_ctl_status.s.txrisetune = 1; + port_ctl_status.s.txpreemphasistune = 1; + cvmx_write_csr(CVMX_UCTLX_UPHY_PORTX_CTL_STATUS(i, 0), + port_ctl_status.u64); + } + + /* Set uSOF cycle period to 60,000 bits. */ + cvmx_write_csr(CVMX_UCTLX_EHCI_FLA(0), 0x20ull); +exit: + mutex_unlock(&octeon2_usb_clocks_mutex); +} + +static void octeon2_usb_clocks_stop(void) +{ + mutex_lock(&octeon2_usb_clocks_mutex); + octeon2_usb_clock_start_cnt--; + mutex_unlock(&octeon2_usb_clocks_mutex); +} + +static int octeon_ehci_power_on(struct platform_device *pdev) +{ + octeon2_usb_clocks_start(); + return 0; +} + +static void octeon_ehci_power_off(struct platform_device *pdev) +{ + octeon2_usb_clocks_stop(); +} + +static struct usb_ehci_pdata octeon_ehci_pdata = { + /* Octeon EHCI matches CPU endianness. 
*/ +#ifdef __BIG_ENDIAN + .big_endian_mmio = 1, +#endif + .power_on = octeon_ehci_power_on, + .power_off = octeon_ehci_power_off, +}; + +static void __init octeon_ehci_hw_start(void) +{ + union cvmx_uctlx_ehci_ctl ehci_ctl; + + octeon2_usb_clocks_start(); + + ehci_ctl.u64 = cvmx_read_csr(CVMX_UCTLX_EHCI_CTL(0)); + /* Use 64-bit addressing. */ + ehci_ctl.s.ehci_64b_addr_en = 1; + ehci_ctl.s.l2c_addr_msb = 0; + ehci_ctl.s.l2c_buff_emod = 1; /* Byte swapped. */ + ehci_ctl.s.l2c_desc_emod = 1; /* Byte swapped. */ + cvmx_write_csr(CVMX_UCTLX_EHCI_CTL(0), ehci_ctl.u64); + + octeon2_usb_clocks_stop(); +} + +static u64 octeon_ehci_dma_mask = DMA_BIT_MASK(64); + static int __init octeon_ehci_device_init(void) { struct platform_device *pd; @@ -88,7 +316,7 @@ static int __init octeon_ehci_device_init(void) if (octeon_is_simulation() || usb_disabled()) return 0; /* No USB in the simulator. */ - pd = platform_device_alloc("octeon-ehci", 0); + pd = platform_device_alloc("ehci-platform", 0); if (!pd) { ret = -ENOMEM; goto out; @@ -105,6 +333,10 @@ static int __init octeon_ehci_device_init(void) if (ret) goto fail; + pd->dev.dma_mask = &octeon_ehci_dma_mask; + pd->dev.platform_data = &octeon_ehci_pdata; + octeon_ehci_hw_start(); + ret = platform_device_add(pd); if (ret) goto fail; @@ -117,6 +349,41 @@ out: } device_initcall(octeon_ehci_device_init); +static int octeon_ohci_power_on(struct platform_device *pdev) +{ + octeon2_usb_clocks_start(); + return 0; +} + +static void octeon_ohci_power_off(struct platform_device *pdev) +{ + octeon2_usb_clocks_stop(); +} + +static struct usb_ohci_pdata octeon_ohci_pdata = { + /* Octeon OHCI matches CPU endianness. */ +#ifdef __BIG_ENDIAN + .big_endian_mmio = 1, +#endif + .power_on = octeon_ohci_power_on, + .power_off = octeon_ohci_power_off, +}; + +static void __init octeon_ohci_hw_start(void) +{ + union cvmx_uctlx_ohci_ctl ohci_ctl; + + octeon2_usb_clocks_start(); + + ohci_ctl.u64 = cvmx_read_csr(CVMX_UCTLX_OHCI_CTL(0)); + ohci_ctl.s.l2c_addr_msb = 0; + ohci_ctl.s.l2c_buff_emod = 1; /* Byte swapped. */ + ohci_ctl.s.l2c_desc_emod = 1; /* Byte swapped. */ + cvmx_write_csr(CVMX_UCTLX_OHCI_CTL(0), ohci_ctl.u64); + + octeon2_usb_clocks_stop(); +} + static int __init octeon_ohci_device_init(void) { struct platform_device *pd; @@ -137,7 +404,7 @@ static int __init octeon_ohci_device_init(void) if (octeon_is_simulation() || usb_disabled()) return 0; /* No USB in the simulator. 
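[Editor's note] The EHCI and OHCI power_on/power_off hooks registered above both funnel into octeon2_usb_clocks_start()/_stop(), which guard the one-time UCTL bring-up with a mutex-protected use count (plus a check of the HRST bit in case firmware already ran the sequence). A minimal sketch of that refcounting pattern, using a pthread mutex in place of the kernel mutex (names and output are hypothetical):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t usb_clk_lock = PTHREAD_MUTEX_INITIALIZER;
    static int usb_clk_users;

    static void usb_clocks_start(void)
    {
        pthread_mutex_lock(&usb_clk_lock);
        if (++usb_clk_users == 1)
            printf("first user: run the UCTL clock/reset sequence once\n");
        else
            printf("already running, users=%d\n", usb_clk_users);
        pthread_mutex_unlock(&usb_clk_lock);
    }

    static void usb_clocks_stop(void)
    {
        pthread_mutex_lock(&usb_clk_lock);
        usb_clk_users--;    /* like the kernel code above, never re-gates */
        pthread_mutex_unlock(&usb_clk_lock);
    }

    int main(void)
    {
        usb_clocks_start();  /* e.g. EHCI probe */
        usb_clocks_start();  /* e.g. OHCI probe */
        usb_clocks_stop();
        usb_clocks_stop();
        return 0;
    }

Only the first caller performs the hardware sequence; subsequent controllers simply bump the count, which is why both octeon_ehci_pdata and octeon_ohci_pdata can share the same start/stop helpers.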
*/ - pd = platform_device_alloc("octeon-ohci", 0); + pd = platform_device_alloc("ohci-platform", 0); if (!pd) { ret = -ENOMEM; goto out; @@ -154,6 +421,9 @@ static int __init octeon_ohci_device_init(void) if (ret) goto fail; + pd->dev.platform_data = &octeon_ohci_pdata; + octeon_ohci_hw_start(); + ret = platform_device_add(pd); if (ret) goto fail; diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c index 5ebdb32d9a2b..94f888d3384e 100644 --- a/arch/mips/cavium-octeon/setup.c +++ b/arch/mips/cavium-octeon/setup.c @@ -1092,7 +1092,7 @@ static int __init edac_devinit(void) name = edac_device_names[i]; dev = platform_device_register_simple(name, -1, NULL, 0); if (IS_ERR(dev)) { - pr_err("Registation of %s failed!\n", name); + pr_err("Registration of %s failed!\n", name); err = PTR_ERR(dev); } } @@ -1103,7 +1103,7 @@ static int __init edac_devinit(void) dev = platform_device_register_simple("octeon_lmc_edac", i, NULL, 0); if (IS_ERR(dev)) { - pr_err("Registation of octeon_lmc_edac %d failed!\n", i); + pr_err("Registration of octeon_lmc_edac %d failed!\n", i); err = PTR_ERR(dev); } } diff --git a/arch/mips/configs/bcm3384_defconfig b/arch/mips/configs/bcm3384_defconfig new file mode 100644 index 000000000000..88711c28ff32 --- /dev/null +++ b/arch/mips/configs/bcm3384_defconfig @@ -0,0 +1,78 @@ +CONFIG_BCM3384=y +CONFIG_HIGHMEM=y +CONFIG_SMP=y +CONFIG_NR_CPUS=4 +# CONFIG_SECCOMP is not set +CONFIG_MIPS_O32_FP64_SUPPORT=y +# CONFIG_LOCALVERSION_AUTO is not set +# CONFIG_SWAP is not set +CONFIG_NO_HZ=y +CONFIG_BLK_DEV_INITRD=y +# CONFIG_RD_GZIP is not set +CONFIG_EXPERT=y +# CONFIG_VM_EVENT_COUNTERS is not set +# CONFIG_SLUB_DEBUG is not set +# CONFIG_BLK_DEV_BSG is not set +# CONFIG_IOSCHED_DEADLINE is not set +# CONFIG_IOSCHED_CFQ is not set +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=y +CONFIG_UNIX=y +CONFIG_INET=y +# CONFIG_INET_XFRM_MODE_TRANSPORT is not set +# CONFIG_INET_XFRM_MODE_TUNNEL is not set +# CONFIG_INET_XFRM_MODE_BEET is not set +# CONFIG_INET_LRO is not set +# CONFIG_INET_DIAG is not set +CONFIG_CFG80211=y +CONFIG_NL80211_TESTMODE=y +CONFIG_MAC80211=y +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +# CONFIG_STANDALONE is not set +# CONFIG_PREVENT_FIRMWARE_BUILD is not set +CONFIG_MTD=y +CONFIG_MTD_CFI=y +CONFIG_MTD_CFI_INTELEXT=y +CONFIG_MTD_CFI_AMDSTD=y +CONFIG_MTD_PHYSMAP=y +# CONFIG_BLK_DEV is not set +CONFIG_SCSI=y +CONFIG_BLK_DEV_SD=y +# CONFIG_SCSI_LOWLEVEL is not set +CONFIG_NETDEVICES=y +CONFIG_USB_USBNET=y +# CONFIG_INPUT is not set +# CONFIG_SERIO is not set +# CONFIG_VT is not set +# CONFIG_DEVKMEM is not set +CONFIG_SERIAL_EARLYCON_FORCE=y +CONFIG_SERIAL_BCM63XX=y +CONFIG_SERIAL_BCM63XX_CONSOLE=y +# CONFIG_HW_RANDOM is not set +# CONFIG_HWMON is not set +CONFIG_USB=y +CONFIG_USB_EHCI_HCD=y +# CONFIG_USB_EHCI_TT_NEWSCHED is not set +CONFIG_USB_EHCI_HCD_PLATFORM=y +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_OHCI_HCD_PLATFORM=y +CONFIG_USB_STORAGE=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +# CONFIG_DNOTIFY is not set +CONFIG_FUSE_FS=y +CONFIG_VFAT_FS=y +CONFIG_PROC_KCORE=y +CONFIG_TMPFS=y +CONFIG_NFS_FS=y +CONFIG_CIFS=y +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=y +CONFIG_DEBUG_FS=y +CONFIG_MAGIC_SYSRQ=y +# CONFIG_CRYPTO_HW is not set diff --git a/arch/mips/configs/cavium_octeon_defconfig b/arch/mips/configs/cavium_octeon_defconfig index b2476a1c4aaa..e57058d4ec22 100644 --- a/arch/mips/configs/cavium_octeon_defconfig +++ 
b/arch/mips/configs/cavium_octeon_defconfig @@ -120,6 +120,9 @@ CONFIG_SPI_OCTEON=y # CONFIG_HWMON is not set CONFIG_WATCHDOG=y # CONFIG_USB_SUPPORT is not set +CONFIG_USB_EHCI_BIG_ENDIAN_MMIO=y +CONFIG_USB_OHCI_BIG_ENDIAN_MMIO=y +CONFIG_USB_OHCI_LITTLE_ENDIAN=y CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_DS1307=y CONFIG_STAGING=y diff --git a/arch/mips/fw/lib/cmdline.c b/arch/mips/fw/lib/cmdline.c index ffd0345780ae..6ecda64ad184 100644 --- a/arch/mips/fw/lib/cmdline.c +++ b/arch/mips/fw/lib/cmdline.c @@ -68,7 +68,7 @@ char *fw_getenv(char *envname) result = fw_envp(index + 1); break; } else if (fw_envp(index)[i] == '=') { - result = (fw_envp(index + 1) + i); + result = fw_envp(index) + i + 1; break; } } @@ -88,13 +88,13 @@ unsigned long fw_getenvl(char *envname) { unsigned long envl = 0UL; char *str; - long val; int tmp; str = fw_getenv(envname); if (str) { - tmp = kstrtol(str, 0, &val); - envl = (unsigned long)val; + tmp = kstrtoul(str, 0, &envl); + if (tmp) + envl = 0; } return envl; diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild index 72e1cf1cab00..200efeac4181 100644 --- a/arch/mips/include/asm/Kbuild +++ b/arch/mips/include/asm/Kbuild @@ -3,7 +3,6 @@ generic-y += cputime.h generic-y += current.h generic-y += dma-contiguous.h generic-y += emergency-restart.h -generic-y += hash.h generic-y += irq_work.h generic-y += local64.h generic-y += mcs_spinlock.h diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h index 6dd6bfc607e9..857da84cfc92 100644 --- a/arch/mips/include/asm/atomic.h +++ b/arch/mips/include/asm/atomic.h @@ -17,6 +17,7 @@ #include <linux/irqflags.h> #include <linux/types.h> #include <asm/barrier.h> +#include <asm/compiler.h> #include <asm/cpu-features.h> #include <asm/cmpxchg.h> #include <asm/war.h> @@ -40,95 +41,97 @@ */ #define atomic_set(v, i) ((v)->counter = (i)) -#define ATOMIC_OP(op, c_op, asm_op) \ -static __inline__ void atomic_##op(int i, atomic_t * v) \ -{ \ - if (kernel_uses_llsc && R10000_LLSC_WAR) { \ - int temp; \ - \ - __asm__ __volatile__( \ - " .set arch=r4000 \n" \ - "1: ll %0, %1 # atomic_" #op " \n" \ - " " #asm_op " %0, %2 \n" \ - " sc %0, %1 \n" \ - " beqzl %0, 1b \n" \ - " .set mips0 \n" \ - : "=&r" (temp), "+m" (v->counter) \ - : "Ir" (i)); \ - } else if (kernel_uses_llsc) { \ - int temp; \ - \ - do { \ - __asm__ __volatile__( \ - " .set arch=r4000 \n" \ - " ll %0, %1 # atomic_" #op "\n" \ - " " #asm_op " %0, %2 \n" \ - " sc %0, %1 \n" \ - " .set mips0 \n" \ - : "=&r" (temp), "+m" (v->counter) \ - : "Ir" (i)); \ - } while (unlikely(!temp)); \ - } else { \ - unsigned long flags; \ - \ - raw_local_irq_save(flags); \ - v->counter c_op i; \ - raw_local_irq_restore(flags); \ - } \ -} \ - -#define ATOMIC_OP_RETURN(op, c_op, asm_op) \ -static __inline__ int atomic_##op##_return(int i, atomic_t * v) \ -{ \ - int result; \ - \ - smp_mb__before_llsc(); \ - \ - if (kernel_uses_llsc && R10000_LLSC_WAR) { \ - int temp; \ - \ - __asm__ __volatile__( \ - " .set arch=r4000 \n" \ - "1: ll %1, %2 # atomic_" #op "_return \n" \ - " " #asm_op " %0, %1, %3 \n" \ - " sc %0, %2 \n" \ - " beqzl %0, 1b \n" \ - " " #asm_op " %0, %1, %3 \n" \ - " .set mips0 \n" \ - : "=&r" (result), "=&r" (temp), "+m" (v->counter) \ - : "Ir" (i)); \ - } else if (kernel_uses_llsc) { \ - int temp; \ - \ - do { \ - __asm__ __volatile__( \ - " .set arch=r4000 \n" \ - " ll %1, %2 # atomic_" #op "_return \n" \ - " " #asm_op " %0, %1, %3 \n" \ - " sc %0, %2 \n" \ - " .set mips0 \n" \ - : "=&r" (result), "=&r" (temp), "+m" (v->counter) \ - : "Ir" (i)); \ - } 
while (unlikely(!result)); \ - \ - result = temp; result c_op i; \ - } else { \ - unsigned long flags; \ - \ - raw_local_irq_save(flags); \ - result = v->counter; \ - result c_op i; \ - v->counter = result; \ - raw_local_irq_restore(flags); \ - } \ - \ - smp_llsc_mb(); \ - \ - return result; \ +#define ATOMIC_OP(op, c_op, asm_op) \ +static __inline__ void atomic_##op(int i, atomic_t * v) \ +{ \ + if (kernel_uses_llsc && R10000_LLSC_WAR) { \ + int temp; \ + \ + __asm__ __volatile__( \ + " .set arch=r4000 \n" \ + "1: ll %0, %1 # atomic_" #op " \n" \ + " " #asm_op " %0, %2 \n" \ + " sc %0, %1 \n" \ + " beqzl %0, 1b \n" \ + " .set mips0 \n" \ + : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \ + : "Ir" (i)); \ + } else if (kernel_uses_llsc) { \ + int temp; \ + \ + do { \ + __asm__ __volatile__( \ + " .set arch=r4000 \n" \ + " ll %0, %1 # atomic_" #op "\n" \ + " " #asm_op " %0, %2 \n" \ + " sc %0, %1 \n" \ + " .set mips0 \n" \ + : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \ + : "Ir" (i)); \ + } while (unlikely(!temp)); \ + } else { \ + unsigned long flags; \ + \ + raw_local_irq_save(flags); \ + v->counter c_op i; \ + raw_local_irq_restore(flags); \ + } \ } -#define ATOMIC_OPS(op, c_op, asm_op) \ - ATOMIC_OP(op, c_op, asm_op) \ +#define ATOMIC_OP_RETURN(op, c_op, asm_op) \ +static __inline__ int atomic_##op##_return(int i, atomic_t * v) \ +{ \ + int result; \ + \ + smp_mb__before_llsc(); \ + \ + if (kernel_uses_llsc && R10000_LLSC_WAR) { \ + int temp; \ + \ + __asm__ __volatile__( \ + " .set arch=r4000 \n" \ + "1: ll %1, %2 # atomic_" #op "_return \n" \ + " " #asm_op " %0, %1, %3 \n" \ + " sc %0, %2 \n" \ + " beqzl %0, 1b \n" \ + " " #asm_op " %0, %1, %3 \n" \ + " .set mips0 \n" \ + : "=&r" (result), "=&r" (temp), \ + "+" GCC_OFF12_ASM() (v->counter) \ + : "Ir" (i)); \ + } else if (kernel_uses_llsc) { \ + int temp; \ + \ + do { \ + __asm__ __volatile__( \ + " .set arch=r4000 \n" \ + " ll %1, %2 # atomic_" #op "_return \n" \ + " " #asm_op " %0, %1, %3 \n" \ + " sc %0, %2 \n" \ + " .set mips0 \n" \ + : "=&r" (result), "=&r" (temp), \ + "+" GCC_OFF12_ASM() (v->counter) \ + : "Ir" (i)); \ + } while (unlikely(!result)); \ + \ + result = temp; result c_op i; \ + } else { \ + unsigned long flags; \ + \ + raw_local_irq_save(flags); \ + result = v->counter; \ + result c_op i; \ + v->counter = result; \ + raw_local_irq_restore(flags); \ + } \ + \ + smp_llsc_mb(); \ + \ + return result; \ +} + +#define ATOMIC_OPS(op, c_op, asm_op) \ + ATOMIC_OP(op, c_op, asm_op) \ ATOMIC_OP_RETURN(op, c_op, asm_op) ATOMIC_OPS(add, +=, addu) @@ -167,8 +170,9 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v) " .set reorder \n" "1: \n" " .set mips0 \n" - : "=&r" (result), "=&r" (temp), "+m" (v->counter) - : "Ir" (i), "m" (v->counter) + : "=&r" (result), "=&r" (temp), + "+" GCC_OFF12_ASM() (v->counter) + : "Ir" (i), GCC_OFF12_ASM() (v->counter) : "memory"); } else if (kernel_uses_llsc) { int temp; @@ -185,7 +189,8 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v) " .set reorder \n" "1: \n" " .set mips0 \n" - : "=&r" (result), "=&r" (temp), "+m" (v->counter) + : "=&r" (result), "=&r" (temp), + "+" GCC_OFF12_ASM() (v->counter) : "Ir" (i)); } else { unsigned long flags; @@ -315,96 +320,98 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) */ #define atomic64_set(v, i) ((v)->counter = (i)) -#define ATOMIC64_OP(op, c_op, asm_op) \ -static __inline__ void atomic64_##op(long i, atomic64_t * v) \ -{ \ - if (kernel_uses_llsc && R10000_LLSC_WAR) { \ - long temp; \ - \ - 
__asm__ __volatile__( \ - " .set arch=r4000 \n" \ - "1: lld %0, %1 # atomic64_" #op " \n" \ - " " #asm_op " %0, %2 \n" \ - " scd %0, %1 \n" \ - " beqzl %0, 1b \n" \ - " .set mips0 \n" \ - : "=&r" (temp), "+m" (v->counter) \ - : "Ir" (i)); \ - } else if (kernel_uses_llsc) { \ - long temp; \ - \ - do { \ - __asm__ __volatile__( \ - " .set arch=r4000 \n" \ - " lld %0, %1 # atomic64_" #op "\n" \ - " " #asm_op " %0, %2 \n" \ - " scd %0, %1 \n" \ - " .set mips0 \n" \ - : "=&r" (temp), "+m" (v->counter) \ - : "Ir" (i)); \ - } while (unlikely(!temp)); \ - } else { \ - unsigned long flags; \ - \ - raw_local_irq_save(flags); \ - v->counter c_op i; \ - raw_local_irq_restore(flags); \ - } \ -} \ - -#define ATOMIC64_OP_RETURN(op, c_op, asm_op) \ -static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \ -{ \ - long result; \ - \ - smp_mb__before_llsc(); \ - \ - if (kernel_uses_llsc && R10000_LLSC_WAR) { \ - long temp; \ - \ - __asm__ __volatile__( \ - " .set arch=r4000 \n" \ - "1: lld %1, %2 # atomic64_" #op "_return\n" \ - " " #asm_op " %0, %1, %3 \n" \ - " scd %0, %2 \n" \ - " beqzl %0, 1b \n" \ - " " #asm_op " %0, %1, %3 \n" \ - " .set mips0 \n" \ - : "=&r" (result), "=&r" (temp), "+m" (v->counter) \ - : "Ir" (i)); \ - } else if (kernel_uses_llsc) { \ - long temp; \ - \ - do { \ - __asm__ __volatile__( \ - " .set arch=r4000 \n" \ - " lld %1, %2 # atomic64_" #op "_return\n" \ - " " #asm_op " %0, %1, %3 \n" \ - " scd %0, %2 \n" \ - " .set mips0 \n" \ - : "=&r" (result), "=&r" (temp), "=m" (v->counter) \ - : "Ir" (i), "m" (v->counter) \ - : "memory"); \ - } while (unlikely(!result)); \ - \ - result = temp; result c_op i; \ - } else { \ - unsigned long flags; \ - \ - raw_local_irq_save(flags); \ - result = v->counter; \ - result c_op i; \ - v->counter = result; \ - raw_local_irq_restore(flags); \ - } \ - \ - smp_llsc_mb(); \ - \ - return result; \ +#define ATOMIC64_OP(op, c_op, asm_op) \ +static __inline__ void atomic64_##op(long i, atomic64_t * v) \ +{ \ + if (kernel_uses_llsc && R10000_LLSC_WAR) { \ + long temp; \ + \ + __asm__ __volatile__( \ + " .set arch=r4000 \n" \ + "1: lld %0, %1 # atomic64_" #op " \n" \ + " " #asm_op " %0, %2 \n" \ + " scd %0, %1 \n" \ + " beqzl %0, 1b \n" \ + " .set mips0 \n" \ + : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \ + : "Ir" (i)); \ + } else if (kernel_uses_llsc) { \ + long temp; \ + \ + do { \ + __asm__ __volatile__( \ + " .set arch=r4000 \n" \ + " lld %0, %1 # atomic64_" #op "\n" \ + " " #asm_op " %0, %2 \n" \ + " scd %0, %1 \n" \ + " .set mips0 \n" \ + : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \ + : "Ir" (i)); \ + } while (unlikely(!temp)); \ + } else { \ + unsigned long flags; \ + \ + raw_local_irq_save(flags); \ + v->counter c_op i; \ + raw_local_irq_restore(flags); \ + } \ +} + +#define ATOMIC64_OP_RETURN(op, c_op, asm_op) \ +static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \ +{ \ + long result; \ + \ + smp_mb__before_llsc(); \ + \ + if (kernel_uses_llsc && R10000_LLSC_WAR) { \ + long temp; \ + \ + __asm__ __volatile__( \ + " .set arch=r4000 \n" \ + "1: lld %1, %2 # atomic64_" #op "_return\n" \ + " " #asm_op " %0, %1, %3 \n" \ + " scd %0, %2 \n" \ + " beqzl %0, 1b \n" \ + " " #asm_op " %0, %1, %3 \n" \ + " .set mips0 \n" \ + : "=&r" (result), "=&r" (temp), \ + "+" GCC_OFF12_ASM() (v->counter) \ + : "Ir" (i)); \ + } else if (kernel_uses_llsc) { \ + long temp; \ + \ + do { \ + __asm__ __volatile__( \ + " .set arch=r4000 \n" \ + " lld %1, %2 # atomic64_" #op "_return\n" \ + " " #asm_op " %0, %1, %3 \n" \ + " scd %0, %2 
\n" \ + " .set mips0 \n" \ + : "=&r" (result), "=&r" (temp), \ + "=" GCC_OFF12_ASM() (v->counter) \ + : "Ir" (i), GCC_OFF12_ASM() (v->counter) \ + : "memory"); \ + } while (unlikely(!result)); \ + \ + result = temp; result c_op i; \ + } else { \ + unsigned long flags; \ + \ + raw_local_irq_save(flags); \ + result = v->counter; \ + result c_op i; \ + v->counter = result; \ + raw_local_irq_restore(flags); \ + } \ + \ + smp_llsc_mb(); \ + \ + return result; \ } -#define ATOMIC64_OPS(op, c_op, asm_op) \ - ATOMIC64_OP(op, c_op, asm_op) \ +#define ATOMIC64_OPS(op, c_op, asm_op) \ + ATOMIC64_OP(op, c_op, asm_op) \ ATOMIC64_OP_RETURN(op, c_op, asm_op) ATOMIC64_OPS(add, +=, daddu) @@ -415,7 +422,8 @@ ATOMIC64_OPS(sub, -=, dsubu) #undef ATOMIC64_OP /* - * atomic64_sub_if_positive - conditionally subtract integer from atomic variable + * atomic64_sub_if_positive - conditionally subtract integer from atomic + * variable * @i: integer value to subtract * @v: pointer of type atomic64_t * @@ -443,8 +451,9 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v) " .set reorder \n" "1: \n" " .set mips0 \n" - : "=&r" (result), "=&r" (temp), "=m" (v->counter) - : "Ir" (i), "m" (v->counter) + : "=&r" (result), "=&r" (temp), + "=" GCC_OFF12_ASM() (v->counter) + : "Ir" (i), GCC_OFF12_ASM() (v->counter) : "memory"); } else if (kernel_uses_llsc) { long temp; @@ -461,7 +470,8 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v) " .set reorder \n" "1: \n" " .set mips0 \n" - : "=&r" (result), "=&r" (temp), "+m" (v->counter) + : "=&r" (result), "=&r" (temp), + "+" GCC_OFF12_ASM() (v->counter) : "Ir" (i)); } else { unsigned long flags; diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h index d0101dd0575e..2b8bbbcb9be0 100644 --- a/arch/mips/include/asm/barrier.h +++ b/arch/mips/include/asm/barrier.h @@ -10,58 +10,6 @@ #include <asm/addrspace.h> -/* - * read_barrier_depends - Flush all pending reads that subsequents reads - * depend on. - * - * No data-dependent reads from memory-like regions are ever reordered - * over this barrier. All reads preceding this primitive are guaranteed - * to access memory (but not necessarily other CPUs' caches) before any - * reads following this primitive that depend on the data return by - * any of the preceding reads. This primitive is much lighter weight than - * rmb() on most CPUs, and is never heavier weight than is - * rmb(). - * - * These ordering constraints are respected by both the local CPU - * and the compiler. - * - * Ordering is not guaranteed by anything other than these primitives, - * not even by data dependencies. See the documentation for - * memory_barrier() for examples and URLs to more information. - * - * For example, the following code would force ordering (the initial - * value of "a" is zero, "b" is one, and "p" is "&a"): - * - * <programlisting> - * CPU 0 CPU 1 - * - * b = 2; - * memory_barrier(); - * p = &b; q = p; - * read_barrier_depends(); - * d = *q; - * </programlisting> - * - * because the read of "*q" depends on the read of "p" and these - * two reads are separated by a read_barrier_depends(). However, - * the following code, with the same initial values for "a" and "b": - * - * <programlisting> - * CPU 0 CPU 1 - * - * a = 2; - * memory_barrier(); - * b = 3; y = b; - * read_barrier_depends(); - * x = a; - * </programlisting> - * - * does not enforce ordering, since there is no data dependency between - * the read of "a" and the read of "b". 
Therefore, on some CPUs, such - * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb() - * in cases like this where there are no data dependencies. - */ - #define read_barrier_depends() do { } while(0) #define smp_read_barrier_depends() do { } while(0) @@ -127,20 +75,21 @@ #include <asm/wbflush.h> -#define wmb() fast_wmb() -#define rmb() fast_rmb() #define mb() wbflush() #define iob() wbflush() #else /* !CONFIG_CPU_HAS_WB */ -#define wmb() fast_wmb() -#define rmb() fast_rmb() #define mb() fast_mb() #define iob() fast_iob() #endif /* !CONFIG_CPU_HAS_WB */ +#define wmb() fast_wmb() +#define rmb() fast_rmb() +#define dma_wmb() fast_wmb() +#define dma_rmb() fast_rmb() + #if defined(CONFIG_WEAK_ORDERING) && defined(CONFIG_SMP) # ifdef CONFIG_CPU_CAVIUM_OCTEON # define smp_mb() __sync() diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h index bae6b0fa8ab5..6663bcca9d0c 100644 --- a/arch/mips/include/asm/bitops.h +++ b/arch/mips/include/asm/bitops.h @@ -17,6 +17,7 @@ #include <linux/types.h> #include <asm/barrier.h> #include <asm/byteorder.h> /* sigh ... */ +#include <asm/compiler.h> #include <asm/cpu-features.h> #include <asm/sgidefs.h> #include <asm/war.h> @@ -78,8 +79,8 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr) " " __SC "%0, %1 \n" " beqzl %0, 1b \n" " .set mips0 \n" - : "=&r" (temp), "=m" (*m) - : "ir" (1UL << bit), "m" (*m)); + : "=&r" (temp), "=" GCC_OFF12_ASM() (*m) + : "ir" (1UL << bit), GCC_OFF12_ASM() (*m)); #ifdef CONFIG_CPU_MIPSR2 } else if (kernel_uses_llsc && __builtin_constant_p(bit)) { do { @@ -87,7 +88,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr) " " __LL "%0, %1 # set_bit \n" " " __INS "%0, %3, %2, 1 \n" " " __SC "%0, %1 \n" - : "=&r" (temp), "+m" (*m) + : "=&r" (temp), "+" GCC_OFF12_ASM() (*m) : "ir" (bit), "r" (~0)); } while (unlikely(!temp)); #endif /* CONFIG_CPU_MIPSR2 */ @@ -99,7 +100,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr) " or %0, %2 \n" " " __SC "%0, %1 \n" " .set mips0 \n" - : "=&r" (temp), "+m" (*m) + : "=&r" (temp), "+" GCC_OFF12_ASM() (*m) : "ir" (1UL << bit)); } while (unlikely(!temp)); } else @@ -130,7 +131,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr) " " __SC "%0, %1 \n" " beqzl %0, 1b \n" " .set mips0 \n" - : "=&r" (temp), "+m" (*m) + : "=&r" (temp), "+" GCC_OFF12_ASM() (*m) : "ir" (~(1UL << bit))); #ifdef CONFIG_CPU_MIPSR2 } else if (kernel_uses_llsc && __builtin_constant_p(bit)) { @@ -139,7 +140,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr) " " __LL "%0, %1 # clear_bit \n" " " __INS "%0, $0, %2, 1 \n" " " __SC "%0, %1 \n" - : "=&r" (temp), "+m" (*m) + : "=&r" (temp), "+" GCC_OFF12_ASM() (*m) : "ir" (bit)); } while (unlikely(!temp)); #endif /* CONFIG_CPU_MIPSR2 */ @@ -151,7 +152,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr) " and %0, %2 \n" " " __SC "%0, %1 \n" " .set mips0 \n" - : "=&r" (temp), "+m" (*m) + : "=&r" (temp), "+" GCC_OFF12_ASM() (*m) : "ir" (~(1UL << bit))); } while (unlikely(!temp)); } else @@ -196,7 +197,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr) " " __SC "%0, %1 \n" " beqzl %0, 1b \n" " .set mips0 \n" - : "=&r" (temp), "+m" (*m) + : "=&r" (temp), "+" GCC_OFF12_ASM() (*m) : "ir" (1UL << bit)); } else if (kernel_uses_llsc) { unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); @@ -209,7 +210,7 @@ static inline void change_bit(unsigned long nr, 
volatile unsigned long *addr) " xor %0, %2 \n" " " __SC "%0, %1 \n" " .set mips0 \n" - : "=&r" (temp), "+m" (*m) + : "=&r" (temp), "+" GCC_OFF12_ASM() (*m) : "ir" (1UL << bit)); } while (unlikely(!temp)); } else @@ -244,7 +245,7 @@ static inline int test_and_set_bit(unsigned long nr, " beqzl %2, 1b \n" " and %2, %0, %3 \n" " .set mips0 \n" - : "=&r" (temp), "+m" (*m), "=&r" (res) + : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) : "r" (1UL << bit) : "memory"); } else if (kernel_uses_llsc) { @@ -258,7 +259,7 @@ static inline int test_and_set_bit(unsigned long nr, " or %2, %0, %3 \n" " " __SC "%2, %1 \n" " .set mips0 \n" - : "=&r" (temp), "+m" (*m), "=&r" (res) + : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) : "r" (1UL << bit) : "memory"); } while (unlikely(!res)); @@ -312,7 +313,7 @@ static inline int test_and_set_bit_lock(unsigned long nr, " or %2, %0, %3 \n" " " __SC "%2, %1 \n" " .set mips0 \n" - : "=&r" (temp), "+m" (*m), "=&r" (res) + : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) : "r" (1UL << bit) : "memory"); } while (unlikely(!res)); @@ -354,7 +355,7 @@ static inline int test_and_clear_bit(unsigned long nr, " beqzl %2, 1b \n" " and %2, %0, %3 \n" " .set mips0 \n" - : "=&r" (temp), "+m" (*m), "=&r" (res) + : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) : "r" (1UL << bit) : "memory"); #ifdef CONFIG_CPU_MIPSR2 @@ -368,7 +369,7 @@ static inline int test_and_clear_bit(unsigned long nr, " " __EXT "%2, %0, %3, 1 \n" " " __INS "%0, $0, %3, 1 \n" " " __SC "%0, %1 \n" - : "=&r" (temp), "+m" (*m), "=&r" (res) + : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) : "ir" (bit) : "memory"); } while (unlikely(!temp)); @@ -385,7 +386,7 @@ static inline int test_and_clear_bit(unsigned long nr, " xor %2, %3 \n" " " __SC "%2, %1 \n" " .set mips0 \n" - : "=&r" (temp), "+m" (*m), "=&r" (res) + : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) : "r" (1UL << bit) : "memory"); } while (unlikely(!res)); @@ -427,7 +428,7 @@ static inline int test_and_change_bit(unsigned long nr, " beqzl %2, 1b \n" " and %2, %0, %3 \n" " .set mips0 \n" - : "=&r" (temp), "+m" (*m), "=&r" (res) + : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) : "r" (1UL << bit) : "memory"); } else if (kernel_uses_llsc) { @@ -441,7 +442,7 @@ static inline int test_and_change_bit(unsigned long nr, " xor %2, %0, %3 \n" " " __SC "\t%2, %1 \n" " .set mips0 \n" - : "=&r" (temp), "+m" (*m), "=&r" (res) + : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) : "r" (1UL << bit) : "memory"); } while (unlikely(!res)); diff --git a/arch/mips/include/asm/bmips.h b/arch/mips/include/asm/bmips.h index cbaccebf5065..30939b02e3ff 100644 --- a/arch/mips/include/asm/bmips.h +++ b/arch/mips/include/asm/bmips.h @@ -84,6 +84,7 @@ extern char bmips_smp_int_vec_end; extern int bmips_smp_enabled; extern int bmips_cpu_offset; extern cpumask_t bmips_booted_mask; +extern unsigned long bmips_tp1_irqs; extern void bmips_ebase_setup(void); extern asmlinkage void plat_wired_tlb_setup(void); diff --git a/arch/mips/include/asm/bootinfo.h b/arch/mips/include/asm/bootinfo.h index 1f7ca8b00404..b603804caac5 100644 --- a/arch/mips/include/asm/bootinfo.h +++ b/arch/mips/include/asm/bootinfo.h @@ -70,10 +70,7 @@ enum loongson_machine_type { MACH_DEXXON_GDIUM2F10, MACH_LEMOTE_NAS, MACH_LEMOTE_LL2F, - MACH_LEMOTE_A1004, - MACH_LEMOTE_A1101, - MACH_LEMOTE_A1201, - MACH_LEMOTE_A1205, + MACH_LOONGSON_GENERIC, MACH_LOONGSON_END }; @@ -101,16 +98,16 @@ extern unsigned long mips_machtype; struct boot_mem_map { int nr_map; struct boot_mem_map_entry { 
- phys_t addr; /* start of memory segment */ - phys_t size; /* size of memory segment */ + phys_addr_t addr; /* start of memory segment */ + phys_addr_t size; /* size of memory segment */ long type; /* type of memory segment */ } map[BOOT_MEM_MAP_MAX]; }; extern struct boot_mem_map boot_mem_map; -extern void add_memory_region(phys_t start, phys_t size, long type); -extern void detect_memory_region(phys_t start, phys_t sz_min, phys_t sz_max); +extern void add_memory_region(phys_addr_t start, phys_addr_t size, long type); +extern void detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_addr_t sz_max); extern void prom_init(void); extern void prom_free_prom_memory(void); diff --git a/arch/mips/include/asm/clock.h b/arch/mips/include/asm/clock.h index 778e32d817bc..4809c29a4890 100644 --- a/arch/mips/include/asm/clock.h +++ b/arch/mips/include/asm/clock.h @@ -35,9 +35,6 @@ struct clk { #define CLK_ALWAYS_ENABLED (1 << 0) #define CLK_RATE_PROPAGATES (1 << 1) -/* Should be defined by processor-specific code */ -void arch_init_clk_ops(struct clk_ops **, int type); - int clk_init(void); int __clk_enable(struct clk *); diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h index eefcaa363a87..28b1edf19501 100644 --- a/arch/mips/include/asm/cmpxchg.h +++ b/arch/mips/include/asm/cmpxchg.h @@ -10,6 +10,7 @@ #include <linux/bug.h> #include <linux/irqflags.h> +#include <asm/compiler.h> #include <asm/war.h> static inline unsigned long __xchg_u32(volatile int * m, unsigned int val) @@ -30,8 +31,8 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val) " sc %2, %1 \n" " beqzl %2, 1b \n" " .set mips0 \n" - : "=&r" (retval), "=m" (*m), "=&r" (dummy) - : "R" (*m), "Jr" (val) + : "=&r" (retval), "=" GCC_OFF12_ASM() (*m), "=&r" (dummy) + : GCC_OFF12_ASM() (*m), "Jr" (val) : "memory"); } else if (kernel_uses_llsc) { unsigned long dummy; @@ -45,8 +46,9 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val) " .set arch=r4000 \n" " sc %2, %1 \n" " .set mips0 \n" - : "=&r" (retval), "=m" (*m), "=&r" (dummy) - : "R" (*m), "Jr" (val) + : "=&r" (retval), "=" GCC_OFF12_ASM() (*m), + "=&r" (dummy) + : GCC_OFF12_ASM() (*m), "Jr" (val) : "memory"); } while (unlikely(!dummy)); } else { @@ -80,8 +82,8 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val) " scd %2, %1 \n" " beqzl %2, 1b \n" " .set mips0 \n" - : "=&r" (retval), "=m" (*m), "=&r" (dummy) - : "R" (*m), "Jr" (val) + : "=&r" (retval), "=" GCC_OFF12_ASM() (*m), "=&r" (dummy) + : GCC_OFF12_ASM() (*m), "Jr" (val) : "memory"); } else if (kernel_uses_llsc) { unsigned long dummy; @@ -93,8 +95,9 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val) " move %2, %z4 \n" " scd %2, %1 \n" " .set mips0 \n" - : "=&r" (retval), "=m" (*m), "=&r" (dummy) - : "R" (*m), "Jr" (val) + : "=&r" (retval), "=" GCC_OFF12_ASM() (*m), + "=&r" (dummy) + : GCC_OFF12_ASM() (*m), "Jr" (val) : "memory"); } while (unlikely(!dummy)); } else { @@ -155,8 +158,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz " beqzl $1, 1b \n" \ "2: \n" \ " .set pop \n" \ - : "=&r" (__ret), "=R" (*m) \ - : "R" (*m), "Jr" (old), "Jr" (new) \ + : "=&r" (__ret), "=" GCC_OFF12_ASM() (*m) \ + : GCC_OFF12_ASM() (*m), "Jr" (old), "Jr" (new) \ : "memory"); \ } else if (kernel_uses_llsc) { \ __asm__ __volatile__( \ @@ -172,8 +175,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz " beqz $1, 1b \n" \ " .set pop \n" \ "2: \n" \ - : "=&r" (__ret), "=R" (*m) 
\ - : "R" (*m), "Jr" (old), "Jr" (new) \ + : "=&r" (__ret), "=" GCC_OFF12_ASM() (*m) \ + : GCC_OFF12_ASM() (*m), "Jr" (old), "Jr" (new) \ : "memory"); \ } else { \ unsigned long __flags; \ diff --git a/arch/mips/include/asm/compiler.h b/arch/mips/include/asm/compiler.h index 71f5c5cfc58a..c73815e0123a 100644 --- a/arch/mips/include/asm/compiler.h +++ b/arch/mips/include/asm/compiler.h @@ -16,4 +16,12 @@ #define GCC_REG_ACCUM "accum" #endif +#ifndef CONFIG_CPU_MICROMIPS +#define GCC_OFF12_ASM() "R" +#elif __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 9) +#define GCC_OFF12_ASM() "ZC" +#else +#error "microMIPS compilation unsupported with GCC older than 4.9" +#endif + #endif /* _ASM_COMPILER_H */ diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h index 3325f3eb248c..2897cfafcaf0 100644 --- a/arch/mips/include/asm/cpu-features.h +++ b/arch/mips/include/asm/cpu-features.h @@ -344,4 +344,8 @@ # define cpu_has_msa 0 #endif +#ifndef cpu_has_fre +# define cpu_has_fre (cpu_data[0].options & MIPS_CPU_FRE) +#endif + #endif /* __ASM_CPU_FEATURES_H */ diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h index dfdc77ed1839..33866fce4d63 100644 --- a/arch/mips/include/asm/cpu.h +++ b/arch/mips/include/asm/cpu.h @@ -142,6 +142,7 @@ #define PRID_IMP_BMIPS3300_BUG 0x0000 #define PRID_IMP_BMIPS43XX 0xa000 #define PRID_IMP_BMIPS5000 0x5a00 +#define PRID_IMP_BMIPS5200 0x5b00 #define PRID_REV_BMIPS4380_LO 0x0040 #define PRID_REV_BMIPS4380_HI 0x006f @@ -368,6 +369,7 @@ enum cpu_type_enum { #define MIPS_CPU_HTW 0x100000000ull /* CPU support Hardware Page Table Walker */ #define MIPS_CPU_RIXIEX 0x200000000ull /* CPU has unique exception codes for {Read, Execute}-Inhibit exceptions */ #define MIPS_CPU_MAAR 0x400000000ull /* MAAR(I) registers are present */ +#define MIPS_CPU_FRE 0x800000000ull /* FRE & UFE bits implemented */ /* * CPU ASE encodings diff --git a/arch/mips/include/asm/edac.h b/arch/mips/include/asm/edac.h index 4da0c1fe30d9..ae6fedcb0060 100644 --- a/arch/mips/include/asm/edac.h +++ b/arch/mips/include/asm/edac.h @@ -1,6 +1,8 @@ #ifndef ASM_EDAC_H #define ASM_EDAC_H +#include <asm/compiler.h> + /* ECC atomic, DMA, SMP and interrupt safe scrub function */ static inline void atomic_scrub(void *va, u32 size) @@ -24,8 +26,8 @@ static inline void atomic_scrub(void *va, u32 size) " sc %0, %1 \n" " beqz %0, 1b \n" " .set mips0 \n" - : "=&r" (temp), "=m" (*virt_addr) - : "m" (*virt_addr)); + : "=&r" (temp), "=" GCC_OFF12_ASM() (*virt_addr) + : GCC_OFF12_ASM() (*virt_addr)); virt_addr++; } diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h index 1d38fe0edd2d..eb4d95de619c 100644 --- a/arch/mips/include/asm/elf.h +++ b/arch/mips/include/asm/elf.h @@ -8,6 +8,8 @@ #ifndef _ASM_ELF_H #define _ASM_ELF_H +#include <linux/fs.h> +#include <uapi/linux/elf.h> /* ELF header e_flags defines. */ /* MIPS architecture level. 
*/ @@ -28,6 +30,7 @@ #define PT_MIPS_REGINFO 0x70000000 #define PT_MIPS_RTPROC 0x70000001 #define PT_MIPS_OPTIONS 0x70000002 +#define PT_MIPS_ABIFLAGS 0x70000003 /* Flags in the e_flags field of the header */ #define EF_MIPS_NOREORDER 0x00000001 @@ -174,6 +177,30 @@ typedef elf_greg_t elf_gregset_t[ELF_NGREG]; typedef double elf_fpreg_t; typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; +struct mips_elf_abiflags_v0 { + uint16_t version; /* Version of flags structure */ + uint8_t isa_level; /* The level of the ISA: 1-5, 32, 64 */ + uint8_t isa_rev; /* The revision of ISA: 0 for MIPS V and below, + 1-n otherwise */ + uint8_t gpr_size; /* The size of general purpose registers */ + uint8_t cpr1_size; /* The size of co-processor 1 registers */ + uint8_t cpr2_size; /* The size of co-processor 2 registers */ + uint8_t fp_abi; /* The floating-point ABI */ + uint32_t isa_ext; /* Mask of processor-specific extensions */ + uint32_t ases; /* Mask of ASEs used */ + uint32_t flags1; /* Mask of general flags */ + uint32_t flags2; +}; + +#define MIPS_ABI_FP_ANY 0 /* FP ABI doesn't matter */ +#define MIPS_ABI_FP_DOUBLE 1 /* -mdouble-float */ +#define MIPS_ABI_FP_SINGLE 2 /* -msingle-float */ +#define MIPS_ABI_FP_SOFT 3 /* -msoft-float */ +#define MIPS_ABI_FP_OLD_64 4 /* -mips32r2 -mfp64 */ +#define MIPS_ABI_FP_XX 5 /* -mfpxx */ +#define MIPS_ABI_FP_64 6 /* -mips32r2 -mfp64 */ +#define MIPS_ABI_FP_64A 7 /* -mips32r2 -mfp64 -mno-odd-spreg */ + #ifdef CONFIG_32BIT /* @@ -262,16 +289,13 @@ extern struct mips_abi mips_abi_n32; #ifdef CONFIG_32BIT -#define SET_PERSONALITY(ex) \ +#define SET_PERSONALITY2(ex, state) \ do { \ - if ((ex).e_flags & EF_MIPS_FP64) \ - clear_thread_flag(TIF_32BIT_FPREGS); \ - else \ - set_thread_flag(TIF_32BIT_FPREGS); \ - \ if (personality(current->personality) != PER_LINUX) \ set_personality(PER_LINUX); \ \ + mips_set_personality_fp(state); \ + \ current->thread.abi = &mips_abi; \ } while (0) @@ -291,44 +315,44 @@ do { \ #endif #ifdef CONFIG_MIPS32_O32 -#define __SET_PERSONALITY32_O32(ex) \ +#define __SET_PERSONALITY32_O32(ex, state) \ do { \ set_thread_flag(TIF_32BIT_REGS); \ set_thread_flag(TIF_32BIT_ADDR); \ \ - if (!((ex).e_flags & EF_MIPS_FP64)) \ - set_thread_flag(TIF_32BIT_FPREGS); \ + mips_set_personality_fp(state); \ \ current->thread.abi = &mips_abi_32; \ } while (0) #else -#define __SET_PERSONALITY32_O32(ex) \ +#define __SET_PERSONALITY32_O32(ex, state) \ do { } while (0) #endif #ifdef CONFIG_MIPS32_COMPAT -#define __SET_PERSONALITY32(ex) \ +#define __SET_PERSONALITY32(ex, state) \ do { \ if ((((ex).e_flags & EF_MIPS_ABI2) != 0) && \ ((ex).e_flags & EF_MIPS_ABI) == 0) \ __SET_PERSONALITY32_N32(); \ else \ - __SET_PERSONALITY32_O32(ex); \ + __SET_PERSONALITY32_O32(ex, state); \ } while (0) #else -#define __SET_PERSONALITY32(ex) do { } while (0) +#define __SET_PERSONALITY32(ex, state) do { } while (0) #endif -#define SET_PERSONALITY(ex) \ +#define SET_PERSONALITY2(ex, state) \ do { \ unsigned int p; \ \ clear_thread_flag(TIF_32BIT_REGS); \ clear_thread_flag(TIF_32BIT_FPREGS); \ + clear_thread_flag(TIF_HYBRID_FPREGS); \ clear_thread_flag(TIF_32BIT_ADDR); \ \ if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \ - __SET_PERSONALITY32(ex); \ + __SET_PERSONALITY32(ex, state); \ else \ current->thread.abi = &mips_abi; \ \ @@ -390,4 +414,24 @@ struct mm_struct; extern unsigned long arch_randomize_brk(struct mm_struct *mm); #define arch_randomize_brk arch_randomize_brk +struct arch_elf_state { + int fp_abi; + int interp_fp_abi; + int overall_abi; +}; + +#define INIT_ARCH_ELF_STATE { \ + .fp_abi 
= -1, \ + .interp_fp_abi = -1, \ + .overall_abi = -1, \ +} + +extern int arch_elf_pt_proc(void *ehdr, void *phdr, struct file *elf, + bool is_interp, struct arch_elf_state *state); + +extern int arch_check_elf(void *ehdr, bool has_interpreter, + struct arch_elf_state *state); + +extern void mips_set_personality_fp(struct arch_elf_state *state); + #endif /* _ASM_ELF_H */ diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h index dd562414cd5e..994d21939676 100644 --- a/arch/mips/include/asm/fpu.h +++ b/arch/mips/include/asm/fpu.h @@ -36,14 +36,16 @@ extern void _restore_fp(struct task_struct *); /* * This enum specifies a mode in which we want the FPU to operate, for cores - * which implement the Status.FR bit. Note that FPU_32BIT & FPU_64BIT - * purposefully have the values 0 & 1 respectively, so that an integer value - * of Status.FR can be trivially casted to the corresponding enum fpu_mode. + * which implement the Status.FR bit. Note that the bottom bit of the value + * purposefully matches the desired value of the Status.FR bit. */ enum fpu_mode { FPU_32BIT = 0, /* FR = 0 */ - FPU_64BIT, /* FR = 1 */ + FPU_64BIT, /* FR = 1, FRE = 0 */ FPU_AS_IS, + FPU_HYBRID, /* FR = 1, FRE = 1 */ + +#define FPU_FR_MASK 0x1 }; static inline int __enable_fpu(enum fpu_mode mode) @@ -57,6 +59,14 @@ static inline int __enable_fpu(enum fpu_mode mode) enable_fpu_hazard(); return 0; + case FPU_HYBRID: + if (!cpu_has_fre) + return SIGFPE; + + /* set FRE */ + write_c0_config5(read_c0_config5() | MIPS_CONF5_FRE); + goto fr_common; + case FPU_64BIT: #if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_64BIT)) /* we only have a 32-bit FPU */ @@ -64,8 +74,11 @@ static inline int __enable_fpu(enum fpu_mode mode) #endif /* fall through */ case FPU_32BIT: + /* clear FRE */ + write_c0_config5(read_c0_config5() & ~MIPS_CONF5_FRE); +fr_common: /* set CU1 & change FR appropriately */ - fr = (int)mode; + fr = (int)mode & FPU_FR_MASK; change_c0_status(ST0_CU1 | ST0_FR, ST0_CU1 | (fr ? ST0_FR : 0)); enable_fpu_hazard(); @@ -102,13 +115,17 @@ static inline int __own_fpu(void) enum fpu_mode mode; int ret; - mode = !test_thread_flag(TIF_32BIT_FPREGS); + if (test_thread_flag(TIF_HYBRID_FPREGS)) + mode = FPU_HYBRID; + else + mode = !test_thread_flag(TIF_32BIT_FPREGS); + ret = __enable_fpu(mode); if (ret) return ret; KSTK_STATUS(current) |= ST0_CU1; - if (mode == FPU_64BIT) + if (mode == FPU_64BIT || mode == FPU_HYBRID) KSTK_STATUS(current) |= ST0_FR; else /* mode == FPU_32BIT */ KSTK_STATUS(current) &= ~ST0_FR; @@ -166,8 +183,24 @@ static inline int init_fpu(void) if (cpu_has_fpu) { ret = __own_fpu(); - if (!ret) + if (!ret) { + unsigned int config5 = read_c0_config5(); + + /* + * Ensure FRE is clear whilst running _init_fpu, since + * single precision FP instructions are used. If FRE + * was set then we'll just end up initialising all 32 + * 64b registers. 
+ */ + write_c0_config5(config5 & ~MIPS_CONF5_FRE); + enable_fpu_hazard(); + _init_fpu(); + + /* Restore FRE */ + write_c0_config5(config5); + enable_fpu_hazard(); + } } else fpu_emulator_init_fpu(); diff --git a/arch/mips/include/asm/futex.h b/arch/mips/include/asm/futex.h index 194cda0396a3..ef9987a61d88 100644 --- a/arch/mips/include/asm/futex.h +++ b/arch/mips/include/asm/futex.h @@ -14,6 +14,7 @@ #include <linux/uaccess.h> #include <asm/asm-eva.h> #include <asm/barrier.h> +#include <asm/compiler.h> #include <asm/errno.h> #include <asm/war.h> @@ -32,6 +33,7 @@ " beqzl $1, 1b \n" \ __WEAK_LLSC_MB \ "3: \n" \ + " .insn \n" \ " .set pop \n" \ " .set mips0 \n" \ " .section .fixup,\"ax\" \n" \ @@ -42,8 +44,10 @@ " "__UA_ADDR "\t1b, 4b \n" \ " "__UA_ADDR "\t2b, 4b \n" \ " .previous \n" \ - : "=r" (ret), "=&r" (oldval), "=R" (*uaddr) \ - : "0" (0), "R" (*uaddr), "Jr" (oparg), "i" (-EFAULT) \ + : "=r" (ret), "=&r" (oldval), \ + "=" GCC_OFF12_ASM() (*uaddr) \ + : "0" (0), GCC_OFF12_ASM() (*uaddr), "Jr" (oparg), \ + "i" (-EFAULT) \ : "memory"); \ } else if (cpu_has_llsc) { \ __asm__ __volatile__( \ @@ -58,6 +62,7 @@ " beqz $1, 1b \n" \ __WEAK_LLSC_MB \ "3: \n" \ + " .insn \n" \ " .set pop \n" \ " .set mips0 \n" \ " .section .fixup,\"ax\" \n" \ @@ -68,8 +73,10 @@ " "__UA_ADDR "\t1b, 4b \n" \ " "__UA_ADDR "\t2b, 4b \n" \ " .previous \n" \ - : "=r" (ret), "=&r" (oldval), "=R" (*uaddr) \ - : "0" (0), "R" (*uaddr), "Jr" (oparg), "i" (-EFAULT) \ + : "=r" (ret), "=&r" (oldval), \ + "=" GCC_OFF12_ASM() (*uaddr) \ + : "0" (0), GCC_OFF12_ASM() (*uaddr), "Jr" (oparg), \ + "i" (-EFAULT) \ : "memory"); \ } else \ ret = -ENOSYS; \ @@ -157,6 +164,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, " beqzl $1, 1b \n" __WEAK_LLSC_MB "3: \n" + " .insn \n" " .set pop \n" " .section .fixup,\"ax\" \n" "4: li %0, %6 \n" @@ -166,8 +174,9 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, " "__UA_ADDR "\t1b, 4b \n" " "__UA_ADDR "\t2b, 4b \n" " .previous \n" - : "+r" (ret), "=&r" (val), "=R" (*uaddr) - : "R" (*uaddr), "Jr" (oldval), "Jr" (newval), "i" (-EFAULT) + : "+r" (ret), "=&r" (val), "=" GCC_OFF12_ASM() (*uaddr) + : GCC_OFF12_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval), + "i" (-EFAULT) : "memory"); } else if (cpu_has_llsc) { __asm__ __volatile__( @@ -184,6 +193,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, " beqz $1, 1b \n" __WEAK_LLSC_MB "3: \n" + " .insn \n" " .set pop \n" " .section .fixup,\"ax\" \n" "4: li %0, %6 \n" @@ -193,8 +203,9 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, " "__UA_ADDR "\t1b, 4b \n" " "__UA_ADDR "\t2b, 4b \n" " .previous \n" - : "+r" (ret), "=&r" (val), "=R" (*uaddr) - : "R" (*uaddr), "Jr" (oldval), "Jr" (newval), "i" (-EFAULT) + : "+r" (ret), "=&r" (val), "=" GCC_OFF12_ASM() (*uaddr) + : GCC_OFF12_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval), + "i" (-EFAULT) : "memory"); } else return -ENOSYS; diff --git a/arch/mips/include/asm/gic.h b/arch/mips/include/asm/gic.h deleted file mode 100644 index d7699cf7e135..000000000000 --- a/arch/mips/include/asm/gic.h +++ /dev/null @@ -1,384 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 2000, 07 MIPS Technologies, Inc. 
- * - * GIC Register Definitions - * - */ -#ifndef _ASM_GICREGS_H -#define _ASM_GICREGS_H - -#include <linux/bitmap.h> -#include <linux/threads.h> - -#include <irq.h> - -#undef GICISBYTELITTLEENDIAN - -/* Constants */ -#define GIC_POL_POS 1 -#define GIC_POL_NEG 0 -#define GIC_TRIG_EDGE 1 -#define GIC_TRIG_LEVEL 0 - -#define MSK(n) ((1 << (n)) - 1) -#define REG32(addr) (*(volatile unsigned int *) (addr)) -#define REG(base, offs) REG32((unsigned long)(base) + offs##_##OFS) -#define REGP(base, phys) REG32((unsigned long)(base) + (phys)) - -/* Accessors */ -#define GIC_REG(segment, offset) \ - REG32(_gic_base + segment##_##SECTION_OFS + offset##_##OFS) -#define GIC_REG_ADDR(segment, offset) \ - REG32(_gic_base + segment##_##SECTION_OFS + offset) - -#define GIC_ABS_REG(segment, offset) \ - (_gic_base + segment##_##SECTION_OFS + offset##_##OFS) -#define GIC_REG_ABS_ADDR(segment, offset) \ - (_gic_base + segment##_##SECTION_OFS + offset) - -#ifdef GICISBYTELITTLEENDIAN -#define GICREAD(reg, data) ((data) = (reg), (data) = le32_to_cpu(data)) -#define GICWRITE(reg, data) ((reg) = cpu_to_le32(data)) -#else -#define GICREAD(reg, data) ((data) = (reg)) -#define GICWRITE(reg, data) ((reg) = (data)) -#endif -#define GICBIS(reg, mask, bits) \ - do { u32 data; \ - GICREAD(reg, data); \ - data &= ~(mask); \ - data |= ((bits) & (mask)); \ - GICWRITE((reg), data); \ - } while (0) - - -/* GIC Address Space */ -#define SHARED_SECTION_OFS 0x0000 -#define SHARED_SECTION_SIZE 0x8000 -#define VPE_LOCAL_SECTION_OFS 0x8000 -#define VPE_LOCAL_SECTION_SIZE 0x4000 -#define VPE_OTHER_SECTION_OFS 0xc000 -#define VPE_OTHER_SECTION_SIZE 0x4000 -#define USM_VISIBLE_SECTION_OFS 0x10000 -#define USM_VISIBLE_SECTION_SIZE 0x10000 - -/* Register Map for Shared Section */ - -#define GIC_SH_CONFIG_OFS 0x0000 - -/* Shared Global Counter */ -#define GIC_SH_COUNTER_31_00_OFS 0x0010 -#define GIC_SH_COUNTER_63_32_OFS 0x0014 -#define GIC_SH_REVISIONID_OFS 0x0020 - -/* Interrupt Polarity */ -#define GIC_SH_POL_31_0_OFS 0x0100 -#define GIC_SH_POL_63_32_OFS 0x0104 -#define GIC_SH_POL_95_64_OFS 0x0108 -#define GIC_SH_POL_127_96_OFS 0x010c -#define GIC_SH_POL_159_128_OFS 0x0110 -#define GIC_SH_POL_191_160_OFS 0x0114 -#define GIC_SH_POL_223_192_OFS 0x0118 -#define GIC_SH_POL_255_224_OFS 0x011c - -/* Edge/Level Triggering */ -#define GIC_SH_TRIG_31_0_OFS 0x0180 -#define GIC_SH_TRIG_63_32_OFS 0x0184 -#define GIC_SH_TRIG_95_64_OFS 0x0188 -#define GIC_SH_TRIG_127_96_OFS 0x018c -#define GIC_SH_TRIG_159_128_OFS 0x0190 -#define GIC_SH_TRIG_191_160_OFS 0x0194 -#define GIC_SH_TRIG_223_192_OFS 0x0198 -#define GIC_SH_TRIG_255_224_OFS 0x019c - -/* Dual Edge Triggering */ -#define GIC_SH_DUAL_31_0_OFS 0x0200 -#define GIC_SH_DUAL_63_32_OFS 0x0204 -#define GIC_SH_DUAL_95_64_OFS 0x0208 -#define GIC_SH_DUAL_127_96_OFS 0x020c -#define GIC_SH_DUAL_159_128_OFS 0x0210 -#define GIC_SH_DUAL_191_160_OFS 0x0214 -#define GIC_SH_DUAL_223_192_OFS 0x0218 -#define GIC_SH_DUAL_255_224_OFS 0x021c - -/* Set/Clear corresponding bit in Edge Detect Register */ -#define GIC_SH_WEDGE_OFS 0x0280 - -/* Reset Mask - Disables Interrupt */ -#define GIC_SH_RMASK_31_0_OFS 0x0300 -#define GIC_SH_RMASK_63_32_OFS 0x0304 -#define GIC_SH_RMASK_95_64_OFS 0x0308 -#define GIC_SH_RMASK_127_96_OFS 0x030c -#define GIC_SH_RMASK_159_128_OFS 0x0310 -#define GIC_SH_RMASK_191_160_OFS 0x0314 -#define GIC_SH_RMASK_223_192_OFS 0x0318 -#define GIC_SH_RMASK_255_224_OFS 0x031c - -/* Set Mask (WO) - Enables Interrupt */ -#define GIC_SH_SMASK_31_0_OFS 0x0380 -#define GIC_SH_SMASK_63_32_OFS 0x0384 -#define 
GIC_SH_SMASK_95_64_OFS 0x0388 -#define GIC_SH_SMASK_127_96_OFS 0x038c -#define GIC_SH_SMASK_159_128_OFS 0x0390 -#define GIC_SH_SMASK_191_160_OFS 0x0394 -#define GIC_SH_SMASK_223_192_OFS 0x0398 -#define GIC_SH_SMASK_255_224_OFS 0x039c - -/* Global Interrupt Mask Register (RO) - Bit Set == Interrupt enabled */ -#define GIC_SH_MASK_31_0_OFS 0x0400 -#define GIC_SH_MASK_63_32_OFS 0x0404 -#define GIC_SH_MASK_95_64_OFS 0x0408 -#define GIC_SH_MASK_127_96_OFS 0x040c -#define GIC_SH_MASK_159_128_OFS 0x0410 -#define GIC_SH_MASK_191_160_OFS 0x0414 -#define GIC_SH_MASK_223_192_OFS 0x0418 -#define GIC_SH_MASK_255_224_OFS 0x041c - -/* Pending Global Interrupts (RO) */ -#define GIC_SH_PEND_31_0_OFS 0x0480 -#define GIC_SH_PEND_63_32_OFS 0x0484 -#define GIC_SH_PEND_95_64_OFS 0x0488 -#define GIC_SH_PEND_127_96_OFS 0x048c -#define GIC_SH_PEND_159_128_OFS 0x0490 -#define GIC_SH_PEND_191_160_OFS 0x0494 -#define GIC_SH_PEND_223_192_OFS 0x0498 -#define GIC_SH_PEND_255_224_OFS 0x049c - -#define GIC_SH_INTR_MAP_TO_PIN_BASE_OFS 0x0500 - -/* Maps Interrupt X to a Pin */ -#define GIC_SH_MAP_TO_PIN(intr) \ - (GIC_SH_INTR_MAP_TO_PIN_BASE_OFS + (4 * intr)) - -#define GIC_SH_INTR_MAP_TO_VPE_BASE_OFS 0x2000 - -/* Maps Interrupt X to a VPE */ -#define GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe) \ - (GIC_SH_INTR_MAP_TO_VPE_BASE_OFS + (32 * (intr)) + (((vpe) / 32) * 4)) -#define GIC_SH_MAP_TO_VPE_REG_BIT(vpe) (1 << ((vpe) % 32)) - -/* Convert an interrupt number to a byte offset/bit for multi-word registers */ -#define GIC_INTR_OFS(intr) (((intr) / 32)*4) -#define GIC_INTR_BIT(intr) ((intr) % 32) - -/* Polarity : Reset Value is always 0 */ -#define GIC_SH_SET_POLARITY_OFS 0x0100 -#define GIC_SET_POLARITY(intr, pol) \ - GICBIS(GIC_REG_ADDR(SHARED, GIC_SH_SET_POLARITY_OFS + \ - GIC_INTR_OFS(intr)), (1 << GIC_INTR_BIT(intr)), \ - (pol) << GIC_INTR_BIT(intr)) - -/* Triggering : Reset Value is always 0 */ -#define GIC_SH_SET_TRIGGER_OFS 0x0180 -#define GIC_SET_TRIGGER(intr, trig) \ - GICBIS(GIC_REG_ADDR(SHARED, GIC_SH_SET_TRIGGER_OFS + \ - GIC_INTR_OFS(intr)), (1 << GIC_INTR_BIT(intr)), \ - (trig) << GIC_INTR_BIT(intr)) - -/* Mask manipulation */ -#define GIC_SH_SMASK_OFS 0x0380 -#define GIC_SET_INTR_MASK(intr) \ - GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_SMASK_OFS + \ - GIC_INTR_OFS(intr)), 1 << GIC_INTR_BIT(intr)) -#define GIC_SH_RMASK_OFS 0x0300 -#define GIC_CLR_INTR_MASK(intr) \ - GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_RMASK_OFS + \ - GIC_INTR_OFS(intr)), 1 << GIC_INTR_BIT(intr)) - -/* Register Map for Local Section */ -#define GIC_VPE_CTL_OFS 0x0000 -#define GIC_VPE_PEND_OFS 0x0004 -#define GIC_VPE_MASK_OFS 0x0008 -#define GIC_VPE_RMASK_OFS 0x000c -#define GIC_VPE_SMASK_OFS 0x0010 -#define GIC_VPE_WD_MAP_OFS 0x0040 -#define GIC_VPE_COMPARE_MAP_OFS 0x0044 -#define GIC_VPE_TIMER_MAP_OFS 0x0048 -#define GIC_VPE_PERFCTR_MAP_OFS 0x0050 -#define GIC_VPE_SWINT0_MAP_OFS 0x0054 -#define GIC_VPE_SWINT1_MAP_OFS 0x0058 -#define GIC_VPE_OTHER_ADDR_OFS 0x0080 -#define GIC_VPE_WD_CONFIG0_OFS 0x0090 -#define GIC_VPE_WD_COUNT0_OFS 0x0094 -#define GIC_VPE_WD_INITIAL0_OFS 0x0098 -#define GIC_VPE_COMPARE_LO_OFS 0x00a0 -#define GIC_VPE_COMPARE_HI_OFS 0x00a4 - -#define GIC_VPE_EIC_SHADOW_SET_BASE 0x0100 -#define GIC_VPE_EIC_SS(intr) \ - (GIC_VPE_EIC_SHADOW_SET_BASE + (4 * intr)) - -#define GIC_VPE_EIC_VEC_BASE 0x0800 -#define GIC_VPE_EIC_VEC(intr) \ - (GIC_VPE_EIC_VEC_BASE + (4 * intr)) - -#define GIC_VPE_TENABLE_NMI_OFS 0x1000 -#define GIC_VPE_TENABLE_YQ_OFS 0x1004 -#define GIC_VPE_TENABLE_INT_31_0_OFS 0x1080 -#define GIC_VPE_TENABLE_INT_63_32_OFS 0x1084 - 
-/* User Mode Visible Section Register Map */ -#define GIC_UMV_SH_COUNTER_31_00_OFS 0x0000 -#define GIC_UMV_SH_COUNTER_63_32_OFS 0x0004 - -/* Masks */ -#define GIC_SH_CONFIG_COUNTSTOP_SHF 28 -#define GIC_SH_CONFIG_COUNTSTOP_MSK (MSK(1) << GIC_SH_CONFIG_COUNTSTOP_SHF) - -#define GIC_SH_CONFIG_COUNTBITS_SHF 24 -#define GIC_SH_CONFIG_COUNTBITS_MSK (MSK(4) << GIC_SH_CONFIG_COUNTBITS_SHF) - -#define GIC_SH_CONFIG_NUMINTRS_SHF 16 -#define GIC_SH_CONFIG_NUMINTRS_MSK (MSK(8) << GIC_SH_CONFIG_NUMINTRS_SHF) - -#define GIC_SH_CONFIG_NUMVPES_SHF 0 -#define GIC_SH_CONFIG_NUMVPES_MSK (MSK(8) << GIC_SH_CONFIG_NUMVPES_SHF) - -#define GIC_SH_WEDGE_SET(intr) (intr | (0x1 << 31)) -#define GIC_SH_WEDGE_CLR(intr) (intr & ~(0x1 << 31)) - -#define GIC_MAP_TO_PIN_SHF 31 -#define GIC_MAP_TO_PIN_MSK (MSK(1) << GIC_MAP_TO_PIN_SHF) -#define GIC_MAP_TO_NMI_SHF 30 -#define GIC_MAP_TO_NMI_MSK (MSK(1) << GIC_MAP_TO_NMI_SHF) -#define GIC_MAP_TO_YQ_SHF 29 -#define GIC_MAP_TO_YQ_MSK (MSK(1) << GIC_MAP_TO_YQ_SHF) -#define GIC_MAP_SHF 0 -#define GIC_MAP_MSK (MSK(6) << GIC_MAP_SHF) - -/* GIC_VPE_CTL Masks */ -#define GIC_VPE_CTL_PERFCNT_RTBL_SHF 2 -#define GIC_VPE_CTL_PERFCNT_RTBL_MSK (MSK(1) << GIC_VPE_CTL_PERFCNT_RTBL_SHF) -#define GIC_VPE_CTL_TIMER_RTBL_SHF 1 -#define GIC_VPE_CTL_TIMER_RTBL_MSK (MSK(1) << GIC_VPE_CTL_TIMER_RTBL_SHF) -#define GIC_VPE_CTL_EIC_MODE_SHF 0 -#define GIC_VPE_CTL_EIC_MODE_MSK (MSK(1) << GIC_VPE_CTL_EIC_MODE_SHF) - -/* GIC_VPE_PEND Masks */ -#define GIC_VPE_PEND_WD_SHF 0 -#define GIC_VPE_PEND_WD_MSK (MSK(1) << GIC_VPE_PEND_WD_SHF) -#define GIC_VPE_PEND_CMP_SHF 1 -#define GIC_VPE_PEND_CMP_MSK (MSK(1) << GIC_VPE_PEND_CMP_SHF) -#define GIC_VPE_PEND_TIMER_SHF 2 -#define GIC_VPE_PEND_TIMER_MSK (MSK(1) << GIC_VPE_PEND_TIMER_SHF) -#define GIC_VPE_PEND_PERFCOUNT_SHF 3 -#define GIC_VPE_PEND_PERFCOUNT_MSK (MSK(1) << GIC_VPE_PEND_PERFCOUNT_SHF) -#define GIC_VPE_PEND_SWINT0_SHF 4 -#define GIC_VPE_PEND_SWINT0_MSK (MSK(1) << GIC_VPE_PEND_SWINT0_SHF) -#define GIC_VPE_PEND_SWINT1_SHF 5 -#define GIC_VPE_PEND_SWINT1_MSK (MSK(1) << GIC_VPE_PEND_SWINT1_SHF) - -/* GIC_VPE_RMASK Masks */ -#define GIC_VPE_RMASK_WD_SHF 0 -#define GIC_VPE_RMASK_WD_MSK (MSK(1) << GIC_VPE_RMASK_WD_SHF) -#define GIC_VPE_RMASK_CMP_SHF 1 -#define GIC_VPE_RMASK_CMP_MSK (MSK(1) << GIC_VPE_RMASK_CMP_SHF) -#define GIC_VPE_RMASK_TIMER_SHF 2 -#define GIC_VPE_RMASK_TIMER_MSK (MSK(1) << GIC_VPE_RMASK_TIMER_SHF) -#define GIC_VPE_RMASK_PERFCNT_SHF 3 -#define GIC_VPE_RMASK_PERFCNT_MSK (MSK(1) << GIC_VPE_RMASK_PERFCNT_SHF) -#define GIC_VPE_RMASK_SWINT0_SHF 4 -#define GIC_VPE_RMASK_SWINT0_MSK (MSK(1) << GIC_VPE_RMASK_SWINT0_SHF) -#define GIC_VPE_RMASK_SWINT1_SHF 5 -#define GIC_VPE_RMASK_SWINT1_MSK (MSK(1) << GIC_VPE_RMASK_SWINT1_SHF) - -/* GIC_VPE_SMASK Masks */ -#define GIC_VPE_SMASK_WD_SHF 0 -#define GIC_VPE_SMASK_WD_MSK (MSK(1) << GIC_VPE_SMASK_WD_SHF) -#define GIC_VPE_SMASK_CMP_SHF 1 -#define GIC_VPE_SMASK_CMP_MSK (MSK(1) << GIC_VPE_SMASK_CMP_SHF) -#define GIC_VPE_SMASK_TIMER_SHF 2 -#define GIC_VPE_SMASK_TIMER_MSK (MSK(1) << GIC_VPE_SMASK_TIMER_SHF) -#define GIC_VPE_SMASK_PERFCNT_SHF 3 -#define GIC_VPE_SMASK_PERFCNT_MSK (MSK(1) << GIC_VPE_SMASK_PERFCNT_SHF) -#define GIC_VPE_SMASK_SWINT0_SHF 4 -#define GIC_VPE_SMASK_SWINT0_MSK (MSK(1) << GIC_VPE_SMASK_SWINT0_SHF) -#define GIC_VPE_SMASK_SWINT1_SHF 5 -#define GIC_VPE_SMASK_SWINT1_MSK (MSK(1) << GIC_VPE_SMASK_SWINT1_SHF) - -/* - * Set the Mapping of Interrupt X to a VPE. 
- */ -#define GIC_SH_MAP_TO_VPE_SMASK(intr, vpe) \ - GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe)), \ - GIC_SH_MAP_TO_VPE_REG_BIT(vpe)) - -/* - * Interrupt Meta-data specification. The ipiflag helps - * in building ipi_map. - */ -struct gic_intr_map { - unsigned int cpunum; /* Directed to this CPU */ -#define GIC_UNUSED 0xdead /* Dummy data */ - unsigned int pin; /* Directed to this Pin */ - unsigned int polarity; /* Polarity : +/- */ - unsigned int trigtype; /* Trigger : Edge/Levl */ - unsigned int flags; /* Misc flags */ -#define GIC_FLAG_TRANSPARENT 0x01 -}; - -/* - * This is only used in EIC mode. This helps to figure out which - * shared interrupts we need to process when we get a vector interrupt. - */ -#define GIC_MAX_SHARED_INTR 0x5 -struct gic_shared_intr_map { - unsigned int num_shared_intr; - unsigned int intr_list[GIC_MAX_SHARED_INTR]; - unsigned int local_intr_mask; -}; - -/* GIC nomenclature for Core Interrupt Pins. */ -#define GIC_CPU_INT0 0 /* Core Interrupt 2 */ -#define GIC_CPU_INT1 1 /* . */ -#define GIC_CPU_INT2 2 /* . */ -#define GIC_CPU_INT3 3 /* . */ -#define GIC_CPU_INT4 4 /* . */ -#define GIC_CPU_INT5 5 /* Core Interrupt 7 */ - -/* Local GIC interrupts. */ -#define GIC_INT_TMR (GIC_CPU_INT5) -#define GIC_INT_PERFCTR (GIC_CPU_INT5) - -/* Add 2 to convert non-EIC hardware interrupt to EIC vector number. */ -#define GIC_CPU_TO_VEC_OFFSET (2) - -/* Mapped interrupt to pin X, then GIC will generate the vector (X+1). */ -#define GIC_PIN_TO_VEC_OFFSET (1) - -#include <linux/clocksource.h> -#include <linux/irq.h> - -extern unsigned int gic_present; -extern unsigned int gic_frequency; -extern unsigned long _gic_base; -extern unsigned int gic_irq_base; -extern unsigned int gic_irq_flags[]; -extern struct gic_shared_intr_map gic_shared_intr_map[]; - -extern void gic_init(unsigned long gic_base_addr, - unsigned long gic_addrspace_size, struct gic_intr_map *intrmap, - unsigned int intrmap_size, unsigned int irqbase); -extern void gic_clocksource_init(unsigned int); -extern unsigned int gic_compare_int (void); -extern cycle_t gic_read_count(void); -extern cycle_t gic_read_compare(void); -extern void gic_write_compare(cycle_t cnt); -extern void gic_write_cpu_compare(cycle_t cnt, int cpu); -extern void gic_send_ipi(unsigned int intr); -extern unsigned int plat_ipi_call_int_xlate(unsigned int); -extern unsigned int plat_ipi_resched_int_xlate(unsigned int); -extern void gic_bind_eic_interrupt(int irq, int set); -extern unsigned int gic_get_timer_pending(void); -extern void gic_get_int_mask(unsigned long *dst, const unsigned long *src); -extern unsigned int gic_get_int(void); -extern void gic_enable_interrupt(int irq_vec); -extern void gic_disable_interrupt(int irq_vec); -extern void gic_irq_ack(struct irq_data *d); -extern void gic_finish_irq(struct irq_data *d); -extern void gic_platform_init(int irqs, struct irq_chip *irq_controller); -#endif /* _ASM_GICREGS_H */ diff --git a/arch/mips/include/asm/hpet.h b/arch/mips/include/asm/hpet.h new file mode 100644 index 000000000000..18a8f778bfaa --- /dev/null +++ b/arch/mips/include/asm/hpet.h @@ -0,0 +1,73 @@ +#ifndef _ASM_HPET_H +#define _ASM_HPET_H + +#ifdef CONFIG_RS780_HPET + +#define HPET_MMAP_SIZE 1024 + +#define HPET_ID 0x000 +#define HPET_PERIOD 0x004 +#define HPET_CFG 0x010 +#define HPET_STATUS 0x020 +#define HPET_COUNTER 0x0f0 + +#define HPET_Tn_CFG(n) (0x100 + 0x20 * n) +#define HPET_Tn_CMP(n) (0x108 + 0x20 * n) +#define HPET_Tn_ROUTE(n) (0x110 + 0x20 * n) + +#define HPET_T0_IRS 0x001 +#define HPET_T1_IRS 
0x002 +#define HPET_T3_IRS 0x004 + +#define HPET_T0_CFG 0x100 +#define HPET_T0_CMP 0x108 +#define HPET_T0_ROUTE 0x110 +#define HPET_T1_CFG 0x120 +#define HPET_T1_CMP 0x128 +#define HPET_T1_ROUTE 0x130 +#define HPET_T2_CFG 0x140 +#define HPET_T2_CMP 0x148 +#define HPET_T2_ROUTE 0x150 + +#define HPET_ID_REV 0x000000ff +#define HPET_ID_NUMBER 0x00001f00 +#define HPET_ID_64BIT 0x00002000 +#define HPET_ID_LEGSUP 0x00008000 +#define HPET_ID_VENDOR 0xffff0000 +#define HPET_ID_NUMBER_SHIFT 8 +#define HPET_ID_VENDOR_SHIFT 16 + +#define HPET_CFG_ENABLE 0x001 +#define HPET_CFG_LEGACY 0x002 +#define HPET_LEGACY_8254 2 +#define HPET_LEGACY_RTC 8 + +#define HPET_TN_LEVEL 0x0002 +#define HPET_TN_ENABLE 0x0004 +#define HPET_TN_PERIODIC 0x0008 +#define HPET_TN_PERIODIC_CAP 0x0010 +#define HPET_TN_64BIT_CAP 0x0020 +#define HPET_TN_SETVAL 0x0040 +#define HPET_TN_32BIT 0x0100 +#define HPET_TN_ROUTE 0x3e00 +#define HPET_TN_FSB 0x4000 +#define HPET_TN_FSB_CAP 0x8000 +#define HPET_TN_ROUTE_SHIFT 9 + +/* Max HPET Period is 10^8 femto sec as in HPET spec */ +#define HPET_MAX_PERIOD 100000000UL +/* + * Min HPET period is 10^5 femto sec just for safety. If it is less than this, + * then 32 bit HPET counter wrapsaround in less than 0.5 sec. + */ +#define HPET_MIN_PERIOD 100000UL + +#define HPET_ADDR 0x20000 +#define HPET_MMIO_ADDR 0x90000e0000020000 +#define HPET_FREQ 14318780 +#define HPET_COMPARE_VAL ((HPET_FREQ + HZ / 2) / HZ) +#define HPET_T0_IRQ 0 + +extern void __init setup_hpet_timer(void); +#endif /* CONFIG_RS780_HPET */ +#endif /* _ASM_HPET_H */ diff --git a/arch/mips/include/asm/idle.h b/arch/mips/include/asm/idle.h index 1c967abd545c..a2d18ab57ac6 100644 --- a/arch/mips/include/asm/idle.h +++ b/arch/mips/include/asm/idle.h @@ -22,7 +22,6 @@ extern int mips_cpuidle_wait_enter(struct cpuidle_device *dev, .exit_latency = 1,\ .target_residency = 1,\ .power_usage = UINT_MAX,\ - .flags = CPUIDLE_FLAG_TIME_VALID,\ .name = "wait",\ .desc = "MIPS wait",\ } diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h index 933b50e125a0..9e777cd42b67 100644 --- a/arch/mips/include/asm/io.h +++ b/arch/mips/include/asm/io.h @@ -167,7 +167,7 @@ static inline void * isa_bus_to_virt(unsigned long address) */ #define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT) -extern void __iomem * __ioremap(phys_t offset, phys_t size, unsigned long flags); +extern void __iomem * __ioremap(phys_addr_t offset, phys_addr_t size, unsigned long flags); extern void __iounmap(const volatile void __iomem *addr); #ifndef CONFIG_PCI @@ -175,7 +175,7 @@ struct pci_dev; static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr) {} #endif -static inline void __iomem * __ioremap_mode(phys_t offset, unsigned long size, +static inline void __iomem * __ioremap_mode(phys_addr_t offset, unsigned long size, unsigned long flags) { void __iomem *addr = plat_ioremap(offset, size, flags); @@ -183,7 +183,7 @@ static inline void __iomem * __ioremap_mode(phys_t offset, unsigned long size, if (addr) return addr; -#define __IS_LOW512(addr) (!((phys_t)(addr) & (phys_t) ~0x1fffffffULL)) +#define __IS_LOW512(addr) (!((phys_addr_t)(addr) & (phys_addr_t) ~0x1fffffffULL)) if (cpu_has_64bit_addresses) { u64 base = UNCAC_BASE; @@ -197,7 +197,7 @@ static inline void __iomem * __ioremap_mode(phys_t offset, unsigned long size, return (void __iomem *) (unsigned long) (base + offset); } else if (__builtin_constant_p(offset) && __builtin_constant_p(size) && __builtin_constant_p(flags)) { - phys_t phys_addr, last_addr; + phys_addr_t 
phys_addr, last_addr; phys_addr = fixup_bigphys_addr(offset, size); diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h index 39f07aec640c..5a4e1bb8fb1b 100644 --- a/arch/mips/include/asm/irq.h +++ b/arch/mips/include/asm/irq.h @@ -48,4 +48,7 @@ extern int cp0_compare_irq; extern int cp0_compare_irq_shift; extern int cp0_perfcount_irq; +void arch_trigger_all_cpu_backtrace(bool); +#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace + #endif /* _ASM_IRQ_H */ diff --git a/arch/mips/include/asm/irq_cpu.h b/arch/mips/include/asm/irq_cpu.h index 3f11fdb3ed8c..39a160bb41dc 100644 --- a/arch/mips/include/asm/irq_cpu.h +++ b/arch/mips/include/asm/irq_cpu.h @@ -19,8 +19,8 @@ extern void rm9k_cpu_irq_init(void); #ifdef CONFIG_IRQ_DOMAIN struct device_node; -extern int mips_cpu_intc_init(struct device_node *of_node, - struct device_node *parent); +extern int mips_cpu_irq_of_init(struct device_node *of_node, + struct device_node *parent); #endif #endif /* _ASM_IRQ_CPU_H */ diff --git a/arch/mips/include/asm/mach-ath25/ath25_platform.h b/arch/mips/include/asm/mach-ath25/ath25_platform.h new file mode 100644 index 000000000000..4f4ee4f9e5ec --- /dev/null +++ b/arch/mips/include/asm/mach-ath25/ath25_platform.h @@ -0,0 +1,73 @@ +#ifndef __ASM_MACH_ATH25_PLATFORM_H +#define __ASM_MACH_ATH25_PLATFORM_H + +#include <linux/etherdevice.h> + +/* + * This is board-specific data that is stored in a "fixed" location in flash. + * It is shared across operating systems, so it should not be changed lightly. + * The main reason we need it is in order to extract the ethernet MAC + * address(es). + */ +struct ath25_boarddata { + u32 magic; /* board data is valid */ +#define ATH25_BD_MAGIC 0x35333131 /* "5311", for all 531x/231x platforms */ + u16 cksum; /* checksum (starting with BD_REV 2) */ + u16 rev; /* revision of this struct */ +#define BD_REV 4 + char board_name[64]; /* Name of board */ + u16 major; /* Board major number */ + u16 minor; /* Board minor number */ + u32 flags; /* Board configuration */ +#define BD_ENET0 0x00000001 /* ENET0 is stuffed */ +#define BD_ENET1 0x00000002 /* ENET1 is stuffed */ +#define BD_UART1 0x00000004 /* UART1 is stuffed */ +#define BD_UART0 0x00000008 /* UART0 is stuffed (dma) */ +#define BD_RSTFACTORY 0x00000010 /* Reset factory defaults stuffed */ +#define BD_SYSLED 0x00000020 /* System LED stuffed */ +#define BD_EXTUARTCLK 0x00000040 /* External UART clock */ +#define BD_CPUFREQ 0x00000080 /* cpu freq is valid in nvram */ +#define BD_SYSFREQ 0x00000100 /* sys freq is set in nvram */ +#define BD_WLAN0 0x00000200 /* Enable WLAN0 */ +#define BD_MEMCAP 0x00000400 /* CAP SDRAM @ mem_cap for testing */ +#define BD_DISWATCHDOG 0x00000800 /* disable system watchdog */ +#define BD_WLAN1 0x00001000 /* Enable WLAN1 (ar5212) */ +#define BD_ISCASPER 0x00002000 /* FLAG for AR2312 */ +#define BD_WLAN0_2G_EN 0x00004000 /* FLAG for radio0_2G */ +#define BD_WLAN0_5G_EN 0x00008000 /* FLAG for radio0_2G */ +#define BD_WLAN1_2G_EN 0x00020000 /* FLAG for radio0_2G */ +#define BD_WLAN1_5G_EN 0x00040000 /* FLAG for radio0_2G */ + u16 reset_config_gpio; /* Reset factory GPIO pin */ + u16 sys_led_gpio; /* System LED GPIO pin */ + + u32 cpu_freq; /* CPU core frequency in Hz */ + u32 sys_freq; /* System frequency in Hz */ + u32 cnt_freq; /* Calculated C0_COUNT frequency */ + + u8 wlan0_mac[ETH_ALEN]; + u8 enet0_mac[ETH_ALEN]; + u8 enet1_mac[ETH_ALEN]; + + u16 pci_id; /* Pseudo PCIID for common code */ + u16 mem_cap; /* cap bank1 in MB */ + + /* version 3 */ + u8 
wlan1_mac[ETH_ALEN]; /* (ar5212) */ +}; + +#define BOARD_CONFIG_BUFSZ 0x1000 + +/* + * Platform device information for the Wireless MAC + */ +struct ar231x_board_config { + u16 devid; + + /* board config data */ + struct ath25_boarddata *config; + + /* radio calibration data */ + const char *radio; +}; + +#endif /* __ASM_MACH_ATH25_PLATFORM_H */ diff --git a/arch/mips/include/asm/mach-ath25/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ath25/cpu-feature-overrides.h new file mode 100644 index 000000000000..ade0356df257 --- /dev/null +++ b/arch/mips/include/asm/mach-ath25/cpu-feature-overrides.h @@ -0,0 +1,64 @@ +/* + * Atheros AR231x/AR531x SoC specific CPU feature overrides + * + * Copyright (C) 2008 Gabor Juhos <juhosg@openwrt.org> + * + * This file was derived from: include/asm-mips/cpu-features.h + * Copyright (C) 2003, 2004 Ralf Baechle + * Copyright (C) 2004 Maciej W. Rozycki + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + */ +#ifndef __ASM_MACH_ATH25_CPU_FEATURE_OVERRIDES_H +#define __ASM_MACH_ATH25_CPU_FEATURE_OVERRIDES_H + +/* + * The Atheros AR531x/AR231x SoCs have MIPS 4Kc/4KEc core. + */ +#define cpu_has_tlb 1 +#define cpu_has_4kex 1 +#define cpu_has_3k_cache 0 +#define cpu_has_4k_cache 1 +#define cpu_has_tx39_cache 0 +#define cpu_has_sb1_cache 0 +#define cpu_has_fpu 0 +#define cpu_has_32fpr 0 +#define cpu_has_counter 1 +#define cpu_has_ejtag 1 + +#if !defined(CONFIG_SOC_AR5312) +# define cpu_has_llsc 1 +#else +/* + * The MIPS 4Kc V0.9 core in the AR5312/AR2312 have problems with the + * ll/sc instructions. + */ +# define cpu_has_llsc 0 +#endif + +#define cpu_has_mips16 0 +#define cpu_has_mdmx 0 +#define cpu_has_mips3d 0 +#define cpu_has_smartmips 0 + +#define cpu_has_mips32r1 1 + +#if !defined(CONFIG_SOC_AR5312) +# define cpu_has_mips32r2 1 +#endif + +#define cpu_has_mips64r1 0 +#define cpu_has_mips64r2 0 + +#define cpu_has_dsp 0 +#define cpu_has_mipsmt 0 + +#define cpu_has_64bits 0 +#define cpu_has_64bit_zero_reg 0 +#define cpu_has_64bit_gp_regs 0 +#define cpu_has_64bit_addresses 0 + +#endif /* __ASM_MACH_ATH25_CPU_FEATURE_OVERRIDES_H */ diff --git a/arch/mips/include/asm/mach-ath25/dma-coherence.h b/arch/mips/include/asm/mach-ath25/dma-coherence.h new file mode 100644 index 000000000000..d8009c93a465 --- /dev/null +++ b/arch/mips/include/asm/mach-ath25/dma-coherence.h @@ -0,0 +1,82 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org> + * Copyright (C) 2007 Felix Fietkau <nbd@openwrt.org> + * + */ +#ifndef __ASM_MACH_ATH25_DMA_COHERENCE_H +#define __ASM_MACH_ATH25_DMA_COHERENCE_H + +#include <linux/device.h> + +/* + * We need some arbitrary non-zero value to be programmed to the BAR1 register + * of PCI host controller to enable DMA. The same value should be used as the + * offset to calculate the physical address of DMA buffer for PCI devices. 
+ */ +#define AR2315_PCI_HOST_SDRAM_BASEADDR 0x20000000 + +static inline dma_addr_t ath25_dev_offset(struct device *dev) +{ +#ifdef CONFIG_PCI + extern struct bus_type pci_bus_type; + + if (dev && dev->bus == &pci_bus_type) + return AR2315_PCI_HOST_SDRAM_BASEADDR; +#endif + return 0; +} + +static inline dma_addr_t +plat_map_dma_mem(struct device *dev, void *addr, size_t size) +{ + return virt_to_phys(addr) + ath25_dev_offset(dev); +} + +static inline dma_addr_t +plat_map_dma_mem_page(struct device *dev, struct page *page) +{ + return page_to_phys(page) + ath25_dev_offset(dev); +} + +static inline unsigned long +plat_dma_addr_to_phys(struct device *dev, dma_addr_t dma_addr) +{ + return dma_addr - ath25_dev_offset(dev); +} + +static inline void +plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr, size_t size, + enum dma_data_direction direction) +{ +} + +static inline int plat_dma_supported(struct device *dev, u64 mask) +{ + return 1; +} + +static inline void plat_extra_sync_for_device(struct device *dev) +{ +} + +static inline int plat_dma_mapping_error(struct device *dev, + dma_addr_t dma_addr) +{ + return 0; +} + +static inline int plat_device_is_coherent(struct device *dev) +{ +#ifdef CONFIG_DMA_COHERENT + return 1; +#endif +#ifdef CONFIG_DMA_NONCOHERENT + return 0; +#endif +} + +#endif /* __ASM_MACH_ATH25_DMA_COHERENCE_H */ diff --git a/arch/mips/include/asm/mach-ath25/gpio.h b/arch/mips/include/asm/mach-ath25/gpio.h new file mode 100644 index 000000000000..713564b8e8ef --- /dev/null +++ b/arch/mips/include/asm/mach-ath25/gpio.h @@ -0,0 +1,16 @@ +#ifndef __ASM_MACH_ATH25_GPIO_H +#define __ASM_MACH_ATH25_GPIO_H + +#include <asm-generic/gpio.h> + +#define gpio_get_value __gpio_get_value +#define gpio_set_value __gpio_set_value +#define gpio_cansleep __gpio_cansleep +#define gpio_to_irq __gpio_to_irq + +static inline int irq_to_gpio(unsigned irq) +{ + return -EINVAL; +} + +#endif /* __ASM_MACH_ATH25_GPIO_H */ diff --git a/arch/mips/include/asm/mach-ath25/war.h b/arch/mips/include/asm/mach-ath25/war.h new file mode 100644 index 000000000000..e3a5250ebd67 --- /dev/null +++ b/arch/mips/include/asm/mach-ath25/war.h @@ -0,0 +1,25 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ * + * Copyright (C) 2008 Felix Fietkau <nbd@openwrt.org> + */ +#ifndef __ASM_MACH_ATH25_WAR_H +#define __ASM_MACH_ATH25_WAR_H + +#define R4600_V1_INDEX_ICACHEOP_WAR 0 +#define R4600_V1_HIT_CACHEOP_WAR 0 +#define R4600_V2_HIT_CACHEOP_WAR 0 +#define R5432_CP0_INTERRUPT_WAR 0 +#define BCM1250_M3_WAR 0 +#define SIBYTE_1956_WAR 0 +#define MIPS4K_ICACHE_REFILL_WAR 0 +#define MIPS_CACHE_SYNC_WAR 0 +#define TX49XX_ICACHE_INDEX_INV_WAR 0 +#define RM9000_CDEX_SMP_WAR 0 +#define ICACHE_REFILLS_WORKAROUND_WAR 0 +#define R10000_LLSC_WAR 0 +#define MIPS34K_MISSED_ITLB_WAR 0 + +#endif /* __ASM_MACH_ATH25_WAR_H */ diff --git a/arch/mips/include/asm/mach-au1x00/ioremap.h b/arch/mips/include/asm/mach-au1x00/ioremap.h index 75a94ad3ac91..99fea1fbb4f5 100644 --- a/arch/mips/include/asm/mach-au1x00/ioremap.h +++ b/arch/mips/include/asm/mach-au1x00/ioremap.h @@ -11,10 +11,10 @@ #include <linux/types.h> -#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_PCI) -extern phys_t __fixup_bigphys_addr(phys_t, phys_t); +#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_PCI) +extern phys_addr_t __fixup_bigphys_addr(phys_addr_t, phys_addr_t); #else -static inline phys_t __fixup_bigphys_addr(phys_t phys_addr, phys_t size) +static inline phys_addr_t __fixup_bigphys_addr(phys_addr_t phys_addr, phys_addr_t size) { return phys_addr; } @@ -23,12 +23,12 @@ static inline phys_t __fixup_bigphys_addr(phys_t phys_addr, phys_t size) /* * Allow physical addresses to be fixed up to help 36-bit peripherals. */ -static inline phys_t fixup_bigphys_addr(phys_t phys_addr, phys_t size) +static inline phys_addr_t fixup_bigphys_addr(phys_addr_t phys_addr, phys_addr_t size) { return __fixup_bigphys_addr(phys_addr, size); } -static inline void __iomem *plat_ioremap(phys_t offset, unsigned long size, +static inline void __iomem *plat_ioremap(phys_addr_t offset, unsigned long size, unsigned long flags) { return NULL; diff --git a/arch/mips/include/asm/mach-bcm3384/dma-coherence.h b/arch/mips/include/asm/mach-bcm3384/dma-coherence.h new file mode 100644 index 000000000000..a3be8e50e1f0 --- /dev/null +++ b/arch/mips/include/asm/mach-bcm3384/dma-coherence.h @@ -0,0 +1,48 @@ +/* + * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org> + * Copyright (C) 2009 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __ASM_MACH_BCM3384_DMA_COHERENCE_H +#define __ASM_MACH_BCM3384_DMA_COHERENCE_H + +struct device; + +extern dma_addr_t plat_map_dma_mem(struct device *dev, void *addr, size_t size); +extern dma_addr_t plat_map_dma_mem_page(struct device *dev, struct page *page); +extern unsigned long plat_dma_addr_to_phys(struct device *dev, + dma_addr_t dma_addr); + +static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr, + size_t size, enum dma_data_direction direction) +{ +} + +static inline int plat_dma_supported(struct device *dev, u64 mask) +{ + /* + * we fall back to GFP_DMA when the mask isn't all 1s, + * so we can't guarantee allocations that must be + * within a tighter range than GFP_DMA.. 
+ */ + if (mask < DMA_BIT_MASK(24)) + return 0; + + return 1; +} + +static inline int plat_device_is_coherent(struct device *dev) +{ + return 0; +} + +#endif /* __ASM_MACH_BCM3384_DMA_COHERENCE_H */ diff --git a/arch/mips/include/asm/mach-bcm3384/war.h b/arch/mips/include/asm/mach-bcm3384/war.h new file mode 100644 index 000000000000..59d7599059b0 --- /dev/null +++ b/arch/mips/include/asm/mach-bcm3384/war.h @@ -0,0 +1,24 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org> + */ +#ifndef __ASM_MIPS_MACH_BCM3384_WAR_H +#define __ASM_MIPS_MACH_BCM3384_WAR_H + +#define R4600_V1_INDEX_ICACHEOP_WAR 0 +#define R4600_V1_HIT_CACHEOP_WAR 0 +#define R4600_V2_HIT_CACHEOP_WAR 0 +#define R5432_CP0_INTERRUPT_WAR 0 +#define BCM1250_M3_WAR 0 +#define SIBYTE_1956_WAR 0 +#define MIPS4K_ICACHE_REFILL_WAR 0 +#define MIPS_CACHE_SYNC_WAR 0 +#define TX49XX_ICACHE_INDEX_INV_WAR 0 +#define ICACHE_REFILLS_WORKAROUND_WAR 0 +#define R10000_LLSC_WAR 0 +#define MIPS34K_MISSED_ITLB_WAR 0 + +#endif /* __ASM_MIPS_MACH_BCM3384_WAR_H */ diff --git a/arch/mips/include/asm/mach-bcm47xx/bcm47xx_nvram.h b/arch/mips/include/asm/mach-bcm47xx/bcm47xx_nvram.h index 36a3fc1aa3ae..ee59ffe99922 100644 --- a/arch/mips/include/asm/mach-bcm47xx/bcm47xx_nvram.h +++ b/arch/mips/include/asm/mach-bcm47xx/bcm47xx_nvram.h @@ -14,40 +14,8 @@ #include <linux/types.h> #include <linux/kernel.h> -struct nvram_header { - u32 magic; - u32 len; - u32 crc_ver_init; /* 0:7 crc, 8:15 ver, 16:31 sdram_init */ - u32 config_refresh; /* 0:15 sdram_config, 16:31 sdram_refresh */ - u32 config_ncdl; /* ncdl values for memc */ -}; - -#define NVRAM_HEADER 0x48534C46 /* 'FLSH' */ -#define NVRAM_VERSION 1 -#define NVRAM_HEADER_SIZE 20 -#define NVRAM_SPACE 0x8000 - -#define FLASH_MIN 0x00020000 /* Minimum flash size */ - -#define NVRAM_MAX_VALUE_LEN 255 -#define NVRAM_MAX_PARAM_LEN 64 - -extern int bcm47xx_nvram_getenv(char *name, char *val, size_t val_len); - -static inline void bcm47xx_nvram_parse_macaddr(char *buf, u8 macaddr[6]) -{ - if (strchr(buf, ':')) - sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &macaddr[0], - &macaddr[1], &macaddr[2], &macaddr[3], &macaddr[4], - &macaddr[5]); - else if (strchr(buf, '-')) - sscanf(buf, "%hhx-%hhx-%hhx-%hhx-%hhx-%hhx", &macaddr[0], - &macaddr[1], &macaddr[2], &macaddr[3], &macaddr[4], - &macaddr[5]); - else - printk(KERN_WARNING "Can not parse mac address: %s\n", buf); -} - +int bcm47xx_nvram_init_from_mem(u32 base, u32 lim); +int bcm47xx_nvram_getenv(const char *name, char *val, size_t val_len); int bcm47xx_nvram_gpio_pin(const char *name); #endif /* __BCM47XX_NVRAM_H */ diff --git a/arch/mips/include/asm/mach-bcm63xx/ioremap.h b/arch/mips/include/asm/mach-bcm63xx/ioremap.h index ff15e3b14e7a..aea6e64b828f 100644 --- a/arch/mips/include/asm/mach-bcm63xx/ioremap.h +++ b/arch/mips/include/asm/mach-bcm63xx/ioremap.h @@ -3,12 +3,12 @@ #include <bcm63xx_cpu.h> -static inline phys_t fixup_bigphys_addr(phys_t phys_addr, phys_t size) +static inline phys_addr_t fixup_bigphys_addr(phys_addr_t phys_addr, phys_addr_t size) { return phys_addr; } -static inline int is_bcm63xx_internal_registers(phys_t offset) +static inline int is_bcm63xx_internal_registers(phys_addr_t offset) { switch (bcm63xx_get_cpu_id()) { case BCM3368_CPU_ID: @@ -32,7 +32,7 @@ static inline int is_bcm63xx_internal_registers(phys_t offset) return 0; } -static 
inline void __iomem *plat_ioremap(phys_t offset, unsigned long size, +static inline void __iomem *plat_ioremap(phys_addr_t offset, unsigned long size, unsigned long flags) { if (is_bcm63xx_internal_registers(offset)) diff --git a/arch/mips/include/asm/mach-generic/ioremap.h b/arch/mips/include/asm/mach-generic/ioremap.h index b379938d47f0..513371f7c39c 100644 --- a/arch/mips/include/asm/mach-generic/ioremap.h +++ b/arch/mips/include/asm/mach-generic/ioremap.h @@ -15,12 +15,12 @@ * Allow physical addresses to be fixed up to help peripherals located * outside the low 32-bit range -- generic pass-through version. */ -static inline phys_t fixup_bigphys_addr(phys_t phys_addr, phys_t size) +static inline phys_addr_t fixup_bigphys_addr(phys_addr_t phys_addr, phys_addr_t size) { return phys_addr; } -static inline void __iomem *plat_ioremap(phys_t offset, unsigned long size, +static inline void __iomem *plat_ioremap(phys_addr_t offset, unsigned long size, unsigned long flags) { return NULL; diff --git a/arch/mips/include/asm/mach-generic/irq.h b/arch/mips/include/asm/mach-generic/irq.h index 139cd200e79d..050e18bb1a04 100644 --- a/arch/mips/include/asm/mach-generic/irq.h +++ b/arch/mips/include/asm/mach-generic/irq.h @@ -36,4 +36,10 @@ #endif /* CONFIG_IRQ_CPU */ +#ifdef CONFIG_MIPS_GIC +#ifndef MIPS_GIC_IRQ_BASE +#define MIPS_GIC_IRQ_BASE (MIPS_CPU_IRQ_BASE + 8) +#endif +#endif /* CONFIG_MIPS_GIC */ + #endif /* __ASM_MACH_GENERIC_IRQ_H */ diff --git a/arch/mips/include/asm/mach-lantiq/lantiq.h b/arch/mips/include/asm/mach-lantiq/lantiq.h index f196cceb7322..4e5ae6523cb4 100644 --- a/arch/mips/include/asm/mach-lantiq/lantiq.h +++ b/arch/mips/include/asm/mach-lantiq/lantiq.h @@ -48,6 +48,8 @@ extern struct clk *clk_get_ppe(void); extern unsigned char ltq_boot_select(void); /* find out what caused the last cpu reset */ extern int ltq_reset_cause(void); +/* find out the soc type */ +extern int ltq_soc_type(void); #define IOPORT_RESOURCE_START 0x10000000 #define IOPORT_RESOURCE_END 0xffffffff diff --git a/arch/mips/include/asm/mach-loongson/boot_param.h b/arch/mips/include/asm/mach-loongson/boot_param.h index 3388fc53599e..fa802926523f 100644 --- a/arch/mips/include/asm/mach-loongson/boot_param.h +++ b/arch/mips/include/asm/mach-loongson/boot_param.h @@ -10,7 +10,8 @@ #define VIDEO_ROM 7 #define ADAPTER_ROM 8 #define ACPI_TABLE 9 -#define MAX_MEMORY_TYPE 10 +#define SMBIOS_TABLE 10 +#define MAX_MEMORY_TYPE 11 #define LOONGSON3_BOOT_MEM_MAP_MAX 128 struct efi_memory_map_loongson { @@ -42,15 +43,49 @@ struct efi_cpuinfo_loongson { u32 processor_id; /* PRID, e.g. 6305, 6306 */ u32 cputype; /* Loongson_3A/3B, etc. 
*/ u32 total_node; /* num of total numa nodes */ - u32 cpu_startup_core_id; /* Core id */ + u16 cpu_startup_core_id; /* Boot core id */ + u16 reserved_cores_mask; u32 cpu_clock_freq; /* cpu_clock */ u32 nr_cpus; } __packed; +#define MAX_UARTS 64 +struct uart_device { + u32 iotype; /* see include/linux/serial_core.h */ + u32 uartclk; + u32 int_offset; + u64 uart_base; +} __packed; + +#define MAX_SENSORS 64 +#define SENSOR_TEMPER 0x00000001 +#define SENSOR_VOLTAGE 0x00000002 +#define SENSOR_FAN 0x00000004 +struct sensor_device { + char name[32]; /* a formal name */ + char label[64]; /* a flexible description */ + u32 type; /* SENSOR_* */ + u32 id; /* instance id of a sensor-class */ + u32 fan_policy; /* see loongson_hwmon.h */ + u32 fan_percent;/* only for constant speed policy */ + u64 base_addr; /* base address of device registers */ +} __packed; + struct system_loongson { u16 vers; /* version of system_loongson */ u32 ccnuma_smp; /* 0: no numa; 1: has numa */ u32 sing_double_channel; /* 1:single; 2:double */ + u32 nr_uarts; + struct uart_device uarts[MAX_UARTS]; + u32 nr_sensors; + struct sensor_device sensors[MAX_SENSORS]; + char has_ec; + char ec_name[32]; + u64 ec_base_addr; + char has_tcm; + char tcm_name[32]; + u64 tcm_base_addr; + u64 workarounds; /* see workarounds.h */ } __packed; struct irq_source_routing_table { @@ -149,6 +184,8 @@ struct loongson_system_configuration { u32 nr_nodes; int cores_per_node; int cores_per_package; + u16 boot_cpu_id; + u16 reserved_cpus_mask; enum loongson_cpu_type cputype; u64 ht_control_base; u64 pci_mem_start_addr; @@ -159,9 +196,15 @@ struct loongson_system_configuration { u64 suspend_addr; u64 vgabios_addr; u32 dma_mask_bits; + char ecname[32]; + u32 nr_uarts; + struct uart_device uarts[MAX_UARTS]; + u32 nr_sensors; + struct sensor_device sensors[MAX_SENSORS]; + u64 workarounds; }; extern struct efi_memory_map_loongson *loongson_memmap; extern struct loongson_system_configuration loongson_sysconf; -extern int cpuhotplug_workaround; + #endif diff --git a/arch/mips/include/asm/mach-loongson/dma-coherence.h b/arch/mips/include/asm/mach-loongson/dma-coherence.h index 6a902751cc7f..a90534161bd2 100644 --- a/arch/mips/include/asm/mach-loongson/dma-coherence.h +++ b/arch/mips/include/asm/mach-loongson/dma-coherence.h @@ -23,7 +23,7 @@ static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr, size_t size) { #ifdef CONFIG_CPU_LOONGSON3 - return virt_to_phys(addr); + return phys_to_dma(dev, virt_to_phys(addr)); #else return virt_to_phys(addr) | 0x80000000; #endif @@ -33,7 +33,7 @@ static inline dma_addr_t plat_map_dma_mem_page(struct device *dev, struct page *page) { #ifdef CONFIG_CPU_LOONGSON3 - return page_to_phys(page); + return phys_to_dma(dev, page_to_phys(page)); #else return page_to_phys(page) | 0x80000000; #endif @@ -43,7 +43,7 @@ static inline unsigned long plat_dma_addr_to_phys(struct device *dev, dma_addr_t dma_addr) { #if defined(CONFIG_CPU_LOONGSON3) && defined(CONFIG_64BIT) - return dma_addr; + return dma_to_phys(dev, dma_addr); #elif defined(CONFIG_CPU_LOONGSON2F) && defined(CONFIG_64BIT) return (dma_addr > 0x8fffffff) ? 
dma_addr : (dma_addr & 0x0fffffff); #else diff --git a/arch/mips/include/asm/mach-loongson/irq.h b/arch/mips/include/asm/mach-loongson/irq.h index 34560bda6626..a281cca5f2fb 100644 --- a/arch/mips/include/asm/mach-loongson/irq.h +++ b/arch/mips/include/asm/mach-loongson/irq.h @@ -32,8 +32,7 @@ #define LOONGSON_INT_ROUTER_LPC LOONGSON_INT_ROUTER_ENTRY(0x0a) #define LOONGSON_INT_ROUTER_HT1(n) LOONGSON_INT_ROUTER_ENTRY(n + 0x18) -#define LOONGSON_INT_CORE0_INT0 0x11 /* route to int 0 of core 0 */ -#define LOONGSON_INT_CORE0_INT1 0x21 /* route to int 1 of core 0 */ +#define LOONGSON_INT_COREx_INTy(x, y) (1<<(x) | 1<<(y+4)) /* route to int y of core x */ #endif diff --git a/arch/mips/include/asm/mach-loongson/loongson.h b/arch/mips/include/asm/mach-loongson/loongson.h index 92bf76c21441..5459ac09679f 100644 --- a/arch/mips/include/asm/mach-loongson/loongson.h +++ b/arch/mips/include/asm/mach-loongson/loongson.h @@ -35,7 +35,7 @@ extern void __init prom_init_cmdline(void); extern void __init prom_init_machtype(void); extern void __init prom_init_env(void); #ifdef CONFIG_LOONGSON_UART_BASE -extern unsigned long _loongson_uart_base, loongson_uart_base; +extern unsigned long _loongson_uart_base[], loongson_uart_base[]; extern void prom_init_loongson_uart_base(void); #endif diff --git a/arch/mips/include/asm/mach-loongson/loongson_hwmon.h b/arch/mips/include/asm/mach-loongson/loongson_hwmon.h new file mode 100644 index 000000000000..4431fc54a36c --- /dev/null +++ b/arch/mips/include/asm/mach-loongson/loongson_hwmon.h @@ -0,0 +1,55 @@ +#ifndef __LOONGSON_HWMON_H_ +#define __LOONGSON_HWMON_H_ + +#include <linux/types.h> + +#define MIN_TEMP 0 +#define MAX_TEMP 255 +#define NOT_VALID_TEMP 999 + +typedef int (*get_temp_fun)(int); +extern int loongson3_cpu_temp(int); + +/* 0:Max speed, 1:Manual, 2:Auto */ +enum fan_control_mode { + FAN_FULL_MODE = 0, + FAN_MANUAL_MODE = 1, + FAN_AUTO_MODE = 2, + FAN_MODE_END +}; + +struct temp_range { + u8 low; + u8 high; + u8 level; +}; + +#define CONSTANT_SPEED_POLICY 0 /* at constent speed */ +#define STEP_SPEED_POLICY 1 /* use up/down arrays to describe policy */ +#define KERNEL_HELPER_POLICY 2 /* kernel as a helper to fan control */ + +#define MAX_STEP_NUM 16 +#define MAX_FAN_LEVEL 255 + +/* loongson_fan_policy works when fan work at FAN_AUTO_MODE */ +struct loongson_fan_policy { + u8 type; + + /* percent only used when type is CONSTANT_SPEED_POLICY */ + u8 percent; + + /* period between two check. 
(Unit: S) */ + u8 adjust_period; + + /* fan adjust usually depend on a temprature input */ + get_temp_fun depend_temp; + + /* up_step/down_step used when type is STEP_SPEED_POLICY */ + u8 up_step_num; + u8 down_step_num; + struct temp_range up_step[MAX_STEP_NUM]; + struct temp_range down_step[MAX_STEP_NUM]; + struct delayed_work work; +}; + +#endif /* __LOONGSON_HWMON_H_*/ diff --git a/arch/mips/include/asm/mach-loongson/machine.h b/arch/mips/include/asm/mach-loongson/machine.h index 228e37847a36..cb2b60249cd2 100644 --- a/arch/mips/include/asm/mach-loongson/machine.h +++ b/arch/mips/include/asm/mach-loongson/machine.h @@ -26,7 +26,7 @@ #ifdef CONFIG_LOONGSON_MACH3X -#define LOONGSON_MACHTYPE MACH_LEMOTE_A1101 +#define LOONGSON_MACHTYPE MACH_LOONGSON_GENERIC #endif /* CONFIG_LOONGSON_MACH3X */ diff --git a/arch/mips/include/asm/mach-loongson/topology.h b/arch/mips/include/asm/mach-loongson/topology.h index 5598ba77d2ef..0d8f3b55bdbc 100644 --- a/arch/mips/include/asm/mach-loongson/topology.h +++ b/arch/mips/include/asm/mach-loongson/topology.h @@ -3,7 +3,7 @@ #ifdef CONFIG_NUMA -#define cpu_to_node(cpu) ((cpu) >> 2) +#define cpu_to_node(cpu) (cpu_logical_map(cpu) >> 2) #define parent_node(node) (node) #define cpumask_of_node(node) (&__node_data[(node)]->cpumask) diff --git a/arch/mips/include/asm/mach-loongson/workarounds.h b/arch/mips/include/asm/mach-loongson/workarounds.h new file mode 100644 index 000000000000..e180c1422eae --- /dev/null +++ b/arch/mips/include/asm/mach-loongson/workarounds.h @@ -0,0 +1,7 @@ +#ifndef __ASM_MACH_LOONGSON_WORKAROUNDS_H_ +#define __ASM_MACH_LOONGSON_WORKAROUNDS_H_ + +#define WORKAROUND_CPUFREQ 0x00000001 +#define WORKAROUND_CPUHOTPLUG 0x00000002 + +#endif diff --git a/arch/mips/include/asm/mach-loongson1/cpufreq.h b/arch/mips/include/asm/mach-loongson1/cpufreq.h new file mode 100644 index 000000000000..e7765ce30bcf --- /dev/null +++ b/arch/mips/include/asm/mach-loongson1/cpufreq.h @@ -0,0 +1,23 @@ +/* + * Copyright (c) 2014 Zhang, Keguang <keguang.zhang@gmail.com> + * + * Loongson 1 CPUFreq platform support. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
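As an illustration, not taken from the patch itself: the new workarounds word carried in loongson_sysconf appears to replace the cpuhotplug_workaround global removed above, so platform code would test a bit instead of a dedicated flag. A minimal sketch, assuming the mach-loongson include path; the helper name is invented:

	#include <boot_param.h>		/* loongson_sysconf */
	#include <workarounds.h>	/* WORKAROUND_CPUHOTPLUG */

	/* Hypothetical helper: non-zero when firmware requests the CPU-hotplug quirk. */
	static inline int demo_needs_cpuhotplug_quirk(void)
	{
		return !!(loongson_sysconf.workarounds & WORKAROUND_CPUHOTPLUG);
	}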
+ */ + + +#ifndef __ASM_MACH_LOONGSON1_CPUFREQ_H +#define __ASM_MACH_LOONGSON1_CPUFREQ_H + +struct plat_ls1x_cpufreq { + const char *clk_name; /* CPU clk */ + const char *osc_clk_name; /* OSC clk */ + unsigned int max_freq; /* in kHz */ + unsigned int min_freq; /* in kHz */ +}; + +#endif /* __ASM_MACH_LOONGSON1_CPUFREQ_H */ diff --git a/arch/mips/include/asm/mach-loongson1/loongson1.h b/arch/mips/include/asm/mach-loongson1/loongson1.h index 5c437c2ba6b3..20e0c2b155dd 100644 --- a/arch/mips/include/asm/mach-loongson1/loongson1.h +++ b/arch/mips/include/asm/mach-loongson1/loongson1.h @@ -16,6 +16,7 @@ #define DEFAULT_MEMSIZE 256 /* If no memsize provided */ /* Loongson 1 Register Bases */ +#define LS1X_MUX_BASE 0x1fd00420 #define LS1X_INTC_BASE 0x1fd01040 #define LS1X_EHCI_BASE 0x1fe00000 #define LS1X_OHCI_BASE 0x1fe08000 @@ -31,7 +32,10 @@ #define LS1X_I2C0_BASE 0x1fe58000 #define LS1X_I2C1_BASE 0x1fe68000 #define LS1X_I2C2_BASE 0x1fe70000 -#define LS1X_PWM_BASE 0x1fe5c000 +#define LS1X_PWM0_BASE 0x1fe5c000 +#define LS1X_PWM1_BASE 0x1fe5c010 +#define LS1X_PWM2_BASE 0x1fe5c020 +#define LS1X_PWM3_BASE 0x1fe5c030 #define LS1X_WDT_BASE 0x1fe5c060 #define LS1X_RTC_BASE 0x1fe64000 #define LS1X_AC97_BASE 0x1fe74000 @@ -39,6 +43,8 @@ #define LS1X_CLK_BASE 0x1fe78030 #include <regs-clk.h> +#include <regs-mux.h> +#include <regs-pwm.h> #include <regs-wdt.h> #endif /* __ASM_MACH_LOONGSON1_LOONGSON1_H */ diff --git a/arch/mips/include/asm/mach-loongson1/platform.h b/arch/mips/include/asm/mach-loongson1/platform.h index 30c13e508fff..47de55e0c835 100644 --- a/arch/mips/include/asm/mach-loongson1/platform.h +++ b/arch/mips/include/asm/mach-loongson1/platform.h @@ -13,10 +13,12 @@ #include <linux/platform_device.h> -extern struct platform_device ls1x_uart_device; -extern struct platform_device ls1x_eth0_device; -extern struct platform_device ls1x_ehci_device; -extern struct platform_device ls1x_rtc_device; +extern struct platform_device ls1x_uart_pdev; +extern struct platform_device ls1x_cpufreq_pdev; +extern struct platform_device ls1x_eth0_pdev; +extern struct platform_device ls1x_eth1_pdev; +extern struct platform_device ls1x_ehci_pdev; +extern struct platform_device ls1x_rtc_pdev; extern void __init ls1x_clk_init(void); extern void __init ls1x_serial_setup(struct platform_device *pdev); diff --git a/arch/mips/include/asm/mach-loongson1/regs-clk.h b/arch/mips/include/asm/mach-loongson1/regs-clk.h index fb6a3ff9318f..ee2445b10fc3 100644 --- a/arch/mips/include/asm/mach-loongson1/regs-clk.h +++ b/arch/mips/include/asm/mach-loongson1/regs-clk.h @@ -20,15 +20,32 @@ /* Clock PLL Divisor Register Bits */ #define DIV_DC_EN (0x1 << 31) +#define DIV_DC_RST (0x1 << 30) #define DIV_CPU_EN (0x1 << 25) +#define DIV_CPU_RST (0x1 << 24) #define DIV_DDR_EN (0x1 << 19) +#define DIV_DDR_RST (0x1 << 18) +#define RST_DC_EN (0x1 << 5) +#define RST_DC (0x1 << 4) +#define RST_DDR_EN (0x1 << 3) +#define RST_DDR (0x1 << 2) +#define RST_CPU_EN (0x1 << 1) +#define RST_CPU 0x1 #define DIV_DC_SHIFT 26 #define DIV_CPU_SHIFT 20 #define DIV_DDR_SHIFT 14 -#define DIV_DC_WIDTH 5 -#define DIV_CPU_WIDTH 5 -#define DIV_DDR_WIDTH 5 +#define DIV_DC_WIDTH 4 +#define DIV_CPU_WIDTH 4 +#define DIV_DDR_WIDTH 4 + +#define BYPASS_DC_SHIFT 12 +#define BYPASS_DDR_SHIFT 10 +#define BYPASS_CPU_SHIFT 8 + +#define BYPASS_DC_WIDTH 1 +#define BYPASS_DDR_WIDTH 1 +#define BYPASS_CPU_WIDTH 1 #endif /* __ASM_MACH_LOONGSON1_REGS_CLK_H */ diff --git a/arch/mips/include/asm/mach-loongson1/regs-mux.h b/arch/mips/include/asm/mach-loongson1/regs-mux.h new file mode 
100644 index 000000000000..fb1e36efaa19 --- /dev/null +++ b/arch/mips/include/asm/mach-loongson1/regs-mux.h @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2014 Zhang, Keguang <keguang.zhang@gmail.com> + * + * Loongson 1 MUX Register Definitions. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef __ASM_MACH_LOONGSON1_REGS_MUX_H +#define __ASM_MACH_LOONGSON1_REGS_MUX_H + +#define LS1X_MUX_REG(x) \ + ((void __iomem *)KSEG1ADDR(LS1X_MUX_BASE + (x))) + +#define LS1X_MUX_CTRL0 LS1X_MUX_REG(0x0) +#define LS1X_MUX_CTRL1 LS1X_MUX_REG(0x4) + +/* MUX CTRL0 Register Bits */ +#define UART0_USE_PWM23 (0x1 << 28) +#define UART0_USE_PWM01 (0x1 << 27) +#define UART1_USE_LCD0_5_6_11 (0x1 << 26) +#define I2C2_USE_CAN1 (0x1 << 25) +#define I2C1_USE_CAN0 (0x1 << 24) +#define NAND3_USE_UART5 (0x1 << 23) +#define NAND3_USE_UART4 (0x1 << 22) +#define NAND3_USE_UART1_DAT (0x1 << 21) +#define NAND3_USE_UART1_CTS (0x1 << 20) +#define NAND3_USE_PWM23 (0x1 << 19) +#define NAND3_USE_PWM01 (0x1 << 18) +#define NAND2_USE_UART5 (0x1 << 17) +#define NAND2_USE_UART4 (0x1 << 16) +#define NAND2_USE_UART1_DAT (0x1 << 15) +#define NAND2_USE_UART1_CTS (0x1 << 14) +#define NAND2_USE_PWM23 (0x1 << 13) +#define NAND2_USE_PWM01 (0x1 << 12) +#define NAND1_USE_UART5 (0x1 << 11) +#define NAND1_USE_UART4 (0x1 << 10) +#define NAND1_USE_UART1_DAT (0x1 << 9) +#define NAND1_USE_UART1_CTS (0x1 << 8) +#define NAND1_USE_PWM23 (0x1 << 7) +#define NAND1_USE_PWM01 (0x1 << 6) +#define GMAC1_USE_UART1 (0x1 << 4) +#define GMAC1_USE_UART0 (0x1 << 3) +#define LCD_USE_UART0_DAT (0x1 << 2) +#define LCD_USE_UART15 (0x1 << 1) +#define LCD_USE_UART0 0x1 + +/* MUX CTRL1 Register Bits */ +#define USB_RESET (0x1 << 31) +#define SPI1_CS_USE_PWM01 (0x1 << 24) +#define SPI1_USE_CAN (0x1 << 23) +#define DISABLE_DDR_CONFSPACE (0x1 << 20) +#define DDR32TO16EN (0x1 << 16) +#define GMAC1_SHUT (0x1 << 13) +#define GMAC0_SHUT (0x1 << 12) +#define USB_SHUT (0x1 << 11) +#define UART1_3_USE_CAN1 (0x1 << 5) +#define UART1_2_USE_CAN0 (0x1 << 4) +#define GMAC1_USE_TXCLK (0x1 << 3) +#define GMAC0_USE_TXCLK (0x1 << 2) +#define GMAC1_USE_PWM23 (0x1 << 1) +#define GMAC0_USE_PWM01 0x1 + +#endif /* __ASM_MACH_LOONGSON1_REGS_MUX_H */ diff --git a/arch/mips/include/asm/mach-loongson1/regs-pwm.h b/arch/mips/include/asm/mach-loongson1/regs-pwm.h new file mode 100644 index 000000000000..99f2bcc586f0 --- /dev/null +++ b/arch/mips/include/asm/mach-loongson1/regs-pwm.h @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2014 Zhang, Keguang <keguang.zhang@gmail.com> + * + * Loongson 1 PWM Register Definitions. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
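For orientation, a hedged sketch (not part of the patch) of how board code might drive the new LS1X_MUX_CTRL0 bits defined above; the pad-routing semantics are assumed from the bit names only, and the function name is made up:

	#include <linux/init.h>
	#include <linux/io.h>
	#include <loongson1.h>		/* LS1X_MUX_BASE; pulls in regs-mux.h */

	/* Hypothetical board hook: route UART0 onto the PWM0/1 pads. */
	static void __init demo_mux_uart0_to_pwm01(void)
	{
		u32 val = __raw_readl(LS1X_MUX_CTRL0);

		__raw_writel(val | UART0_USE_PWM01, LS1X_MUX_CTRL0);
	}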
+ */ + +#ifndef __ASM_MACH_LOONGSON1_REGS_PWM_H +#define __ASM_MACH_LOONGSON1_REGS_PWM_H + +/* Loongson 1 PWM Timer Register Definitions */ +#define PWM_CNT 0x0 +#define PWM_HRC 0x4 +#define PWM_LRC 0x8 +#define PWM_CTRL 0xc + +/* PWM Control Register Bits */ +#define CNT_RST (0x1 << 7) +#define INT_SR (0x1 << 6) +#define INT_EN (0x1 << 5) +#define PWM_SINGLE (0x1 << 4) +#define PWM_OE (0x1 << 3) +#define CNT_EN 0x1 + +#endif /* __ASM_MACH_LOONGSON1_REGS_PWM_H */ diff --git a/arch/mips/include/asm/mach-loongson1/regs-wdt.h b/arch/mips/include/asm/mach-loongson1/regs-wdt.h index 6574568c2084..c39ee982ad3b 100644 --- a/arch/mips/include/asm/mach-loongson1/regs-wdt.h +++ b/arch/mips/include/asm/mach-loongson1/regs-wdt.h @@ -1,7 +1,7 @@ /* * Copyright (c) 2011 Zhang, Keguang <keguang.zhang@gmail.com> * - * Loongson 1 watchdog register definitions. + * Loongson 1 Watchdog Register Definitions. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -12,11 +12,8 @@ #ifndef __ASM_MACH_LOONGSON1_REGS_WDT_H #define __ASM_MACH_LOONGSON1_REGS_WDT_H -#define LS1X_WDT_REG(x) \ - ((void __iomem *)KSEG1ADDR(LS1X_WDT_BASE + (x))) - -#define LS1X_WDT_EN LS1X_WDT_REG(0x0) -#define LS1X_WDT_SET LS1X_WDT_REG(0x4) -#define LS1X_WDT_TIMER LS1X_WDT_REG(0x8) +#define WDT_EN 0x0 +#define WDT_TIMER 0x4 +#define WDT_SET 0x8 #endif /* __ASM_MACH_LOONGSON1_REGS_WDT_H */ diff --git a/arch/mips/include/asm/mach-malta/irq.h b/arch/mips/include/asm/mach-malta/irq.h index f2c13d211abb..47cfe64efbb0 100644 --- a/arch/mips/include/asm/mach-malta/irq.h +++ b/arch/mips/include/asm/mach-malta/irq.h @@ -2,7 +2,6 @@ #define __ASM_MACH_MIPS_IRQ_H -#define GIC_NUM_INTRS (24 + NR_CPUS * 2) #define NR_IRQS 256 #include_next <irq.h> diff --git a/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h index fc946c835995..2e54b4bff5cf 100644 --- a/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h +++ b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h @@ -49,6 +49,7 @@ #include <linux/types.h> +#include <asm/compiler.h> #include <asm/war.h> #ifndef R10000_LLSC_WAR @@ -84,8 +85,8 @@ static inline void set_value_reg32(volatile u32 *const addr, " "__beqz"%0, 1b \n" " nop \n" " .set pop \n" - : "=&r" (temp), "=m" (*addr) - : "ir" (~mask), "ir" (value), "m" (*addr)); + : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr) + : "ir" (~mask), "ir" (value), GCC_OFF12_ASM() (*addr)); } /* @@ -105,8 +106,8 @@ static inline void set_reg32(volatile u32 *const addr, " "__beqz"%0, 1b \n" " nop \n" " .set pop \n" - : "=&r" (temp), "=m" (*addr) - : "ir" (mask), "m" (*addr)); + : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr) + : "ir" (mask), GCC_OFF12_ASM() (*addr)); } /* @@ -126,8 +127,8 @@ static inline void clear_reg32(volatile u32 *const addr, " "__beqz"%0, 1b \n" " nop \n" " .set pop \n" - : "=&r" (temp), "=m" (*addr) - : "ir" (~mask), "m" (*addr)); + : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr) + : "ir" (~mask), GCC_OFF12_ASM() (*addr)); } /* @@ -147,8 +148,8 @@ static inline void toggle_reg32(volatile u32 *const addr, " "__beqz"%0, 1b \n" " nop \n" " .set pop \n" - : "=&r" (temp), "=m" (*addr) - : "ir" (mask), "m" (*addr)); + : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr) + : "ir" (mask), GCC_OFF12_ASM() (*addr)); } /* @@ -219,8 +220,8 @@ static inline u32 blocking_read_reg32(volatile u32 *const addr) " .set arch=r4000 \n" \ "1: ll %0, %1 #custom_read_reg32 \n" \ " .set pop \n" \ - : "=r" (tmp), "=m" (*address) \ - 
: "m" (*address)) + : "=r" (tmp), "=" GCC_OFF12_ASM() (*address) \ + : GCC_OFF12_ASM() (*address)) #define custom_write_reg32(address, tmp) \ __asm__ __volatile__( \ @@ -230,7 +231,7 @@ static inline u32 blocking_read_reg32(volatile u32 *const addr) " "__beqz"%0, 1b \n" \ " nop \n" \ " .set pop \n" \ - : "=&r" (tmp), "=m" (*address) \ - : "0" (tmp), "m" (*address)) + : "=&r" (tmp), "=" GCC_OFF12_ASM() (*address) \ + : "0" (tmp), GCC_OFF12_ASM() (*address)) #endif /* __ASM_REGOPS_H__ */ diff --git a/arch/mips/include/asm/mach-ralink/mt7620.h b/arch/mips/include/asm/mach-ralink/mt7620.h index 6f9b24f51157..1976fb815fd1 100644 --- a/arch/mips/include/asm/mach-ralink/mt7620.h +++ b/arch/mips/include/asm/mach-ralink/mt7620.h @@ -13,6 +13,13 @@ #ifndef _MT7620_REGS_H_ #define _MT7620_REGS_H_ +enum mt762x_soc_type { + MT762X_SOC_UNKNOWN = 0, + MT762X_SOC_MT7620A, + MT762X_SOC_MT7620N, + MT762X_SOC_MT7628AN, +}; + #define MT7620_SYSC_BASE 0x10000000 #define SYSC_REG_CHIP_NAME0 0x00 @@ -25,11 +32,9 @@ #define SYSC_REG_CPLL_CONFIG0 0x54 #define SYSC_REG_CPLL_CONFIG1 0x58 -#define MT7620N_CHIP_NAME0 0x33365452 -#define MT7620N_CHIP_NAME1 0x20203235 - -#define MT7620A_CHIP_NAME0 0x3637544d -#define MT7620A_CHIP_NAME1 0x20203032 +#define MT7620_CHIP_NAME0 0x3637544d +#define MT7620_CHIP_NAME1 0x20203032 +#define MT7628_CHIP_NAME1 0x20203832 #define SYSCFG0_XTAL_FREQ_SEL BIT(6) @@ -74,6 +79,9 @@ #define SYSCFG0_DRAM_TYPE_DDR1 1 #define SYSCFG0_DRAM_TYPE_DDR2 2 +#define SYSCFG0_DRAM_TYPE_DDR2_MT7628 0 +#define SYSCFG0_DRAM_TYPE_DDR1_MT7628 1 + #define MT7620_DRAM_BASE 0x0 #define MT7620_SDRAM_SIZE_MIN 2 #define MT7620_SDRAM_SIZE_MAX 64 @@ -82,7 +90,6 @@ #define MT7620_DDR2_SIZE_MIN 32 #define MT7620_DDR2_SIZE_MAX 256 -#define MT7620_GPIO_MODE_I2C BIT(0) #define MT7620_GPIO_MODE_UART0_SHIFT 2 #define MT7620_GPIO_MODE_UART0_MASK 0x7 #define MT7620_GPIO_MODE_UART0(x) ((x) << MT7620_GPIO_MODE_UART0_SHIFT) @@ -94,15 +101,40 @@ #define MT7620_GPIO_MODE_GPIO_UARTF 0x5 #define MT7620_GPIO_MODE_GPIO_I2S 0x6 #define MT7620_GPIO_MODE_GPIO 0x7 -#define MT7620_GPIO_MODE_UART1 BIT(5) -#define MT7620_GPIO_MODE_MDIO BIT(8) -#define MT7620_GPIO_MODE_RGMII1 BIT(9) -#define MT7620_GPIO_MODE_RGMII2 BIT(10) -#define MT7620_GPIO_MODE_SPI BIT(11) -#define MT7620_GPIO_MODE_SPI_REF_CLK BIT(12) -#define MT7620_GPIO_MODE_WLED BIT(13) -#define MT7620_GPIO_MODE_JTAG BIT(15) -#define MT7620_GPIO_MODE_EPHY BIT(15) -#define MT7620_GPIO_MODE_WDT BIT(22) + +#define MT7620_GPIO_MODE_NAND 0 +#define MT7620_GPIO_MODE_SD 1 +#define MT7620_GPIO_MODE_ND_SD_GPIO 2 +#define MT7620_GPIO_MODE_ND_SD_MASK 0x3 +#define MT7620_GPIO_MODE_ND_SD_SHIFT 18 + +#define MT7620_GPIO_MODE_PCIE_RST 0 +#define MT7620_GPIO_MODE_PCIE_REF 1 +#define MT7620_GPIO_MODE_PCIE_GPIO 2 +#define MT7620_GPIO_MODE_PCIE_MASK 0x3 +#define MT7620_GPIO_MODE_PCIE_SHIFT 16 + +#define MT7620_GPIO_MODE_WDT_RST 0 +#define MT7620_GPIO_MODE_WDT_REF 1 +#define MT7620_GPIO_MODE_WDT_GPIO 2 +#define MT7620_GPIO_MODE_WDT_MASK 0x3 +#define MT7620_GPIO_MODE_WDT_SHIFT 21 + +#define MT7620_GPIO_MODE_I2C 0 +#define MT7620_GPIO_MODE_UART1 5 +#define MT7620_GPIO_MODE_MDIO 8 +#define MT7620_GPIO_MODE_RGMII1 9 +#define MT7620_GPIO_MODE_RGMII2 10 +#define MT7620_GPIO_MODE_SPI 11 +#define MT7620_GPIO_MODE_SPI_REF_CLK 12 +#define MT7620_GPIO_MODE_WLED 13 +#define MT7620_GPIO_MODE_JTAG 15 +#define MT7620_GPIO_MODE_EPHY 15 +#define MT7620_GPIO_MODE_PA 20 + +static inline int mt7620_get_eco(void) +{ + return rt_sysc_r32(SYSC_REG_CHIP_REV) & CHIP_REV_ECO_MASK; +} #endif diff --git 
a/arch/mips/include/asm/mach-ralink/pinmux.h b/arch/mips/include/asm/mach-ralink/pinmux.h new file mode 100644 index 000000000000..be106cb2e26d --- /dev/null +++ b/arch/mips/include/asm/mach-ralink/pinmux.h @@ -0,0 +1,55 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * publishhed by the Free Software Foundation. + * + * Copyright (C) 2012 John Crispin <blogic@openwrt.org> + */ + +#ifndef _RT288X_PINMUX_H__ +#define _RT288X_PINMUX_H__ + +#define FUNC(name, value, pin_first, pin_count) \ + { name, value, pin_first, pin_count } + +#define GRP(_name, _func, _mask, _shift) \ + { .name = _name, .mask = _mask, .shift = _shift, \ + .func = _func, .gpio = _mask, \ + .func_count = ARRAY_SIZE(_func) } + +#define GRP_G(_name, _func, _mask, _gpio, _shift) \ + { .name = _name, .mask = _mask, .shift = _shift, \ + .func = _func, .gpio = _gpio, \ + .func_count = ARRAY_SIZE(_func) } + +struct rt2880_pmx_group; + +struct rt2880_pmx_func { + const char *name; + const char value; + + int pin_first; + int pin_count; + int *pins; + + int *groups; + int group_count; + + int enabled; +}; + +struct rt2880_pmx_group { + const char *name; + int enabled; + + const u32 shift; + const char mask; + const char gpio; + + struct rt2880_pmx_func *func; + int func_count; +}; + +extern struct rt2880_pmx_group *rt2880_pinmux_data; + +#endif diff --git a/arch/mips/include/asm/mach-ralink/ralink_regs.h b/arch/mips/include/asm/mach-ralink/ralink_regs.h index 5a508f9f9432..bd93014490df 100644 --- a/arch/mips/include/asm/mach-ralink/ralink_regs.h +++ b/arch/mips/include/asm/mach-ralink/ralink_regs.h @@ -26,6 +26,13 @@ static inline u32 rt_sysc_r32(unsigned reg) return __raw_readl(rt_sysc_membase + reg); } +static inline void rt_sysc_m32(u32 clr, u32 set, unsigned reg) +{ + u32 val = rt_sysc_r32(reg) & ~clr; + + __raw_writel(val | set, rt_sysc_membase + reg); +} + static inline void rt_memc_w32(u32 val, unsigned reg) { __raw_writel(val, rt_memc_membase + reg); diff --git a/arch/mips/include/asm/mach-ralink/rt305x.h b/arch/mips/include/asm/mach-ralink/rt305x.h index 069bf37a6010..96f731bac79a 100644 --- a/arch/mips/include/asm/mach-ralink/rt305x.h +++ b/arch/mips/include/asm/mach-ralink/rt305x.h @@ -125,24 +125,29 @@ static inline int soc_is_rt5350(void) #define RT305X_GPIO_GE0_TXD0 40 #define RT305X_GPIO_GE0_RXCLK 51 -#define RT305X_GPIO_MODE_I2C BIT(0) -#define RT305X_GPIO_MODE_SPI BIT(1) #define RT305X_GPIO_MODE_UART0_SHIFT 2 #define RT305X_GPIO_MODE_UART0_MASK 0x7 #define RT305X_GPIO_MODE_UART0(x) ((x) << RT305X_GPIO_MODE_UART0_SHIFT) -#define RT305X_GPIO_MODE_UARTF 0x0 -#define RT305X_GPIO_MODE_PCM_UARTF 0x1 -#define RT305X_GPIO_MODE_PCM_I2S 0x2 -#define RT305X_GPIO_MODE_I2S_UARTF 0x3 -#define RT305X_GPIO_MODE_PCM_GPIO 0x4 -#define RT305X_GPIO_MODE_GPIO_UARTF 0x5 -#define RT305X_GPIO_MODE_GPIO_I2S 0x6 -#define RT305X_GPIO_MODE_GPIO 0x7 -#define RT305X_GPIO_MODE_UART1 BIT(5) -#define RT305X_GPIO_MODE_JTAG BIT(6) -#define RT305X_GPIO_MODE_MDIO BIT(7) -#define RT305X_GPIO_MODE_SDRAM BIT(8) -#define RT305X_GPIO_MODE_RGMII BIT(9) +#define RT305X_GPIO_MODE_UARTF 0 +#define RT305X_GPIO_MODE_PCM_UARTF 1 +#define RT305X_GPIO_MODE_PCM_I2S 2 +#define RT305X_GPIO_MODE_I2S_UARTF 3 +#define RT305X_GPIO_MODE_PCM_GPIO 4 +#define RT305X_GPIO_MODE_GPIO_UARTF 5 +#define RT305X_GPIO_MODE_GPIO_I2S 6 +#define RT305X_GPIO_MODE_GPIO 7 + +#define RT305X_GPIO_MODE_I2C 0 +#define RT305X_GPIO_MODE_SPI 1 +#define RT305X_GPIO_MODE_UART1 5 +#define 
RT305X_GPIO_MODE_JTAG 6 +#define RT305X_GPIO_MODE_MDIO 7 +#define RT305X_GPIO_MODE_SDRAM 8 +#define RT305X_GPIO_MODE_RGMII 9 +#define RT5350_GPIO_MODE_PHY_LED 14 +#define RT5350_GPIO_MODE_SPI_CS1 21 +#define RT3352_GPIO_MODE_LNA 18 +#define RT3352_GPIO_MODE_PA 20 #define RT3352_SYSC_REG_SYSCFG0 0x010 #define RT3352_SYSC_REG_SYSCFG1 0x014 diff --git a/arch/mips/include/asm/mach-ralink/rt3883.h b/arch/mips/include/asm/mach-ralink/rt3883.h index 058382f37f92..0fbe6f9257cd 100644 --- a/arch/mips/include/asm/mach-ralink/rt3883.h +++ b/arch/mips/include/asm/mach-ralink/rt3883.h @@ -112,8 +112,6 @@ #define RT3883_CLKCFG1_PCI_CLK_EN BIT(19) #define RT3883_CLKCFG1_UPHY0_CLK_EN BIT(18) -#define RT3883_GPIO_MODE_I2C BIT(0) -#define RT3883_GPIO_MODE_SPI BIT(1) #define RT3883_GPIO_MODE_UART0_SHIFT 2 #define RT3883_GPIO_MODE_UART0_MASK 0x7 #define RT3883_GPIO_MODE_UART0(x) ((x) << RT3883_GPIO_MODE_UART0_SHIFT) @@ -125,11 +123,15 @@ #define RT3883_GPIO_MODE_GPIO_UARTF 0x5 #define RT3883_GPIO_MODE_GPIO_I2S 0x6 #define RT3883_GPIO_MODE_GPIO 0x7 -#define RT3883_GPIO_MODE_UART1 BIT(5) -#define RT3883_GPIO_MODE_JTAG BIT(6) -#define RT3883_GPIO_MODE_MDIO BIT(7) -#define RT3883_GPIO_MODE_GE1 BIT(9) -#define RT3883_GPIO_MODE_GE2 BIT(10) + +#define RT3883_GPIO_MODE_I2C 0 +#define RT3883_GPIO_MODE_SPI 1 +#define RT3883_GPIO_MODE_UART1 5 +#define RT3883_GPIO_MODE_JTAG 6 +#define RT3883_GPIO_MODE_MDIO 7 +#define RT3883_GPIO_MODE_GE1 9 +#define RT3883_GPIO_MODE_GE2 10 + #define RT3883_GPIO_MODE_PCI_SHIFT 11 #define RT3883_GPIO_MODE_PCI_MASK 0x7 #define RT3883_GPIO_MODE_PCI (RT3883_GPIO_MODE_PCI_MASK << RT3883_GPIO_MODE_PCI_SHIFT) diff --git a/arch/mips/include/asm/mach-sead3/irq.h b/arch/mips/include/asm/mach-sead3/irq.h index d8106f75b9af..5d154cfbcf4c 100644 --- a/arch/mips/include/asm/mach-sead3/irq.h +++ b/arch/mips/include/asm/mach-sead3/irq.h @@ -1,7 +1,6 @@ #ifndef __ASM_MACH_MIPS_IRQ_H #define __ASM_MACH_MIPS_IRQ_H -#define GIC_NUM_INTRS (24 + NR_CPUS * 2) #define NR_IRQS 256 diff --git a/arch/mips/include/asm/mach-tx39xx/ioremap.h b/arch/mips/include/asm/mach-tx39xx/ioremap.h index 93c6c04ffda3..0874cd2b06d7 100644 --- a/arch/mips/include/asm/mach-tx39xx/ioremap.h +++ b/arch/mips/include/asm/mach-tx39xx/ioremap.h @@ -15,12 +15,12 @@ * Allow physical addresses to be fixed up to help peripherals located * outside the low 32-bit range -- generic pass-through version. */ -static inline phys_t fixup_bigphys_addr(phys_t phys_addr, phys_t size) +static inline phys_addr_t fixup_bigphys_addr(phys_addr_t phys_addr, phys_addr_t size) { return phys_addr; } -static inline void __iomem *plat_ioremap(phys_t offset, unsigned long size, +static inline void __iomem *plat_ioremap(phys_addr_t offset, unsigned long size, unsigned long flags) { #define TXX9_DIRECTMAP_BASE 0xff000000ul diff --git a/arch/mips/include/asm/mach-tx49xx/ioremap.h b/arch/mips/include/asm/mach-tx49xx/ioremap.h index 1e7beae72229..4b6a8441b25f 100644 --- a/arch/mips/include/asm/mach-tx49xx/ioremap.h +++ b/arch/mips/include/asm/mach-tx49xx/ioremap.h @@ -15,12 +15,12 @@ * Allow physical addresses to be fixed up to help peripherals located * outside the low 32-bit range -- generic pass-through version. 
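A hedged example, not taken from the patch: the single-bit GPIO_MODE macros above become group indices for the new rt2880 pinmux core, whose FUNC()/GRP() helpers (see pinmux.h earlier) might be filled in roughly as below; the pin ranges and table names are illustrative assumptions:

	#include <linux/init.h>
	#include <linux/kernel.h>	/* ARRAY_SIZE, used by GRP() */
	#include <asm/mach-ralink/pinmux.h>
	#include <asm/mach-ralink/rt305x.h>

	/* Illustrative mux functions; pin numbers are assumptions, not from the patch. */
	static struct rt2880_pmx_func demo_i2c_func[] = { FUNC("i2c", 0, 1, 2) };
	static struct rt2880_pmx_func demo_spi_func[] = { FUNC("spi", 0, 3, 4) };

	static struct rt2880_pmx_group demo_rt305x_pinmux_data[] = {
		GRP("i2c", demo_i2c_func, 1, RT305X_GPIO_MODE_I2C),
		GRP("spi", demo_spi_func, 1, RT305X_GPIO_MODE_SPI),
		{ 0 }
	};

	/* SoC setup would then publish the table through the extern pointer. */
	static void __init demo_pinmux_init(void)
	{
		rt2880_pinmux_data = demo_rt305x_pinmux_data;
	}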
*/ -static inline phys_t fixup_bigphys_addr(phys_t phys_addr, phys_t size) +static inline phys_addr_t fixup_bigphys_addr(phys_addr_t phys_addr, phys_addr_t size) { return phys_addr; } -static inline void __iomem *plat_ioremap(phys_t offset, unsigned long size, +static inline void __iomem *plat_ioremap(phys_addr_t offset, unsigned long size, unsigned long flags) { #ifdef CONFIG_64BIT diff --git a/arch/mips/include/asm/mips-boards/maltaint.h b/arch/mips/include/asm/mips-boards/maltaint.h index e330732ddf98..987ff580466b 100644 --- a/arch/mips/include/asm/mips-boards/maltaint.h +++ b/arch/mips/include/asm/mips-boards/maltaint.h @@ -10,7 +10,7 @@ #ifndef _MIPS_MALTAINT_H #define _MIPS_MALTAINT_H -#define MIPS_GIC_IRQ_BASE (MIPS_CPU_IRQ_BASE + 8) +#include <linux/irqchip/mips-gic.h> /* * Interrupts 0..15 are used for Malta ISA compatible interrupts @@ -22,29 +22,28 @@ #define MIPSCPU_INT_SW1 1 #define MIPSCPU_INT_MB0 2 #define MIPSCPU_INT_I8259A MIPSCPU_INT_MB0 +#define MIPSCPU_INT_GIC MIPSCPU_INT_MB0 /* GIC chained interrupt */ #define MIPSCPU_INT_MB1 3 #define MIPSCPU_INT_SMI MIPSCPU_INT_MB1 -#define MIPSCPU_INT_IPI0 MIPSCPU_INT_MB1 /* GIC IPI */ #define MIPSCPU_INT_MB2 4 -#define MIPSCPU_INT_IPI1 MIPSCPU_INT_MB2 /* GIC IPI */ #define MIPSCPU_INT_MB3 5 #define MIPSCPU_INT_COREHI MIPSCPU_INT_MB3 #define MIPSCPU_INT_MB4 6 #define MIPSCPU_INT_CORELO MIPSCPU_INT_MB4 /* - * Interrupts 64..127 are used for Soc-it Classic interrupts + * Interrupts 96..127 are used for Soc-it Classic interrupts */ -#define MSC01C_INT_BASE 64 +#define MSC01C_INT_BASE 96 /* SOC-it Classic interrupt offsets */ #define MSC01C_INT_TMR 0 #define MSC01C_INT_PCI 1 /* - * Interrupts 64..127 are used for Soc-it EIC interrupts + * Interrupts 96..127 are used for Soc-it EIC interrupts */ -#define MSC01E_INT_BASE 64 +#define MSC01E_INT_BASE 96 /* SOC-it EIC interrupt offsets */ #define MSC01E_INT_SW0 1 @@ -63,14 +62,7 @@ #define MSC01E_INT_PERFCTR 10 #define MSC01E_INT_CPUCTR 11 -/* External Interrupts used for IPI */ -#define GIC_IPI_EXT_INTR_RESCHED_VPE0 16 -#define GIC_IPI_EXT_INTR_CALLFNC_VPE0 17 -#define GIC_IPI_EXT_INTR_RESCHED_VPE1 18 -#define GIC_IPI_EXT_INTR_CALLFNC_VPE1 19 -#define GIC_IPI_EXT_INTR_RESCHED_VPE2 20 -#define GIC_IPI_EXT_INTR_CALLFNC_VPE2 21 -#define GIC_IPI_EXT_INTR_RESCHED_VPE3 22 -#define GIC_IPI_EXT_INTR_CALLFNC_VPE3 23 +/* GIC external interrupts */ +#define GIC_INT_I8259A GIC_SHARED_TO_HWIRQ(3) #endif /* !(_MIPS_MALTAINT_H) */ diff --git a/arch/mips/include/asm/mips-boards/sead3int.h b/arch/mips/include/asm/mips-boards/sead3int.h index 6b17aaf7d901..8932c7de0419 100644 --- a/arch/mips/include/asm/mips-boards/sead3int.h +++ b/arch/mips/include/asm/mips-boards/sead3int.h @@ -10,10 +10,23 @@ #ifndef _MIPS_SEAD3INT_H #define _MIPS_SEAD3INT_H +#include <linux/irqchip/mips-gic.h> + /* SEAD-3 GIC address space definitions. 
*/ #define GIC_BASE_ADDR 0x1b1c0000 #define GIC_ADDRSPACE_SZ (128 * 1024) -#define MIPS_GIC_IRQ_BASE (MIPS_CPU_IRQ_BASE + 0) +/* CPU interrupt offsets */ +#define CPU_INT_GIC 2 +#define CPU_INT_EHCI 2 +#define CPU_INT_UART0 4 +#define CPU_INT_UART1 4 +#define CPU_INT_NET 6 + +/* GIC interrupt offsets */ +#define GIC_INT_NET GIC_SHARED_TO_HWIRQ(0) +#define GIC_INT_UART1 GIC_SHARED_TO_HWIRQ(2) +#define GIC_INT_UART0 GIC_SHARED_TO_HWIRQ(3) +#define GIC_INT_EHCI GIC_SHARED_TO_HWIRQ(5) #endif /* !(_MIPS_SEAD3INT_H) */ diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h index 6a9d2dd005ca..b95a827d763e 100644 --- a/arch/mips/include/asm/mips-cm.h +++ b/arch/mips/include/asm/mips-cm.h @@ -30,7 +30,7 @@ extern void __iomem *mips_cm_l2sync_base; * different way by defining a function with the same prototype except for the * name mips_cm_phys_base (without underscores). */ -extern phys_t __mips_cm_phys_base(void); +extern phys_addr_t __mips_cm_phys_base(void); /** * mips_cm_probe - probe for a Coherence Manager diff --git a/arch/mips/include/asm/mips-cpc.h b/arch/mips/include/asm/mips-cpc.h index e139a534e0fd..1cebe8c79051 100644 --- a/arch/mips/include/asm/mips-cpc.h +++ b/arch/mips/include/asm/mips-cpc.h @@ -25,7 +25,7 @@ extern void __iomem *mips_cpc_base; * memory mapped registers. This is platform dependant & must therefore be * implemented per-platform. */ -extern phys_t mips_cpc_default_phys_base(void); +extern phys_addr_t mips_cpc_default_phys_base(void); /** * mips_cpc_phys_base - retrieve the physical base address of the CPC @@ -35,7 +35,7 @@ extern phys_t mips_cpc_default_phys_base(void); * is present. It may be overriden by individual platforms which determine * this address in a different way. */ -extern phys_t __weak mips_cpc_phys_base(void); +extern phys_addr_t __weak mips_cpc_phys_base(void); /** * mips_cpc_probe - probe for a Cluster Power Controller diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h index 22a135ac91de..5e4aef304b02 100644 --- a/arch/mips/include/asm/mipsregs.h +++ b/arch/mips/include/asm/mipsregs.h @@ -653,6 +653,9 @@ #define MIPS_CONF5_NF (_ULCAST_(1) << 0) #define MIPS_CONF5_UFR (_ULCAST_(1) << 2) #define MIPS_CONF5_MRP (_ULCAST_(1) << 3) +#define MIPS_CONF5_MVH (_ULCAST_(1) << 5) +#define MIPS_CONF5_FRE (_ULCAST_(1) << 8) +#define MIPS_CONF5_UFE (_ULCAST_(1) << 9) #define MIPS_CONF5_MSAEN (_ULCAST_(1) << 27) #define MIPS_CONF5_EVA (_ULCAST_(1) << 28) #define MIPS_CONF5_CV (_ULCAST_(1) << 29) @@ -694,6 +697,7 @@ #define MIPS_FPIR_W (_ULCAST_(1) << 20) #define MIPS_FPIR_L (_ULCAST_(1) << 21) #define MIPS_FPIR_F64 (_ULCAST_(1) << 22) +#define MIPS_FPIR_FREP (_ULCAST_(1) << 29) /* * Bits in the MIPS32 Memory Segmentation registers. 
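A brief aside, not part of the patch: the new Config5 bits are plain masks for the existing read_c0_config5() accessor, so checking whether the FRE/UFE emulation enables are currently set could look like this minimal sketch (the helper name is invented):

	#include <asm/mipsregs.h>

	/* Hypothetical helper: non-zero when Config5.FRE or Config5.UFE is set. */
	static inline unsigned int demo_config5_fre_bits(void)
	{
		return read_c0_config5() & (MIPS_CONF5_FRE | MIPS_CONF5_UFE);
	}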
@@ -994,6 +998,39 @@ do { \ local_irq_restore(__flags); \ } while (0) +#define __readx_32bit_c0_register(source) \ +({ \ + unsigned int __res; \ + \ + __asm__ __volatile__( \ + " .set push \n" \ + " .set noat \n" \ + " .set mips32r2 \n" \ + " .insn \n" \ + " # mfhc0 $1, %1 \n" \ + " .word (0x40410000 | ((%1 & 0x1f) << 11)) \n" \ + " move %0, $1 \n" \ + " .set pop \n" \ + : "=r" (__res) \ + : "i" (source)); \ + __res; \ +}) + +#define __writex_32bit_c0_register(register, value) \ +do { \ + __asm__ __volatile__( \ + " .set push \n" \ + " .set noat \n" \ + " .set mips32r2 \n" \ + " move $1, %0 \n" \ + " # mthc0 $1, %1 \n" \ + " .insn \n" \ + " .word (0x40c10000 | ((%1 & 0x1f) << 11)) \n" \ + " .set pop \n" \ + : \ + : "r" (value), "i" (register)); \ +} while (0) + #define read_c0_index() __read_32bit_c0_register($0, 0) #define write_c0_index(val) __write_32bit_c0_register($0, 0, val) @@ -1003,9 +1040,15 @@ do { \ #define read_c0_entrylo0() __read_ulong_c0_register($2, 0) #define write_c0_entrylo0(val) __write_ulong_c0_register($2, 0, val) +#define readx_c0_entrylo0() __readx_32bit_c0_register(2) +#define writex_c0_entrylo0(val) __writex_32bit_c0_register(2, val) + #define read_c0_entrylo1() __read_ulong_c0_register($3, 0) #define write_c0_entrylo1(val) __write_ulong_c0_register($3, 0, val) +#define readx_c0_entrylo1() __readx_32bit_c0_register(3) +#define writex_c0_entrylo1(val) __writex_32bit_c0_register(3, val) + #define read_c0_conf() __read_32bit_c0_register($3, 0) #define write_c0_conf(val) __write_32bit_c0_register($3, 0, val) diff --git a/arch/mips/include/asm/octeon/cvmx-cmd-queue.h b/arch/mips/include/asm/octeon/cvmx-cmd-queue.h index 024a71b2bff9..75739c83f07e 100644 --- a/arch/mips/include/asm/octeon/cvmx-cmd-queue.h +++ b/arch/mips/include/asm/octeon/cvmx-cmd-queue.h @@ -76,6 +76,8 @@ #include <linux/prefetch.h> +#include <asm/compiler.h> + #include <asm/octeon/cvmx-fpa.h> /** * By default we disable the max depth support. 
Most programs @@ -273,7 +275,7 @@ static inline void __cvmx_cmd_queue_lock(cvmx_cmd_queue_id_t queue_id, " lbu %[ticket], %[now_serving]\n" "4:\n" ".set pop\n" : - [ticket_ptr] "=m"(__cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)]), + [ticket_ptr] "=" GCC_OFF12_ASM()(__cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)]), [now_serving] "=m"(qptr->now_serving), [ticket] "=r"(tmp), [my_ticket] "=r"(my_ticket) ); diff --git a/arch/mips/include/asm/octeon/cvmx-pow.h b/arch/mips/include/asm/octeon/cvmx-pow.h index 4b4d0ecfd9eb..2188e65afb86 100644 --- a/arch/mips/include/asm/octeon/cvmx-pow.h +++ b/arch/mips/include/asm/octeon/cvmx-pow.h @@ -1066,7 +1066,7 @@ static inline void __cvmx_pow_warn_if_pending_switch(const char *function) uint64_t switch_complete; CVMX_MF_CHORD(switch_complete); if (!switch_complete) - pr_warning("%s called with tag switch in progress\n", function); + pr_warn("%s called with tag switch in progress\n", function); } /** @@ -1084,8 +1084,7 @@ static inline void cvmx_pow_tag_sw_wait(void) if (unlikely(switch_complete)) break; if (unlikely(cvmx_get_cycle() > start_cycle + MAX_CYCLES)) { - pr_warning("Tag switch is taking a long time, " - "possible deadlock\n"); + pr_warn("Tag switch is taking a long time, possible deadlock\n"); start_cycle = -MAX_CYCLES - 1; } } @@ -1296,19 +1295,16 @@ static inline void cvmx_pow_tag_sw_nocheck(uint32_t tag, __cvmx_pow_warn_if_pending_switch(__func__); current_tag = cvmx_pow_get_current_tag(); if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL) - pr_warning("%s called with NULL_NULL tag\n", - __func__); + pr_warn("%s called with NULL_NULL tag\n", __func__); if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL) - pr_warning("%s called with NULL tag\n", __func__); + pr_warn("%s called with NULL tag\n", __func__); if ((current_tag.s.type == tag_type) && (current_tag.s.tag == tag)) - pr_warning("%s called to perform a tag switch to the " - "same tag\n", - __func__); + pr_warn("%s called to perform a tag switch to the same tag\n", + __func__); if (tag_type == CVMX_POW_TAG_TYPE_NULL) - pr_warning("%s called to perform a tag switch to " - "NULL. Use cvmx_pow_tag_sw_null() instead\n", - __func__); + pr_warn("%s called to perform a tag switch to NULL. Use cvmx_pow_tag_sw_null() instead\n", + __func__); } /* @@ -1407,23 +1403,19 @@ static inline void cvmx_pow_tag_sw_full_nocheck(cvmx_wqe_t *wqp, uint32_t tag, __cvmx_pow_warn_if_pending_switch(__func__); current_tag = cvmx_pow_get_current_tag(); if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL) - pr_warning("%s called with NULL_NULL tag\n", - __func__); + pr_warn("%s called with NULL_NULL tag\n", __func__); if ((current_tag.s.type == tag_type) && (current_tag.s.tag == tag)) - pr_warning("%s called to perform a tag switch to " - "the same tag\n", - __func__); + pr_warn("%s called to perform a tag switch to the same tag\n", + __func__); if (tag_type == CVMX_POW_TAG_TYPE_NULL) - pr_warning("%s called to perform a tag switch to " - "NULL. Use cvmx_pow_tag_sw_null() instead\n", - __func__); + pr_warn("%s called to perform a tag switch to NULL. 
Use cvmx_pow_tag_sw_null() instead\n", + __func__); if (wqp != cvmx_phys_to_ptr(0x80)) if (wqp != cvmx_pow_get_current_wqp()) - pr_warning("%s passed WQE(%p) doesn't match " - "the address in the POW(%p)\n", - __func__, wqp, - cvmx_pow_get_current_wqp()); + pr_warn("%s passed WQE(%p) doesn't match the address in the POW(%p)\n", + __func__, wqp, + cvmx_pow_get_current_wqp()); } /* @@ -1507,12 +1499,10 @@ static inline void cvmx_pow_tag_sw_null_nocheck(void) __cvmx_pow_warn_if_pending_switch(__func__); current_tag = cvmx_pow_get_current_tag(); if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL) - pr_warning("%s called with NULL_NULL tag\n", - __func__); + pr_warn("%s called with NULL_NULL tag\n", __func__); if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL) - pr_warning("%s called when we already have a " - "NULL tag\n", - __func__); + pr_warn("%s called when we already have a NULL tag\n", + __func__); } tag_req.u64 = 0; @@ -1725,17 +1715,14 @@ static inline void cvmx_pow_tag_sw_desched_nocheck( __cvmx_pow_warn_if_pending_switch(__func__); current_tag = cvmx_pow_get_current_tag(); if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL) - pr_warning("%s called with NULL_NULL tag\n", - __func__); + pr_warn("%s called with NULL_NULL tag\n", __func__); if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL) - pr_warning("%s called with NULL tag. Deschedule not " - "allowed from NULL state\n", - __func__); + pr_warn("%s called with NULL tag. Deschedule not allowed from NULL state\n", + __func__); if ((current_tag.s.type != CVMX_POW_TAG_TYPE_ATOMIC) && (tag_type != CVMX_POW_TAG_TYPE_ATOMIC)) - pr_warning("%s called where neither the before or " - "after tag is ATOMIC\n", - __func__); + pr_warn("%s called where neither the before or after tag is ATOMIC\n", + __func__); } tag_req.u64 = 0; @@ -1832,12 +1819,10 @@ static inline void cvmx_pow_desched(uint64_t no_sched) __cvmx_pow_warn_if_pending_switch(__func__); current_tag = cvmx_pow_get_current_tag(); if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL) - pr_warning("%s called with NULL_NULL tag\n", - __func__); + pr_warn("%s called with NULL_NULL tag\n", __func__); if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL) - pr_warning("%s called with NULL tag. Deschedule not " - "expected from NULL state\n", - __func__); + pr_warn("%s called with NULL tag. 
Deschedule not expected from NULL state\n", + __func__); } /* Need to make sure any writes to the work queue entry are complete */ diff --git a/arch/mips/include/asm/octeon/cvmx.h b/arch/mips/include/asm/octeon/cvmx.h index f991e7701d3d..33db1c806b01 100644 --- a/arch/mips/include/asm/octeon/cvmx.h +++ b/arch/mips/include/asm/octeon/cvmx.h @@ -451,67 +451,4 @@ static inline uint32_t cvmx_octeon_num_cores(void) return cvmx_pop(ciu_fuse); } -/** - * Read a byte of fuse data - * @byte_addr: address to read - * - * Returns fuse value: 0 or 1 - */ -static uint8_t cvmx_fuse_read_byte(int byte_addr) -{ - union cvmx_mio_fus_rcmd read_cmd; - - read_cmd.u64 = 0; - read_cmd.s.addr = byte_addr; - read_cmd.s.pend = 1; - cvmx_write_csr(CVMX_MIO_FUS_RCMD, read_cmd.u64); - while ((read_cmd.u64 = cvmx_read_csr(CVMX_MIO_FUS_RCMD)) - && read_cmd.s.pend) - ; - return read_cmd.s.dat; -} - -/** - * Read a single fuse bit - * - * @fuse: Fuse number (0-1024) - * - * Returns fuse value: 0 or 1 - */ -static inline int cvmx_fuse_read(int fuse) -{ - return (cvmx_fuse_read_byte(fuse >> 3) >> (fuse & 0x7)) & 1; -} - -static inline int cvmx_octeon_model_CN36XX(void) -{ - return OCTEON_IS_MODEL(OCTEON_CN38XX) - && !cvmx_octeon_is_pass1() - && cvmx_fuse_read(264); -} - -static inline int cvmx_octeon_zip_present(void) -{ - return octeon_has_feature(OCTEON_FEATURE_ZIP); -} - -static inline int cvmx_octeon_dfa_present(void) -{ - if (!OCTEON_IS_MODEL(OCTEON_CN38XX) - && !OCTEON_IS_MODEL(OCTEON_CN31XX) - && !OCTEON_IS_MODEL(OCTEON_CN58XX)) - return 0; - else if (OCTEON_IS_MODEL(OCTEON_CN3020)) - return 0; - else if (cvmx_octeon_is_pass1()) - return 1; - else - return !cvmx_fuse_read(120); -} - -static inline int cvmx_octeon_crypto_present(void) -{ - return octeon_has_feature(OCTEON_FEATURE_CRYPTO); -} - #endif /* __CVMX_H__ */ diff --git a/arch/mips/include/asm/octeon/octeon-feature.h b/arch/mips/include/asm/octeon/octeon-feature.h index 90e05a8d4b15..c4fe81f47f53 100644 --- a/arch/mips/include/asm/octeon/octeon-feature.h +++ b/arch/mips/include/asm/octeon/octeon-feature.h @@ -86,8 +86,6 @@ enum octeon_feature { OCTEON_MAX_FEATURE }; -static inline int cvmx_fuse_read(int fuse); - /** * Determine if the current Octeon supports a specific feature. 
These * checks have been optimized to be fairly quick, but they should still @@ -105,33 +103,6 @@ static inline int octeon_has_feature(enum octeon_feature feature) case OCTEON_FEATURE_SAAD: return !OCTEON_IS_MODEL(OCTEON_CN3XXX); - case OCTEON_FEATURE_ZIP: - if (OCTEON_IS_MODEL(OCTEON_CN30XX) - || OCTEON_IS_MODEL(OCTEON_CN50XX) - || OCTEON_IS_MODEL(OCTEON_CN52XX)) - return 0; - else if (OCTEON_IS_MODEL(OCTEON_CN38XX_PASS1)) - return 1; - else - return !cvmx_fuse_read(121); - - case OCTEON_FEATURE_CRYPTO: - if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) { - union cvmx_mio_fus_dat2 fus_2; - fus_2.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT2); - if (fus_2.s.nocrypto || fus_2.s.nomul) { - return 0; - } else if (!fus_2.s.dorm_crypto) { - return 1; - } else { - union cvmx_rnm_ctl_status st; - st.u64 = cvmx_read_csr(CVMX_RNM_CTL_STATUS); - return st.s.eer_val; - } - } else { - return !cvmx_fuse_read(90); - } - case OCTEON_FEATURE_DORM_CRYPTO: if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) { union cvmx_mio_fus_dat2 fus_2; @@ -188,29 +159,6 @@ static inline int octeon_has_feature(enum octeon_feature feature) && !OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) && !OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X); - case OCTEON_FEATURE_DFA: - if (!OCTEON_IS_MODEL(OCTEON_CN38XX) - && !OCTEON_IS_MODEL(OCTEON_CN31XX) - && !OCTEON_IS_MODEL(OCTEON_CN58XX)) - return 0; - else if (OCTEON_IS_MODEL(OCTEON_CN3020)) - return 0; - else - return !cvmx_fuse_read(120); - - case OCTEON_FEATURE_HFA: - if (!OCTEON_IS_MODEL(OCTEON_CN6XXX)) - return 0; - else - return !cvmx_fuse_read(90); - - case OCTEON_FEATURE_DFM: - if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) - || OCTEON_IS_MODEL(OCTEON_CN66XX))) - return 0; - else - return !cvmx_fuse_read(90); - case OCTEON_FEATURE_MDIO_CLAUSE_45: return !(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN58XX) diff --git a/arch/mips/include/asm/octeon/octeon-model.h b/arch/mips/include/asm/octeon/octeon-model.h index e2c122c6a657..e8a1c2fd52cd 100644 --- a/arch/mips/include/asm/octeon/octeon-model.h +++ b/arch/mips/include/asm/octeon/octeon-model.h @@ -326,8 +326,7 @@ static inline int __octeon_is_model_runtime__(uint32_t model) #define OCTEON_IS_COMMON_BINARY() 1 #undef OCTEON_MODEL -const char *octeon_model_get_string(uint32_t chip_id); -const char *octeon_model_get_string_buffer(uint32_t chip_id, char *buffer); +const char *__init octeon_model_get_string(uint32_t chip_id); /* * Return the octeon family, i.e., ProcessorID of the PrID register. diff --git a/arch/mips/include/asm/paccess.h b/arch/mips/include/asm/paccess.h index 2474fc5d1751..af81ab0da55f 100644 --- a/arch/mips/include/asm/paccess.h +++ b/arch/mips/include/asm/paccess.h @@ -56,6 +56,7 @@ struct __large_pstruct { unsigned long buf[100]; }; "1:\t" insn "\t%1,%2\n\t" \ "move\t%0,$0\n" \ "2:\n\t" \ + ".insn\n\t" \ ".section\t.fixup,\"ax\"\n" \ "3:\tli\t%0,%3\n\t" \ "move\t%1,$0\n\t" \ @@ -94,6 +95,7 @@ extern void __get_dbe_unknown(void); "1:\t" insn "\t%1,%2\n\t" \ "move\t%0,$0\n" \ "2:\n\t" \ + ".insn\n\t" \ ".section\t.fixup,\"ax\"\n" \ "3:\tli\t%0,%3\n\t" \ "j\t2b\n\t" \ diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h index 3be81803595d..154b70a10483 100644 --- a/arch/mips/include/asm/page.h +++ b/arch/mips/include/asm/page.h @@ -116,7 +116,7 @@ extern void copy_user_highpage(struct page *to, struct page *from, /* * These are used to make use of C type-checking.. 
*/ -#ifdef CONFIG_64BIT_PHYS_ADDR +#ifdef CONFIG_PHYS_ADDR_T_64BIT #ifdef CONFIG_CPU_MIPS32 typedef struct { unsigned long pte_low, pte_high; } pte_t; #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32)) diff --git a/arch/mips/include/asm/pci.h b/arch/mips/include/asm/pci.h index 974b0e308963..69529624a005 100644 --- a/arch/mips/include/asm/pci.h +++ b/arch/mips/include/asm/pci.h @@ -84,7 +84,7 @@ static inline void pci_resource_to_user(const struct pci_dev *dev, int bar, const struct resource *rsrc, resource_size_t *start, resource_size_t *end) { - phys_t size = resource_size(rsrc); + phys_addr_t size = resource_size(rsrc); *start = fixup_bigphys_addr(rsrc->start, size); *end = rsrc->start + size; diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h index cd7d6064bcbe..68984b612f9d 100644 --- a/arch/mips/include/asm/pgtable-32.h +++ b/arch/mips/include/asm/pgtable-32.h @@ -69,7 +69,7 @@ extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1, # define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE) #endif -#ifdef CONFIG_64BIT_PHYS_ADDR +#ifdef CONFIG_PHYS_ADDR_T_64BIT #define pte_ERROR(e) \ printk("%s:%d: bad pte %016Lx.\n", __FILE__, __LINE__, pte_val(e)) #else @@ -103,7 +103,7 @@ static inline void pmd_clear(pmd_t *pmdp) pmd_val(*pmdp) = ((unsigned long) invalid_pte_table); } -#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) +#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) #define pte_page(x) pfn_to_page(pte_pfn(x)) #define pte_pfn(x) ((unsigned long)((x).pte_high >> 6)) static inline pte_t @@ -126,7 +126,7 @@ pfn_pte(unsigned long pfn, pgprot_t prot) #define pte_pfn(x) ((unsigned long)((x).pte >> _PFN_SHIFT)) #define pfn_pte(pfn, prot) __pte(((unsigned long long)(pfn) << _PFN_SHIFT) | pgprot_val(prot)) #endif -#endif /* defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) */ +#endif /* defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) */ #define __pgd_offset(address) pgd_index(address) #define __pud_offset(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1)) @@ -155,73 +155,75 @@ pfn_pte(unsigned long pfn, pgprot_t prot) #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) /* Swap entries must have VALID bit cleared. 
*/ -#define __swp_type(x) (((x).val >> 10) & 0x1f) -#define __swp_offset(x) ((x).val >> 15) -#define __swp_entry(type,offset) \ - ((swp_entry_t) { ((type) << 10) | ((offset) << 15) }) +#define __swp_type(x) (((x).val >> 10) & 0x1f) +#define __swp_offset(x) ((x).val >> 15) +#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 10) | ((offset) << 15) }) +#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) +#define __swp_entry_to_pte(x) ((pte_t) { (x).val }) /* - * Bits 0, 4, 8, and 9 are taken, split up 28 bits of offset into this range: + * Encode and decode a nonlinear file mapping entry */ -#define PTE_FILE_MAX_BITS 28 - -#define pte_to_pgoff(_pte) ((((_pte).pte >> 1 ) & 0x07) | \ - (((_pte).pte >> 2 ) & 0x38) | \ - (((_pte).pte >> 10) << 6 )) +#define pte_to_pgoff(_pte) ((((_pte).pte >> 1 ) & 0x07) | \ + (((_pte).pte >> 2 ) & 0x38) | \ + (((_pte).pte >> 10) << 6 )) -#define pgoff_to_pte(off) ((pte_t) { (((off) & 0x07) << 1 ) | \ - (((off) & 0x38) << 2 ) | \ - (((off) >> 6 ) << 10) | \ - _PAGE_FILE }) +#define pgoff_to_pte(off) ((pte_t) { (((off) & 0x07) << 1 ) | \ + (((off) & 0x38) << 2 ) | \ + (((off) >> 6 ) << 10) | \ + _PAGE_FILE }) +/* + * Bits 0, 4, 8, and 9 are taken, split up 28 bits of offset into this range: + */ +#define PTE_FILE_MAX_BITS 28 #else +#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) + /* Swap entries must have VALID and GLOBAL bits cleared. */ -#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) -#define __swp_type(x) (((x).val >> 2) & 0x1f) -#define __swp_offset(x) ((x).val >> 7) -#define __swp_entry(type,offset) \ - ((swp_entry_t) { ((type) << 2) | ((offset) << 7) }) -#else -#define __swp_type(x) (((x).val >> 8) & 0x1f) -#define __swp_offset(x) ((x).val >> 13) -#define __swp_entry(type,offset) \ - ((swp_entry_t) { ((type) << 8) | ((offset) << 13) }) -#endif /* defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) */ +#define __swp_type(x) (((x).val >> 2) & 0x1f) +#define __swp_offset(x) ((x).val >> 7) +#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 2) | ((offset) << 7) }) +#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_high }) +#define __swp_entry_to_pte(x) ((pte_t) { 0, (x).val }) -#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) /* * Bits 0 and 1 of pte_high are taken, use the rest for the page offset... 
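As a quick sanity illustration (not in the patch): the regrouped encode/decode macros for the 64-bit-physaddr MIPS32 case are expected to round-trip; a sketch, assuming the usual kernel mm headers:

	#include <linux/bug.h>
	#include <linux/mm_types.h>	/* swp_entry_t */
	#include <asm/pgtable.h>

	static void demo_swp_roundtrip(void)
	{
		swp_entry_t ent = __swp_entry(2, 0x100);	/* type 2, offset 0x100 */
		pte_t pte = __swp_entry_to_pte(ent);		/* value lands in pte_high */

		WARN_ON(__swp_type(__pte_to_swp_entry(pte)) != 2);
		WARN_ON(__swp_offset(__pte_to_swp_entry(pte)) != 0x100);
	}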
*/ -#define PTE_FILE_MAX_BITS 30 - -#define pte_to_pgoff(_pte) ((_pte).pte_high >> 2) -#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) << 2 }) +#define pte_to_pgoff(_pte) ((_pte).pte_high >> 2) +#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) << 2 }) +#define PTE_FILE_MAX_BITS 30 #else /* - * Bits 0, 4, 6, and 7 are taken, split up 28 bits of offset into this range: + * Constraints: + * _PAGE_PRESENT at bit 0 + * _PAGE_MODIFIED at bit 4 + * _PAGE_GLOBAL at bit 6 + * _PAGE_VALID at bit 7 */ -#define PTE_FILE_MAX_BITS 28 +#define __swp_type(x) (((x).val >> 8) & 0x1f) +#define __swp_offset(x) ((x).val >> 13) +#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 8) | ((offset) << 13) }) +#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) +#define __swp_entry_to_pte(x) ((pte_t) { (x).val }) -#define pte_to_pgoff(_pte) ((((_pte).pte >> 1) & 0x7) | \ - (((_pte).pte >> 2) & 0x8) | \ - (((_pte).pte >> 8) << 4)) +/* + * Encode and decode a nonlinear file mapping entry + */ +#define pte_to_pgoff(_pte) ((((_pte).pte >> 1) & 0x7) | \ + (((_pte).pte >> 2) & 0x8) | \ + (((_pte).pte >> 8) << 4)) -#define pgoff_to_pte(off) ((pte_t) { (((off) & 0x7) << 1) | \ - (((off) & 0x8) << 2) | \ - (((off) >> 4) << 8) | \ - _PAGE_FILE }) -#endif +#define pgoff_to_pte(off) ((pte_t) { (((off) & 0x7) << 1) | \ + (((off) & 0x8) << 2) | \ + (((off) >> 4) << 8) | \ + _PAGE_FILE }) -#endif +#define PTE_FILE_MAX_BITS 28 +#endif /* defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) */ -#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) -#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_high }) -#define __swp_entry_to_pte(x) ((pte_t) { 0, (x).val }) -#else -#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) -#define __swp_entry_to_pte(x) ((pte_t) { (x).val }) -#endif +#endif /* defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) */ #endif /* _ASM_PGTABLE_32_H */ diff --git a/arch/mips/include/asm/pgtable-bits.h b/arch/mips/include/asm/pgtable-bits.h index e747bfa0be7e..ca11f14f40a3 100644 --- a/arch/mips/include/asm/pgtable-bits.h +++ b/arch/mips/include/asm/pgtable-bits.h @@ -32,39 +32,41 @@ * unpredictable things. The code (when it is written) to deal with * this problem will be in the update_mmu_cache() code for the r4k. 
*/ -#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) +#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) /* * The following bits are directly used by the TLB hardware */ -#define _PAGE_R4KBUG (1 << 0) /* workaround for r4k bug */ -#define _PAGE_GLOBAL (1 << 0) -#define _PAGE_VALID_SHIFT 1 +#define _PAGE_GLOBAL_SHIFT 0 +#define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT) +#define _PAGE_VALID_SHIFT (_PAGE_GLOBAL_SHIFT + 1) #define _PAGE_VALID (1 << _PAGE_VALID_SHIFT) -#define _PAGE_SILENT_READ (1 << 1) /* synonym */ -#define _PAGE_DIRTY_SHIFT 2 -#define _PAGE_DIRTY (1 << _PAGE_DIRTY_SHIFT) /* The MIPS dirty bit */ -#define _PAGE_SILENT_WRITE (1 << 2) -#define _CACHE_SHIFT 3 -#define _CACHE_MASK (7 << 3) +#define _PAGE_DIRTY_SHIFT (_PAGE_VALID_SHIFT + 1) +#define _PAGE_DIRTY (1 << _PAGE_DIRTY_SHIFT) +#define _CACHE_SHIFT (_PAGE_DIRTY_SHIFT + 1) +#define _CACHE_MASK (7 << _CACHE_SHIFT) /* * The following bits are implemented in software * * _PAGE_FILE semantics: set:pagecache unset:swap */ -#define _PAGE_PRESENT_SHIFT 6 +#define _PAGE_PRESENT_SHIFT (_CACHE_SHIFT + 3) #define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT) -#define _PAGE_READ_SHIFT 7 +#define _PAGE_READ_SHIFT (_PAGE_PRESENT_SHIFT + 1) #define _PAGE_READ (1 << _PAGE_READ_SHIFT) -#define _PAGE_WRITE_SHIFT 8 +#define _PAGE_WRITE_SHIFT (_PAGE_READ_SHIFT + 1) #define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT) -#define _PAGE_ACCESSED_SHIFT 9 +#define _PAGE_ACCESSED_SHIFT (_PAGE_WRITE_SHIFT + 1) #define _PAGE_ACCESSED (1 << _PAGE_ACCESSED_SHIFT) -#define _PAGE_MODIFIED_SHIFT 10 +#define _PAGE_MODIFIED_SHIFT (_PAGE_ACCESSED_SHIFT + 1) #define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT) -#define _PAGE_FILE (1 << 10) +#define _PAGE_SILENT_READ _PAGE_VALID +#define _PAGE_SILENT_WRITE _PAGE_DIRTY +#define _PAGE_FILE _PAGE_MODIFIED + +#define _PFN_SHIFT (PAGE_SHIFT - 12 + _CACHE_SHIFT + 3) #elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) @@ -172,7 +174,7 @@ #define _PFN_SHIFT (PAGE_SHIFT - 12 + _CACHE_SHIFT + 3) -#endif /* defined(CONFIG_64BIT_PHYS_ADDR && defined(CONFIG_CPU_MIPS32) */ +#endif /* defined(CONFIG_PHYS_ADDR_T_64BIT && defined(CONFIG_CPU_MIPS32) */ #ifndef _PFN_SHIFT #define _PFN_SHIFT PAGE_SHIFT diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h index d6d1928539b1..62a6ba383d4f 100644 --- a/arch/mips/include/asm/pgtable.h +++ b/arch/mips/include/asm/pgtable.h @@ -125,7 +125,7 @@ do { \ extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pteval); -#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) +#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) #define pte_none(pte) (!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL)) #define pte_present(pte) ((pte).pte_low & _PAGE_PRESENT) @@ -227,7 +227,7 @@ extern pgd_t swapper_pg_dir[]; * The following only work if pte_present() is true. * Undefined behaviour if not.. 
*/ -#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) +#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) static inline int pte_write(pte_t pte) { return pte.pte_low & _PAGE_WRITE; } static inline int pte_dirty(pte_t pte) { return pte.pte_low & _PAGE_MODIFIED; } static inline int pte_young(pte_t pte) { return pte.pte_low & _PAGE_ACCESSED; } @@ -297,13 +297,13 @@ static inline pte_t pte_wrprotect(pte_t pte) static inline pte_t pte_mkclean(pte_t pte) { - pte_val(pte) &= ~(_PAGE_MODIFIED|_PAGE_SILENT_WRITE); + pte_val(pte) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE); return pte; } static inline pte_t pte_mkold(pte_t pte) { - pte_val(pte) &= ~(_PAGE_ACCESSED|_PAGE_SILENT_READ); + pte_val(pte) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ); return pte; } @@ -382,13 +382,13 @@ static inline pgprot_t pgprot_writecombine(pgprot_t _prot) */ #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) -#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) +#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { pte.pte_low &= _PAGE_CHG_MASK; - pte.pte_high &= ~0x3f; + pte.pte_high &= (_PFN_MASK | _CACHE_MASK); pte.pte_low |= pgprot_val(newprot); - pte.pte_high |= pgprot_val(newprot) & 0x3f; + pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK); return pte; } #else @@ -419,7 +419,7 @@ static inline void update_mmu_cache_pmd(struct vm_area_struct *vma, #define kern_addr_valid(addr) (1) -#ifdef CONFIG_64BIT_PHYS_ADDR +#ifdef CONFIG_PHYS_ADDR_T_64BIT extern int remap_pfn_range(struct vm_area_struct *vma, unsigned long from, unsigned long pfn, unsigned long size, pgprot_t prot); static inline int io_remap_pfn_range(struct vm_area_struct *vma, @@ -428,7 +428,7 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long size, pgprot_t prot) { - phys_t phys_addr_high = fixup_bigphys_addr(pfn << PAGE_SHIFT, size); + phys_addr_t phys_addr_high = fixup_bigphys_addr(pfn << PAGE_SHIFT, size); return remap_pfn_range(vma, vaddr, phys_addr_high >> PAGE_SHIFT, size, prot); } #define io_remap_pfn_range io_remap_pfn_range diff --git a/arch/mips/include/asm/prom.h b/arch/mips/include/asm/prom.h index a9494c0141fb..eaa26270a5e5 100644 --- a/arch/mips/include/asm/prom.h +++ b/arch/mips/include/asm/prom.h @@ -22,6 +22,7 @@ extern void device_tree_init(void); struct boot_param_header; extern void __dt_setup_arch(void *bph); +extern int __dt_register_buses(const char *bus0, const char *bus1); #define dt_setup_arch(sym) \ ({ \ diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h index cd6e0afc6833..e293a8d89a6d 100644 --- a/arch/mips/include/asm/r4kcache.h +++ b/arch/mips/include/asm/r4kcache.h @@ -47,79 +47,20 @@ extern void (*r4k_blast_icache)(void); #ifdef CONFIG_MIPS_MT -/* - * Optionally force single-threaded execution during I-cache flushes. 
- */ -#define PROTECT_CACHE_FLUSHES 1 - -#ifdef PROTECT_CACHE_FLUSHES - -extern int mt_protiflush; -extern int mt_protdflush; -extern void mt_cflush_lockdown(void); -extern void mt_cflush_release(void); - -#define BEGIN_MT_IPROT \ - unsigned long flags = 0; \ - unsigned long mtflags = 0; \ - if(mt_protiflush) { \ - local_irq_save(flags); \ - ehb(); \ - mtflags = dvpe(); \ - mt_cflush_lockdown(); \ - } - -#define END_MT_IPROT \ - if(mt_protiflush) { \ - mt_cflush_release(); \ - evpe(mtflags); \ - local_irq_restore(flags); \ - } - -#define BEGIN_MT_DPROT \ - unsigned long flags = 0; \ - unsigned long mtflags = 0; \ - if(mt_protdflush) { \ - local_irq_save(flags); \ - ehb(); \ - mtflags = dvpe(); \ - mt_cflush_lockdown(); \ - } - -#define END_MT_DPROT \ - if(mt_protdflush) { \ - mt_cflush_release(); \ - evpe(mtflags); \ - local_irq_restore(flags); \ - } - -#else - -#define BEGIN_MT_IPROT -#define BEGIN_MT_DPROT -#define END_MT_IPROT -#define END_MT_DPROT - -#endif /* PROTECT_CACHE_FLUSHES */ - #define __iflush_prologue \ unsigned long redundance; \ extern int mt_n_iflushes; \ - BEGIN_MT_IPROT \ for (redundance = 0; redundance < mt_n_iflushes; redundance++) { #define __iflush_epilogue \ - END_MT_IPROT \ } #define __dflush_prologue \ unsigned long redundance; \ extern int mt_n_dflushes; \ - BEGIN_MT_DPROT \ for (redundance = 0; redundance < mt_n_dflushes; redundance++) { #define __dflush_epilogue \ - END_MT_DPROT \ } #define __inv_dflush_prologue __dflush_prologue diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h index 78d201fb6c87..c6d06d383ef9 100644 --- a/arch/mips/include/asm/spinlock.h +++ b/arch/mips/include/asm/spinlock.h @@ -12,6 +12,7 @@ #include <linux/compiler.h> #include <asm/barrier.h> +#include <asm/compiler.h> #include <asm/war.h> /* @@ -88,7 +89,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock) " subu %[ticket], %[ticket], 1 \n" " .previous \n" " .set pop \n" - : [ticket_ptr] "+m" (lock->lock), + : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock), [serving_now_ptr] "+m" (lock->h.serving_now), [ticket] "=&r" (tmp), [my_ticket] "=&r" (my_ticket) @@ -121,7 +122,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock) " subu %[ticket], %[ticket], 1 \n" " .previous \n" " .set pop \n" - : [ticket_ptr] "+m" (lock->lock), + : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock), [serving_now_ptr] "+m" (lock->h.serving_now), [ticket] "=&r" (tmp), [my_ticket] "=&r" (my_ticket) @@ -163,7 +164,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock) " li %[ticket], 0 \n" " .previous \n" " .set pop \n" - : [ticket_ptr] "+m" (lock->lock), + : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock), [ticket] "=&r" (tmp), [my_ticket] "=&r" (tmp2), [now_serving] "=&r" (tmp3) @@ -187,7 +188,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock) " li %[ticket], 0 \n" " .previous \n" " .set pop \n" - : [ticket_ptr] "+m" (lock->lock), + : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock), [ticket] "=&r" (tmp), [my_ticket] "=&r" (tmp2), [now_serving] "=&r" (tmp3) @@ -234,8 +235,8 @@ static inline void arch_read_lock(arch_rwlock_t *rw) " beqzl %1, 1b \n" " nop \n" " .set reorder \n" - : "=m" (rw->lock), "=&r" (tmp) - : "m" (rw->lock) + : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp) + : GCC_OFF12_ASM() (rw->lock) : "memory"); } else { do { @@ -244,8 +245,8 @@ static inline void arch_read_lock(arch_rwlock_t *rw) " bltz %1, 1b \n" " addu %1, 1 \n" "2: sc %1, %0 \n" - : "=m" (rw->lock), "=&r" (tmp) - : "m" (rw->lock) + : "=" GCC_OFF12_ASM() 
(rw->lock), "=&r" (tmp) + : GCC_OFF12_ASM() (rw->lock) : "memory"); } while (unlikely(!tmp)); } @@ -268,8 +269,8 @@ static inline void arch_read_unlock(arch_rwlock_t *rw) " sub %1, 1 \n" " sc %1, %0 \n" " beqzl %1, 1b \n" - : "=m" (rw->lock), "=&r" (tmp) - : "m" (rw->lock) + : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp) + : GCC_OFF12_ASM() (rw->lock) : "memory"); } else { do { @@ -277,8 +278,8 @@ static inline void arch_read_unlock(arch_rwlock_t *rw) "1: ll %1, %2 # arch_read_unlock \n" " sub %1, 1 \n" " sc %1, %0 \n" - : "=m" (rw->lock), "=&r" (tmp) - : "m" (rw->lock) + : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp) + : GCC_OFF12_ASM() (rw->lock) : "memory"); } while (unlikely(!tmp)); } @@ -298,8 +299,8 @@ static inline void arch_write_lock(arch_rwlock_t *rw) " beqzl %1, 1b \n" " nop \n" " .set reorder \n" - : "=m" (rw->lock), "=&r" (tmp) - : "m" (rw->lock) + : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp) + : GCC_OFF12_ASM() (rw->lock) : "memory"); } else { do { @@ -308,8 +309,8 @@ static inline void arch_write_lock(arch_rwlock_t *rw) " bnez %1, 1b \n" " lui %1, 0x8000 \n" "2: sc %1, %0 \n" - : "=m" (rw->lock), "=&r" (tmp) - : "m" (rw->lock) + : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp) + : GCC_OFF12_ASM() (rw->lock) : "memory"); } while (unlikely(!tmp)); } @@ -348,8 +349,8 @@ static inline int arch_read_trylock(arch_rwlock_t *rw) __WEAK_LLSC_MB " li %2, 1 \n" "2: \n" - : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret) - : "m" (rw->lock) + : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret) + : GCC_OFF12_ASM() (rw->lock) : "memory"); } else { __asm__ __volatile__( @@ -365,8 +366,8 @@ static inline int arch_read_trylock(arch_rwlock_t *rw) __WEAK_LLSC_MB " li %2, 1 \n" "2: \n" - : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret) - : "m" (rw->lock) + : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret) + : GCC_OFF12_ASM() (rw->lock) : "memory"); } @@ -392,8 +393,8 @@ static inline int arch_write_trylock(arch_rwlock_t *rw) " li %2, 1 \n" " .set reorder \n" "2: \n" - : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret) - : "m" (rw->lock) + : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret) + : GCC_OFF12_ASM() (rw->lock) : "memory"); } else { do { @@ -405,8 +406,9 @@ static inline int arch_write_trylock(arch_rwlock_t *rw) " sc %1, %0 \n" " li %2, 1 \n" "2: \n" - : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret) - : "m" (rw->lock) + : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), + "=&r" (ret) + : GCC_OFF12_ASM() (rw->lock) : "memory"); } while (unlikely(!tmp)); diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h index 7de865805deb..99eea59604e9 100644 --- a/arch/mips/include/asm/thread_info.h +++ b/arch/mips/include/asm/thread_info.h @@ -116,6 +116,7 @@ static inline struct thread_info *current_thread_info(void) #define TIF_LOAD_WATCH 25 /* If set, load watch registers */ #define TIF_SYSCALL_TRACEPOINT 26 /* syscall tracepoint instrumentation */ #define TIF_32BIT_FPREGS 27 /* 32-bit floating point registers */ +#define TIF_HYBRID_FPREGS 28 /* 64b FP registers, odd singles in bits 63:32 of even doubles */ #define TIF_USEDMSA 29 /* MSA has been used this quantum */ #define TIF_MSA_CTX_LIVE 30 /* MSA context must be preserved */ #define TIF_SYSCALL_TRACE 31 /* syscall trace active */ @@ -135,6 +136,7 @@ static inline struct thread_info *current_thread_info(void) #define _TIF_FPUBOUND (1<<TIF_FPUBOUND) #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH) #define _TIF_32BIT_FPREGS (1<<TIF_32BIT_FPREGS) +#define _TIF_HYBRID_FPREGS (1<<TIF_HYBRID_FPREGS) #define _TIF_USEDMSA 
(1<<TIF_USEDMSA) #define _TIF_MSA_CTX_LIVE (1<<TIF_MSA_CTX_LIVE) #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT) diff --git a/arch/mips/include/asm/time.h b/arch/mips/include/asm/time.h index 8f3047d611ee..8ab2874225c4 100644 --- a/arch/mips/include/asm/time.h +++ b/arch/mips/include/asm/time.h @@ -46,19 +46,17 @@ extern unsigned int mips_hpt_frequency; * so it lives here. */ extern int (*perf_irq)(void); +extern int __weak get_c0_perfcount_int(void); /* * Initialize the calling CPU's compare interrupt as clockevent device */ extern unsigned int __weak get_c0_compare_int(void); extern int r4k_clockevent_init(void); -extern int gic_clockevent_init(void); static inline int mips_clockevent_init(void) { -#if defined(CONFIG_CEVT_GIC) - return (gic_clockevent_init() | r4k_clockevent_init()); -#elif defined(CONFIG_CEVT_R4K) +#ifdef CONFIG_CEVT_R4K return r4k_clockevent_init(); #else return -ENXIO; diff --git a/arch/mips/include/asm/types.h b/arch/mips/include/asm/types.h index a845aafedee4..148d42a17f30 100644 --- a/arch/mips/include/asm/types.h +++ b/arch/mips/include/asm/types.h @@ -11,23 +11,7 @@ #ifndef _ASM_TYPES_H #define _ASM_TYPES_H -# include <asm-generic/int-ll64.h> +#include <asm-generic/int-ll64.h> #include <uapi/asm/types.h> -/* - * These aren't exported outside the kernel to avoid name space clashes - */ -#ifndef __ASSEMBLY__ - -/* - * Don't use phys_t. You've been warned. - */ -#ifdef CONFIG_64BIT_PHYS_ADDR -typedef unsigned long long phys_t; -#else -typedef unsigned long phys_t; -#endif - -#endif /* __ASSEMBLY__ */ - #endif /* _ASM_TYPES_H */ diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h index 22a5624e2fd2..bf8b32450ef6 100644 --- a/arch/mips/include/asm/uaccess.h +++ b/arch/mips/include/asm/uaccess.h @@ -1325,33 +1325,6 @@ strncpy_from_user(char *__to, const char __user *__from, long __len) return res; } -/* Returns: 0 if bad, string length+1 (memory size) of string if ok */ -static inline long __strlen_user(const char __user *s) -{ - long res; - - if (segment_eq(get_fs(), get_ds())) { - __asm__ __volatile__( - "move\t$4, %1\n\t" - __MODULE_JAL(__strlen_kernel_nocheck_asm) - "move\t%0, $2" - : "=r" (res) - : "r" (s) - : "$2", "$4", __UA_t0, "$31"); - } else { - might_fault(); - __asm__ __volatile__( - "move\t$4, %1\n\t" - __MODULE_JAL(__strlen_user_nocheck_asm) - "move\t%0, $2" - : "=r" (res) - : "r" (s) - : "$2", "$4", __UA_t0, "$31"); - } - - return res; -} - /* * strlen_user: - Get the size of a string in user space. * @str: The string to measure. 
diff --git a/arch/mips/include/asm/uasm.h b/arch/mips/include/asm/uasm.h index 708c5d414905..fc1cdd25fcda 100644 --- a/arch/mips/include/asm/uasm.h +++ b/arch/mips/include/asm/uasm.h @@ -136,9 +136,11 @@ Ip_u1s2(_lui); Ip_u2s3u1(_lw); Ip_u3u1u2(_lwx); Ip_u1u2u3(_mfc0); +Ip_u1u2u3(_mfhc0); Ip_u1(_mfhi); Ip_u1(_mflo); Ip_u1u2u3(_mtc0); +Ip_u1u2u3(_mthc0); Ip_u3u1u2(_mul); Ip_u3u1u2(_or); Ip_u2u1u3(_ori); diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h index 4bfdb9d4c186..89c22433b1c6 100644 --- a/arch/mips/include/uapi/asm/inst.h +++ b/arch/mips/include/uapi/asm/inst.h @@ -108,9 +108,10 @@ enum rt_op { */ enum cop_op { mfc_op = 0x00, dmfc_op = 0x01, - cfc_op = 0x02, mfhc_op = 0x03, - mtc_op = 0x04, dmtc_op = 0x05, - ctc_op = 0x06, mthc_op = 0x07, + cfc_op = 0x02, mfhc0_op = 0x02, + mfhc_op = 0x03, mtc_op = 0x04, + dmtc_op = 0x05, ctc_op = 0x06, + mthc0_op = 0x06, mthc_op = 0x07, bc_op = 0x08, cop_op = 0x10, copm_op = 0x18 }; diff --git a/arch/mips/include/uapi/asm/siginfo.h b/arch/mips/include/uapi/asm/siginfo.h index e81174432bab..d08f83f19db5 100644 --- a/arch/mips/include/uapi/asm/siginfo.h +++ b/arch/mips/include/uapi/asm/siginfo.h @@ -92,6 +92,10 @@ typedef struct siginfo { int _trapno; /* TRAP # which caused the signal */ #endif short _addr_lsb; + struct { + void __user *_lower; + void __user *_upper; + } _addr_bnd; } _sigfault; /* SIGPOLL, SIGXFSZ (To do ...) */ diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h index a14baa218c76..dec3c850f36b 100644 --- a/arch/mips/include/uapi/asm/socket.h +++ b/arch/mips/include/uapi/asm/socket.h @@ -98,4 +98,9 @@ #define SO_BPF_EXTENSIONS 48 +#define SO_INCOMING_CPU 49 + +#define SO_ATTACH_BPF 50 +#define SO_DETACH_BPF SO_DETACH_FILTER + #endif /* _UAPI_ASM_SOCKET_H */ diff --git a/arch/mips/jz4740/setup.c b/arch/mips/jz4740/setup.c index 76eafcb79c89..ef796f97b996 100644 --- a/arch/mips/jz4740/setup.c +++ b/arch/mips/jz4740/setup.c @@ -32,7 +32,7 @@ static void __init jz4740_detect_mem(void) { void __iomem *jz_emc_base; u32 ctrl, bus, bank, rows, cols; - phys_t size; + phys_addr_t size; jz_emc_base = ioremap(JZ4740_EMC_BASE_ADDR, 0x100); ctrl = readl(jz_emc_base + JZ4740_EMC_SDRAM_CTRL); diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile index 008a2fed0584..92987d1bbe5f 100644 --- a/arch/mips/kernel/Makefile +++ b/arch/mips/kernel/Makefile @@ -4,9 +4,10 @@ extra-y := head.o vmlinux.lds -obj-y += cpu-probe.o branch.o entry.o genex.o idle.o irq.o process.o \ - prom.o ptrace.o reset.o setup.o signal.o syscall.o \ - time.o topology.o traps.o unaligned.o watch.o vdso.o +obj-y += cpu-probe.o branch.o elf.o entry.o genex.o idle.o irq.o \ + process.o prom.o ptrace.o reset.o setup.o signal.o \ + syscall.o time.o topology.o traps.o unaligned.o watch.o \ + vdso.o ifdef CONFIG_FUNCTION_TRACER CFLAGS_REMOVE_ftrace.o = -pg @@ -18,12 +19,10 @@ endif obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o obj-$(CONFIG_CEVT_R4K) += cevt-r4k.o obj-$(CONFIG_CEVT_DS1287) += cevt-ds1287.o -obj-$(CONFIG_CEVT_GIC) += cevt-gic.o obj-$(CONFIG_CEVT_GT641XX) += cevt-gt641xx.o obj-$(CONFIG_CEVT_SB1250) += cevt-sb1250.o obj-$(CONFIG_CEVT_TXX9) += cevt-txx9.o obj-$(CONFIG_CSRC_BCM1480) += csrc-bcm1480.o -obj-$(CONFIG_CSRC_GIC) += csrc-gic.o obj-$(CONFIG_CSRC_IOASIC) += csrc-ioasic.o obj-$(CONFIG_CSRC_R4K) += csrc-r4k.o obj-$(CONFIG_CSRC_SB1250) += csrc-sb1250.o @@ -68,7 +67,6 @@ obj-$(CONFIG_IRQ_CPU_RM7K) += irq-rm7000.o obj-$(CONFIG_MIPS_MSC) += irq-msc01.o obj-$(CONFIG_IRQ_TXX9) += irq_txx9.o 
obj-$(CONFIG_IRQ_GT641XX) += irq-gt641xx.o -obj-$(CONFIG_IRQ_GIC) += irq-gic.o obj-$(CONFIG_KPROBES) += kprobes.o obj-$(CONFIG_32BIT) += scall32-o32.o diff --git a/arch/mips/kernel/cevt-gic.c b/arch/mips/kernel/cevt-gic.c deleted file mode 100644 index 6093716980b9..000000000000 --- a/arch/mips/kernel/cevt-gic.c +++ /dev/null @@ -1,105 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 2013 Imagination Technologies Ltd. - */ -#include <linux/clockchips.h> -#include <linux/interrupt.h> -#include <linux/percpu.h> -#include <linux/smp.h> -#include <linux/irq.h> - -#include <asm/time.h> -#include <asm/gic.h> -#include <asm/mips-boards/maltaint.h> - -DEFINE_PER_CPU(struct clock_event_device, gic_clockevent_device); -int gic_timer_irq_installed; - - -static int gic_next_event(unsigned long delta, struct clock_event_device *evt) -{ - u64 cnt; - int res; - - cnt = gic_read_count(); - cnt += (u64)delta; - gic_write_cpu_compare(cnt, cpumask_first(evt->cpumask)); - res = ((int)(gic_read_count() - cnt) >= 0) ? -ETIME : 0; - return res; -} - -void gic_set_clock_mode(enum clock_event_mode mode, - struct clock_event_device *evt) -{ - /* Nothing to do ... */ -} - -irqreturn_t gic_compare_interrupt(int irq, void *dev_id) -{ - struct clock_event_device *cd; - int cpu = smp_processor_id(); - - gic_write_compare(gic_read_compare()); - cd = &per_cpu(gic_clockevent_device, cpu); - cd->event_handler(cd); - return IRQ_HANDLED; -} - -struct irqaction gic_compare_irqaction = { - .handler = gic_compare_interrupt, - .flags = IRQF_PERCPU | IRQF_TIMER, - .name = "timer", -}; - - -void gic_event_handler(struct clock_event_device *dev) -{ -} - -int gic_clockevent_init(void) -{ - unsigned int cpu = smp_processor_id(); - struct clock_event_device *cd; - unsigned int irq; - - if (!cpu_has_counter || !gic_frequency) - return -ENXIO; - - irq = MIPS_GIC_IRQ_BASE; - - cd = &per_cpu(gic_clockevent_device, cpu); - - cd->name = "MIPS GIC"; - cd->features = CLOCK_EVT_FEAT_ONESHOT | - CLOCK_EVT_FEAT_C3STOP; - - clockevent_set_clock(cd, gic_frequency); - - /* Calculate the min / max delta */ - cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd); - cd->min_delta_ns = clockevent_delta2ns(0x300, cd); - - cd->rating = 300; - cd->irq = irq; - cd->cpumask = cpumask_of(cpu); - cd->set_next_event = gic_next_event; - cd->set_mode = gic_set_clock_mode; - cd->event_handler = gic_event_handler; - - clockevents_register_device(cd); - - GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_MAP), 0x80000002); - GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_SMASK), GIC_VPE_SMASK_CMP_MSK); - - if (gic_timer_irq_installed) - return 0; - - gic_timer_irq_installed = 1; - - setup_irq(irq, &gic_compare_irqaction); - irq_set_handler(irq, handle_percpu_irq); - return 0; -} diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c index bc127e22fdab..6acaad0480af 100644 --- a/arch/mips/kernel/cevt-r4k.c +++ b/arch/mips/kernel/cevt-r4k.c @@ -11,10 +11,10 @@ #include <linux/percpu.h> #include <linux/smp.h> #include <linux/irq.h> +#include <linux/irqchip/mips-gic.h> #include <asm/time.h> #include <asm/cevt-r4k.h> -#include <asm/gic.h> static int mips_next_event(unsigned long delta, struct clock_event_device *evt) @@ -85,8 +85,8 @@ void mips_event_handler(struct clock_event_device *dev) */ static int c0_compare_int_pending(void) { -#ifdef CONFIG_IRQ_GIC - if (cpu_has_veic) +#ifdef CONFIG_MIPS_GIC + if 
(gic_present) return gic_get_timer_pending(); #endif return (read_c0_cause() >> cp0_compare_irq_shift) & (1ul << CAUSEB_IP); diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index dc49cf30c2db..5342674842f5 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -69,6 +69,63 @@ static int __init htw_disable(char *s) __setup("nohtw", htw_disable); +static int mips_ftlb_disabled; +static int mips_has_ftlb_configured; + +static void set_ftlb_enable(struct cpuinfo_mips *c, int enable); + +static int __init ftlb_disable(char *s) +{ + unsigned int config4, mmuextdef; + + /* + * If the core hasn't done any FTLB configuration, there is nothing + * for us to do here. + */ + if (!mips_has_ftlb_configured) + return 1; + + /* Disable it in the boot cpu */ + set_ftlb_enable(&cpu_data[0], 0); + + back_to_back_c0_hazard(); + + config4 = read_c0_config4(); + + /* Check that FTLB has been disabled */ + mmuextdef = config4 & MIPS_CONF4_MMUEXTDEF; + /* MMUSIZEEXT == VTLB ON, FTLB OFF */ + if (mmuextdef == MIPS_CONF4_MMUEXTDEF_FTLBSIZEEXT) { + /* This should never happen */ + pr_warn("FTLB could not be disabled!\n"); + return 1; + } + + mips_ftlb_disabled = 1; + mips_has_ftlb_configured = 0; + + /* + * noftlb is mainly used for debug purposes so print + * an informative message instead of using pr_debug() + */ + pr_info("FTLB has been disabled\n"); + + /* + * Some of these bits are duplicated in the decode_config4. + * MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT is the only possible case + * once FTLB has been disabled so undo what decode_config4 did. + */ + cpu_data[0].tlbsize -= cpu_data[0].tlbsizeftlbways * + cpu_data[0].tlbsizeftlbsets; + cpu_data[0].tlbsizeftlbsets = 0; + cpu_data[0].tlbsizeftlbways = 0; + + return 1; +} + +__setup("noftlb", ftlb_disable); + + static inline void check_errata(void) { struct cpuinfo_mips *c = &current_cpu_data; @@ -140,7 +197,7 @@ static inline unsigned long cpu_get_fpu_id(void) */ static inline int __cpu_has_fpu(void) { - return ((cpu_get_fpu_id() & FPIR_IMP_MASK) != FPIR_IMP_NONE); + return (cpu_get_fpu_id() & FPIR_IMP_MASK) != FPIR_IMP_NONE; } static inline unsigned long cpu_get_msa_id(void) @@ -399,6 +456,8 @@ static inline unsigned int decode_config4(struct cpuinfo_mips *c) ftlb_page = MIPS_CONF4_VFTLBPAGESIZE; /* fall through */ case MIPS_CONF4_MMUEXTDEF_FTLBSIZEEXT: + if (mips_ftlb_disabled) + break; newcf4 = (config4 & ~ftlb_page) | (page_size_ftlb(mmuextdef) << MIPS_CONF4_FTLBPAGESIZE_SHIFT); @@ -418,6 +477,7 @@ static inline unsigned int decode_config4(struct cpuinfo_mips *c) c->tlbsizeftlbways = ((config4 & MIPS_CONF4_FTLBWAYS) >> MIPS_CONF4_FTLBWAYS_SHIFT) + 2; c->tlbsize += c->tlbsizeftlbways * c->tlbsizeftlbsets; + mips_has_ftlb_configured = 1; break; } } @@ -432,7 +492,7 @@ static inline unsigned int decode_config5(struct cpuinfo_mips *c) unsigned int config5; config5 = read_c0_config5(); - config5 &= ~MIPS_CONF5_UFR; + config5 &= ~(MIPS_CONF5_UFR | MIPS_CONF5_UFE); write_c0_config5(config5); if (config5 & MIPS_CONF5_EVA) @@ -453,8 +513,8 @@ static void decode_configs(struct cpuinfo_mips *c) c->scache.flags = MIPS_CACHE_NOT_PRESENT; - /* Enable FTLB if present */ - set_ftlb_enable(c, 1); + /* Enable FTLB if present and not disabled */ + set_ftlb_enable(c, !mips_ftlb_disabled); ok = decode_config0(c); /* Read Config registers. */ BUG_ON(!ok); /* Arch spec violation! 
*/ @@ -1058,6 +1118,7 @@ static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu) break; } case PRID_IMP_BMIPS5000: + case PRID_IMP_BMIPS5200: c->cputype = CPU_BMIPS5000; __cpu_name[cpu] = "Broadcom BMIPS5000"; set_elf_platform(cpu, "bmips5000"); @@ -1288,6 +1349,8 @@ void cpu_probe(void) MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) { if (c->fpu_id & MIPS_FPIR_3D) c->ases |= MIPS_ASE_MIPS3D; + if (c->fpu_id & MIPS_FPIR_FREP) + c->options |= MIPS_CPU_FRE; } } diff --git a/arch/mips/kernel/crash_dump.c b/arch/mips/kernel/crash_dump.c index f291cf99b03a..6fe7790e5868 100644 --- a/arch/mips/kernel/crash_dump.c +++ b/arch/mips/kernel/crash_dump.c @@ -38,7 +38,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, kunmap_atomic(vaddr); } else { if (!kdump_buf_page) { - pr_warning("Kdump: Kdump buffer page not allocated\n"); + pr_warn("Kdump: Kdump buffer page not allocated\n"); return -EFAULT; } @@ -57,7 +57,7 @@ static int __init kdump_buf_page_init(void) kdump_buf_page = kmalloc(PAGE_SIZE, GFP_KERNEL); if (!kdump_buf_page) { - pr_warning("Kdump: Failed to allocate kdump buffer page\n"); + pr_warn("Kdump: Failed to allocate kdump buffer page\n"); ret = -ENOMEM; } diff --git a/arch/mips/kernel/csrc-gic.c b/arch/mips/kernel/csrc-gic.c deleted file mode 100644 index e02620901117..000000000000 --- a/arch/mips/kernel/csrc-gic.c +++ /dev/null @@ -1,40 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. - */ -#include <linux/init.h> -#include <linux/time.h> - -#include <asm/gic.h> - -static cycle_t gic_hpt_read(struct clocksource *cs) -{ - return gic_read_count(); -} - -static struct clocksource gic_clocksource = { - .name = "GIC", - .read = gic_hpt_read, - .flags = CLOCK_SOURCE_IS_CONTINUOUS, -}; - -void __init gic_clocksource_init(unsigned int frequency) -{ - unsigned int config, bits; - - /* Calculate the clocksource mask. */ - GICREAD(GIC_REG(SHARED, GIC_SH_CONFIG), config); - bits = 32 + ((config & GIC_SH_CONFIG_COUNTBITS_MSK) >> - (GIC_SH_CONFIG_COUNTBITS_SHF - 2)); - - /* Set clocksource mask. */ - gic_clocksource.mask = CLOCKSOURCE_MASK(bits); - - /* Calculate a somewhat reasonable rating value. */ - gic_clocksource.rating = 200 + frequency / 10000000; - - clocksource_register_hz(&gic_clocksource, frequency); -} diff --git a/arch/mips/kernel/elf.c b/arch/mips/kernel/elf.c new file mode 100644 index 000000000000..c92b15df6893 --- /dev/null +++ b/arch/mips/kernel/elf.c @@ -0,0 +1,191 @@ +/* + * Copyright (C) 2014 Imagination Technologies + * Author: Paul Burton <paul.burton@imgtec.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#include <linux/elf.h> +#include <linux/sched.h> + +enum { + FP_ERROR = -1, + FP_DOUBLE_64A = -2, +}; + +int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf, + bool is_interp, struct arch_elf_state *state) +{ + struct elfhdr *ehdr = _ehdr; + struct elf_phdr *phdr = _phdr; + struct mips_elf_abiflags_v0 abiflags; + int ret; + + if (config_enabled(CONFIG_64BIT) && + (ehdr->e_ident[EI_CLASS] != ELFCLASS32)) + return 0; + if (phdr->p_type != PT_MIPS_ABIFLAGS) + return 0; + if (phdr->p_filesz < sizeof(abiflags)) + return -EINVAL; + + ret = kernel_read(elf, phdr->p_offset, (char *)&abiflags, + sizeof(abiflags)); + if (ret < 0) + return ret; + if (ret != sizeof(abiflags)) + return -EIO; + + /* Record the required FP ABIs for use by mips_check_elf */ + if (is_interp) + state->interp_fp_abi = abiflags.fp_abi; + else + state->fp_abi = abiflags.fp_abi; + + return 0; +} + +static inline unsigned get_fp_abi(struct elfhdr *ehdr, int in_abi) +{ + /* If the ABI requirement is provided, simply return that */ + if (in_abi != -1) + return in_abi; + + /* If the EF_MIPS_FP64 flag was set, return MIPS_ABI_FP_64 */ + if (ehdr->e_flags & EF_MIPS_FP64) + return MIPS_ABI_FP_64; + + /* Default to MIPS_ABI_FP_DOUBLE */ + return MIPS_ABI_FP_DOUBLE; +} + +int arch_check_elf(void *_ehdr, bool has_interpreter, + struct arch_elf_state *state) +{ + struct elfhdr *ehdr = _ehdr; + unsigned fp_abi, interp_fp_abi, abi0, abi1; + + /* Ignore non-O32 binaries */ + if (config_enabled(CONFIG_64BIT) && + (ehdr->e_ident[EI_CLASS] != ELFCLASS32)) + return 0; + + fp_abi = get_fp_abi(ehdr, state->fp_abi); + + if (has_interpreter) { + interp_fp_abi = get_fp_abi(ehdr, state->interp_fp_abi); + + abi0 = min(fp_abi, interp_fp_abi); + abi1 = max(fp_abi, interp_fp_abi); + } else { + abi0 = abi1 = fp_abi; + } + + state->overall_abi = FP_ERROR; + + if (abi0 == abi1) { + state->overall_abi = abi0; + } else if (abi0 == MIPS_ABI_FP_ANY) { + state->overall_abi = abi1; + } else if (abi0 == MIPS_ABI_FP_DOUBLE) { + switch (abi1) { + case MIPS_ABI_FP_XX: + state->overall_abi = MIPS_ABI_FP_DOUBLE; + break; + + case MIPS_ABI_FP_64A: + state->overall_abi = FP_DOUBLE_64A; + break; + } + } else if (abi0 == MIPS_ABI_FP_SINGLE || + abi0 == MIPS_ABI_FP_SOFT) { + /* Cannot link with other ABIs */ + } else if (abi0 == MIPS_ABI_FP_OLD_64) { + switch (abi1) { + case MIPS_ABI_FP_XX: + case MIPS_ABI_FP_64: + case MIPS_ABI_FP_64A: + state->overall_abi = MIPS_ABI_FP_64; + break; + } + } else if (abi0 == MIPS_ABI_FP_XX || + abi0 == MIPS_ABI_FP_64 || + abi0 == MIPS_ABI_FP_64A) { + state->overall_abi = MIPS_ABI_FP_64; + } + + switch (state->overall_abi) { + case MIPS_ABI_FP_64: + case MIPS_ABI_FP_64A: + case FP_DOUBLE_64A: + if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT)) + return -ELIBBAD; + break; + + case FP_ERROR: + return -ELIBBAD; + } + + return 0; +} + +void mips_set_personality_fp(struct arch_elf_state *state) +{ + if (config_enabled(CONFIG_FP32XX_HYBRID_FPRS)) { + /* + * Use hybrid FPRs for all code which can correctly execute + * with that mode. 
+ */ + switch (state->overall_abi) { + case MIPS_ABI_FP_DOUBLE: + case MIPS_ABI_FP_SINGLE: + case MIPS_ABI_FP_SOFT: + case MIPS_ABI_FP_XX: + case MIPS_ABI_FP_ANY: + /* FR=1, FRE=1 */ + clear_thread_flag(TIF_32BIT_FPREGS); + set_thread_flag(TIF_HYBRID_FPREGS); + return; + } + } + + switch (state->overall_abi) { + case MIPS_ABI_FP_DOUBLE: + case MIPS_ABI_FP_SINGLE: + case MIPS_ABI_FP_SOFT: + /* FR=0 */ + set_thread_flag(TIF_32BIT_FPREGS); + clear_thread_flag(TIF_HYBRID_FPREGS); + break; + + case FP_DOUBLE_64A: + /* FR=1, FRE=1 */ + clear_thread_flag(TIF_32BIT_FPREGS); + set_thread_flag(TIF_HYBRID_FPREGS); + break; + + case MIPS_ABI_FP_64: + case MIPS_ABI_FP_64A: + /* FR=1, FRE=0 */ + clear_thread_flag(TIF_32BIT_FPREGS); + clear_thread_flag(TIF_HYBRID_FPREGS); + break; + + case MIPS_ABI_FP_XX: + case MIPS_ABI_FP_ANY: + if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT)) + set_thread_flag(TIF_32BIT_FPREGS); + else + clear_thread_flag(TIF_32BIT_FPREGS); + + clear_thread_flag(TIF_HYBRID_FPREGS); + break; + + default: + case FP_ERROR: + BUG(); + } +} diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c index 50b364897dda..a74ec3ae557c 100644 --- a/arch/mips/kernel/i8259.c +++ b/arch/mips/kernel/i8259.c @@ -12,6 +12,7 @@ #include <linux/init.h> #include <linux/ioport.h> #include <linux/interrupt.h> +#include <linux/irqdomain.h> #include <linux/kernel.h> #include <linux/spinlock.h> #include <linux/syscore_ops.h> @@ -308,6 +309,19 @@ static struct resource pic2_io_resource = { .flags = IORESOURCE_BUSY }; +static int i8259A_irq_domain_map(struct irq_domain *d, unsigned int virq, + irq_hw_number_t hw) +{ + irq_set_chip_and_handler(virq, &i8259A_chip, handle_level_irq); + irq_set_probe(virq); + return 0; +} + +static struct irq_domain_ops i8259A_ops = { + .map = i8259A_irq_domain_map, + .xlate = irq_domain_xlate_onecell, +}; + /* * On systems with i8259-style interrupt controllers we assume for * driver compatibility reasons interrupts 0 - 15 to be the i8259 @@ -315,17 +329,17 @@ static struct resource pic2_io_resource = { */ void __init init_i8259_irqs(void) { - int i; + struct irq_domain *domain; insert_resource(&ioport_resource, &pic1_io_resource); insert_resource(&ioport_resource, &pic2_io_resource); init_8259A(0); - for (i = I8259A_IRQ_BASE; i < I8259A_IRQ_BASE + 16; i++) { - irq_set_chip_and_handler(i, &i8259A_chip, handle_level_irq); - irq_set_probe(i); - } + domain = irq_domain_add_legacy(NULL, 16, I8259A_IRQ_BASE, 0, + &i8259A_ops, NULL); + if (!domain) + panic("Failed to add i8259 IRQ domain"); setup_irq(I8259A_IRQ_BASE + PIC_CASCADE_IR, &irq2); } diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c deleted file mode 100644 index 9e9d8b9a5b97..000000000000 --- a/arch/mips/kernel/irq-gic.c +++ /dev/null @@ -1,402 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org) - * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 
- */ -#include <linux/bitmap.h> -#include <linux/init.h> -#include <linux/smp.h> -#include <linux/irq.h> -#include <linux/clocksource.h> - -#include <asm/io.h> -#include <asm/gic.h> -#include <asm/setup.h> -#include <asm/traps.h> -#include <linux/hardirq.h> -#include <asm-generic/bitops/find.h> - -unsigned int gic_frequency; -unsigned int gic_present; -unsigned long _gic_base; -unsigned int gic_irq_base; -unsigned int gic_irq_flags[GIC_NUM_INTRS]; - -/* The index into this array is the vector # of the interrupt. */ -struct gic_shared_intr_map gic_shared_intr_map[GIC_NUM_INTRS]; - -struct gic_pcpu_mask { - DECLARE_BITMAP(pcpu_mask, GIC_NUM_INTRS); -}; - -struct gic_pending_regs { - DECLARE_BITMAP(pending, GIC_NUM_INTRS); -}; - -struct gic_intrmask_regs { - DECLARE_BITMAP(intrmask, GIC_NUM_INTRS); -}; - -static struct gic_pcpu_mask pcpu_masks[NR_CPUS]; -static struct gic_pending_regs pending_regs[NR_CPUS]; -static struct gic_intrmask_regs intrmask_regs[NR_CPUS]; - -#if defined(CONFIG_CSRC_GIC) || defined(CONFIG_CEVT_GIC) -cycle_t gic_read_count(void) -{ - unsigned int hi, hi2, lo; - - do { - GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi); - GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), lo); - GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi2); - } while (hi2 != hi); - - return (((cycle_t) hi) << 32) + lo; -} - -void gic_write_compare(cycle_t cnt) -{ - GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI), - (int)(cnt >> 32)); - GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO), - (int)(cnt & 0xffffffff)); -} - -void gic_write_cpu_compare(cycle_t cnt, int cpu) -{ - unsigned long flags; - - local_irq_save(flags); - - GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), cpu); - GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_HI), - (int)(cnt >> 32)); - GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_LO), - (int)(cnt & 0xffffffff)); - - local_irq_restore(flags); -} - -cycle_t gic_read_compare(void) -{ - unsigned int hi, lo; - - GICREAD(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI), hi); - GICREAD(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO), lo); - - return (((cycle_t) hi) << 32) + lo; -} -#endif - -unsigned int gic_get_timer_pending(void) -{ - unsigned int vpe_pending; - - GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), 0); - GICREAD(GIC_REG(VPE_OTHER, GIC_VPE_PEND), vpe_pending); - return (vpe_pending & GIC_VPE_PEND_TIMER_MSK); -} - -void gic_bind_eic_interrupt(int irq, int set) -{ - /* Convert irq vector # to hw int # */ - irq -= GIC_PIN_TO_VEC_OFFSET; - - /* Set irq to use shadow set */ - GICWRITE(GIC_REG_ADDR(VPE_LOCAL, GIC_VPE_EIC_SS(irq)), set); -} - -void gic_send_ipi(unsigned int intr) -{ - GICWRITE(GIC_REG(SHARED, GIC_SH_WEDGE), 0x80000000 | intr); -} - -static void gic_eic_irq_dispatch(void) -{ - unsigned int cause = read_c0_cause(); - int irq; - - irq = (cause & ST0_IM) >> STATUSB_IP2; - if (irq == 0) - irq = -1; - - if (irq >= 0) - do_IRQ(gic_irq_base + irq); - else - spurious_interrupt(); -} - -static void __init vpe_local_setup(unsigned int numvpes) -{ - unsigned long timer_intr = GIC_INT_TMR; - unsigned long perf_intr = GIC_INT_PERFCTR; - unsigned int vpe_ctl; - int i; - - if (cpu_has_veic) { - /* - * GIC timer interrupt -> CPU HW Int X (vector X+2) -> - * map to pin X+2-1 (since GIC adds 1) - */ - timer_intr += (GIC_CPU_TO_VEC_OFFSET - GIC_PIN_TO_VEC_OFFSET); - /* - * GIC perfcnt interrupt -> CPU HW Int X (vector X+2) -> - * map to pin X+2-1 (since GIC adds 1) - */ - perf_intr += (GIC_CPU_TO_VEC_OFFSET - GIC_PIN_TO_VEC_OFFSET); - } - - /* - * Setup the default performance counter timer interrupts - * for 
all VPEs - */ - for (i = 0; i < numvpes; i++) { - GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i); - - /* Are Interrupts locally routable? */ - GICREAD(GIC_REG(VPE_OTHER, GIC_VPE_CTL), vpe_ctl); - if (vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK) - GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP), - GIC_MAP_TO_PIN_MSK | timer_intr); - if (cpu_has_veic) { - set_vi_handler(timer_intr + GIC_PIN_TO_VEC_OFFSET, - gic_eic_irq_dispatch); - gic_shared_intr_map[timer_intr + GIC_PIN_TO_VEC_OFFSET].local_intr_mask |= GIC_VPE_RMASK_TIMER_MSK; - } - - if (vpe_ctl & GIC_VPE_CTL_PERFCNT_RTBL_MSK) - GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP), - GIC_MAP_TO_PIN_MSK | perf_intr); - if (cpu_has_veic) { - set_vi_handler(perf_intr + GIC_PIN_TO_VEC_OFFSET, gic_eic_irq_dispatch); - gic_shared_intr_map[perf_intr + GIC_PIN_TO_VEC_OFFSET].local_intr_mask |= GIC_VPE_RMASK_PERFCNT_MSK; - } - } -} - -unsigned int gic_compare_int(void) -{ - unsigned int pending; - - GICREAD(GIC_REG(VPE_LOCAL, GIC_VPE_PEND), pending); - if (pending & GIC_VPE_PEND_CMP_MSK) - return 1; - else - return 0; -} - -void gic_get_int_mask(unsigned long *dst, const unsigned long *src) -{ - unsigned int i; - unsigned long *pending, *intrmask, *pcpu_mask; - unsigned long *pending_abs, *intrmask_abs; - - /* Get per-cpu bitmaps */ - pending = pending_regs[smp_processor_id()].pending; - intrmask = intrmask_regs[smp_processor_id()].intrmask; - pcpu_mask = pcpu_masks[smp_processor_id()].pcpu_mask; - - pending_abs = (unsigned long *) GIC_REG_ABS_ADDR(SHARED, - GIC_SH_PEND_31_0_OFS); - intrmask_abs = (unsigned long *) GIC_REG_ABS_ADDR(SHARED, - GIC_SH_MASK_31_0_OFS); - - for (i = 0; i < BITS_TO_LONGS(GIC_NUM_INTRS); i++) { - GICREAD(*pending_abs, pending[i]); - GICREAD(*intrmask_abs, intrmask[i]); - pending_abs++; - intrmask_abs++; - } - - bitmap_and(pending, pending, intrmask, GIC_NUM_INTRS); - bitmap_and(pending, pending, pcpu_mask, GIC_NUM_INTRS); - bitmap_and(dst, src, pending, GIC_NUM_INTRS); -} - -unsigned int gic_get_int(void) -{ - DECLARE_BITMAP(interrupts, GIC_NUM_INTRS); - - bitmap_fill(interrupts, GIC_NUM_INTRS); - gic_get_int_mask(interrupts, interrupts); - - return find_first_bit(interrupts, GIC_NUM_INTRS); -} - -static void gic_mask_irq(struct irq_data *d) -{ - GIC_CLR_INTR_MASK(d->irq - gic_irq_base); -} - -static void gic_unmask_irq(struct irq_data *d) -{ - GIC_SET_INTR_MASK(d->irq - gic_irq_base); -} - -#ifdef CONFIG_SMP -static DEFINE_SPINLOCK(gic_lock); - -static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask, - bool force) -{ - unsigned int irq = (d->irq - gic_irq_base); - cpumask_t tmp = CPU_MASK_NONE; - unsigned long flags; - int i; - - cpumask_and(&tmp, cpumask, cpu_online_mask); - if (cpus_empty(tmp)) - return -1; - - /* Assumption : cpumask refers to a single CPU */ - spin_lock_irqsave(&gic_lock, flags); - - /* Re-route this IRQ */ - GIC_SH_MAP_TO_VPE_SMASK(irq, first_cpu(tmp)); - - /* Update the pcpu_masks */ - for (i = 0; i < NR_CPUS; i++) - clear_bit(irq, pcpu_masks[i].pcpu_mask); - set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask); - - cpumask_copy(d->affinity, cpumask); - spin_unlock_irqrestore(&gic_lock, flags); - - return IRQ_SET_MASK_OK_NOCOPY; -} -#endif - -static struct irq_chip gic_irq_controller = { - .name = "MIPS GIC", - .irq_ack = gic_irq_ack, - .irq_mask = gic_mask_irq, - .irq_mask_ack = gic_mask_irq, - .irq_unmask = gic_unmask_irq, - .irq_eoi = gic_finish_irq, -#ifdef CONFIG_SMP - .irq_set_affinity = gic_set_affinity, -#endif -}; - -static void __init gic_setup_intr(unsigned int intr, 
unsigned int cpu, - unsigned int pin, unsigned int polarity, unsigned int trigtype, - unsigned int flags) -{ - struct gic_shared_intr_map *map_ptr; - - /* Setup Intr to Pin mapping */ - if (pin & GIC_MAP_TO_NMI_MSK) { - int i; - - GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_PIN(intr)), pin); - /* FIXME: hack to route NMI to all cpu's */ - for (i = 0; i < NR_CPUS; i += 32) { - GICWRITE(GIC_REG_ADDR(SHARED, - GIC_SH_MAP_TO_VPE_REG_OFF(intr, i)), - 0xffffffff); - } - } else { - GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_PIN(intr)), - GIC_MAP_TO_PIN_MSK | pin); - /* Setup Intr to CPU mapping */ - GIC_SH_MAP_TO_VPE_SMASK(intr, cpu); - if (cpu_has_veic) { - set_vi_handler(pin + GIC_PIN_TO_VEC_OFFSET, - gic_eic_irq_dispatch); - map_ptr = &gic_shared_intr_map[pin + GIC_PIN_TO_VEC_OFFSET]; - if (map_ptr->num_shared_intr >= GIC_MAX_SHARED_INTR) - BUG(); - map_ptr->intr_list[map_ptr->num_shared_intr++] = intr; - } - } - - /* Setup Intr Polarity */ - GIC_SET_POLARITY(intr, polarity); - - /* Setup Intr Trigger Type */ - GIC_SET_TRIGGER(intr, trigtype); - - /* Init Intr Masks */ - GIC_CLR_INTR_MASK(intr); - - /* Initialise per-cpu Interrupt software masks */ - set_bit(intr, pcpu_masks[cpu].pcpu_mask); - - if ((flags & GIC_FLAG_TRANSPARENT) && (cpu_has_veic == 0)) - GIC_SET_INTR_MASK(intr); - if (trigtype == GIC_TRIG_EDGE) - gic_irq_flags[intr] |= GIC_TRIG_EDGE; -} - -static void __init gic_basic_init(int numintrs, int numvpes, - struct gic_intr_map *intrmap, int mapsize) -{ - unsigned int i, cpu; - unsigned int pin_offset = 0; - - board_bind_eic_interrupt = &gic_bind_eic_interrupt; - - /* Setup defaults */ - for (i = 0; i < numintrs; i++) { - GIC_SET_POLARITY(i, GIC_POL_POS); - GIC_SET_TRIGGER(i, GIC_TRIG_LEVEL); - GIC_CLR_INTR_MASK(i); - if (i < GIC_NUM_INTRS) { - gic_irq_flags[i] = 0; - gic_shared_intr_map[i].num_shared_intr = 0; - gic_shared_intr_map[i].local_intr_mask = 0; - } - } - - /* - * In EIC mode, the HW_INT# is offset by (2-1). Need to subtract - * one because the GIC will add one (since 0=no intr). 
- */ - if (cpu_has_veic) - pin_offset = (GIC_CPU_TO_VEC_OFFSET - GIC_PIN_TO_VEC_OFFSET); - - /* Setup specifics */ - for (i = 0; i < mapsize; i++) { - cpu = intrmap[i].cpunum; - if (cpu == GIC_UNUSED) - continue; - gic_setup_intr(i, - intrmap[i].cpunum, - intrmap[i].pin + pin_offset, - intrmap[i].polarity, - intrmap[i].trigtype, - intrmap[i].flags); - } - - vpe_local_setup(numvpes); -} - -void __init gic_init(unsigned long gic_base_addr, - unsigned long gic_addrspace_size, - struct gic_intr_map *intr_map, unsigned int intr_map_size, - unsigned int irqbase) -{ - unsigned int gicconfig; - int numvpes, numintrs; - - _gic_base = (unsigned long) ioremap_nocache(gic_base_addr, - gic_addrspace_size); - gic_irq_base = irqbase; - - GICREAD(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig); - numintrs = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >> - GIC_SH_CONFIG_NUMINTRS_SHF; - numintrs = ((numintrs + 1) * 8); - - numvpes = (gicconfig & GIC_SH_CONFIG_NUMVPES_MSK) >> - GIC_SH_CONFIG_NUMVPES_SHF; - numvpes = numvpes + 1; - - gic_basic_init(numintrs, numvpes, intr_map, intr_map_size); - - gic_platform_init(numintrs, &gic_irq_controller); -} diff --git a/arch/mips/kernel/irq_cpu.c b/arch/mips/kernel/irq_cpu.c index e498f2b3646a..590c2c980fd3 100644 --- a/arch/mips/kernel/irq_cpu.c +++ b/arch/mips/kernel/irq_cpu.c @@ -36,6 +36,7 @@ #include <asm/irq_cpu.h> #include <asm/mipsregs.h> #include <asm/mipsmtregs.h> +#include <asm/setup.h> static inline void unmask_mips_irq(struct irq_data *d) { @@ -94,28 +95,24 @@ static struct irq_chip mips_mt_cpu_irq_controller = { .irq_eoi = unmask_mips_irq, }; -void __init mips_cpu_irq_init(void) +asmlinkage void __weak plat_irq_dispatch(void) { - int irq_base = MIPS_CPU_IRQ_BASE; - int i; + unsigned long pending = read_c0_cause() & read_c0_status() & ST0_IM; + int irq; - /* Mask interrupts. */ - clear_c0_status(ST0_IM); - clear_c0_cause(CAUSEF_IP); - - /* Software interrupts are used for MT/CMT IPI */ - for (i = irq_base; i < irq_base + 2; i++) - irq_set_chip_and_handler(i, cpu_has_mipsmt ? 
- &mips_mt_cpu_irq_controller : - &mips_cpu_irq_controller, - handle_percpu_irq); + if (!pending) { + spurious_interrupt(); + return; + } - for (i = irq_base + 2; i < irq_base + 8; i++) - irq_set_chip_and_handler(i, &mips_cpu_irq_controller, - handle_percpu_irq); + pending >>= CAUSEB_IP; + while (pending) { + irq = fls(pending) - 1; + do_IRQ(MIPS_CPU_IRQ_BASE + irq); + pending &= ~BIT(irq); + } } -#ifdef CONFIG_IRQ_DOMAIN static int mips_cpu_intc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) { @@ -128,6 +125,9 @@ static int mips_cpu_intc_map(struct irq_domain *d, unsigned int irq, chip = &mips_cpu_irq_controller; } + if (cpu_has_vint) + set_vi_handler(hw, plat_irq_dispatch); + irq_set_chip_and_handler(irq, chip, handle_percpu_irq); return 0; @@ -138,8 +138,7 @@ static const struct irq_domain_ops mips_cpu_intc_irq_domain_ops = { .xlate = irq_domain_xlate_onecell, }; -int __init mips_cpu_intc_init(struct device_node *of_node, - struct device_node *parent) +static void __init __mips_cpu_irq_init(struct device_node *of_node) { struct irq_domain *domain; @@ -151,7 +150,16 @@ int __init mips_cpu_intc_init(struct device_node *of_node, &mips_cpu_intc_irq_domain_ops, NULL); if (!domain) panic("Failed to add irqdomain for MIPS CPU"); +} +void __init mips_cpu_irq_init(void) +{ + __mips_cpu_irq_init(NULL); +} + +int __init mips_cpu_irq_of_init(struct device_node *of_node, + struct device_node *parent) +{ + __mips_cpu_irq_init(of_node); return 0; } -#endif /* CONFIG_IRQ_DOMAIN */ diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c index f76f7a08412d..85bbe9b96759 100644 --- a/arch/mips/kernel/mips-cm.c +++ b/arch/mips/kernel/mips-cm.c @@ -16,7 +16,7 @@ void __iomem *mips_cm_base; void __iomem *mips_cm_l2sync_base; -phys_t __mips_cm_phys_base(void) +phys_addr_t __mips_cm_phys_base(void) { u32 config3 = read_c0_config3(); u32 cmgcr; @@ -30,10 +30,10 @@ phys_t __mips_cm_phys_base(void) return (cmgcr & MIPS_CMGCRF_BASE) << (36 - 32); } -phys_t mips_cm_phys_base(void) +phys_addr_t mips_cm_phys_base(void) __attribute__((weak, alias("__mips_cm_phys_base"))); -phys_t __mips_cm_l2sync_phys_base(void) +phys_addr_t __mips_cm_l2sync_phys_base(void) { u32 base_reg; @@ -49,13 +49,13 @@ phys_t __mips_cm_l2sync_phys_base(void) return mips_cm_phys_base() + MIPS_CM_GCR_SIZE; } -phys_t mips_cm_l2sync_phys_base(void) +phys_addr_t mips_cm_l2sync_phys_base(void) __attribute__((weak, alias("__mips_cm_l2sync_phys_base"))); static void mips_cm_probe_l2sync(void) { unsigned major_rev; - phys_t addr; + phys_addr_t addr; /* L2-only sync was introduced with CM major revision 6 */ major_rev = (read_gcr_rev() & CM_GCR_REV_MAJOR_MSK) >> @@ -78,7 +78,7 @@ static void mips_cm_probe_l2sync(void) int mips_cm_probe(void) { - phys_t addr; + phys_addr_t addr; u32 base_reg; addr = mips_cm_phys_base(); diff --git a/arch/mips/kernel/mips-cpc.c b/arch/mips/kernel/mips-cpc.c index ba473608a347..11964501c4b0 100644 --- a/arch/mips/kernel/mips-cpc.c +++ b/arch/mips/kernel/mips-cpc.c @@ -21,7 +21,7 @@ static DEFINE_PER_CPU_ALIGNED(spinlock_t, cpc_core_lock); static DEFINE_PER_CPU_ALIGNED(unsigned long, cpc_core_lock_flags); -phys_t __weak mips_cpc_phys_base(void) +phys_addr_t __weak mips_cpc_phys_base(void) { u32 cpc_base; @@ -44,7 +44,7 @@ phys_t __weak mips_cpc_phys_base(void) int mips_cpc_probe(void) { - phys_t addr; + phys_addr_t addr; unsigned cpu; for_each_possible_cpu(cpu) diff --git a/arch/mips/kernel/mips_ksyms.c b/arch/mips/kernel/mips_ksyms.c index 2607c3a4ff7e..17eaf0cf760c 100644 --- 
a/arch/mips/kernel/mips_ksyms.c +++ b/arch/mips/kernel/mips_ksyms.c @@ -24,9 +24,7 @@ extern long __strncpy_from_user_nocheck_asm(char *__to, const char *__from, long __len); extern long __strncpy_from_user_asm(char *__to, const char *__from, long __len); -extern long __strlen_kernel_nocheck_asm(const char *s); extern long __strlen_kernel_asm(const char *s); -extern long __strlen_user_nocheck_asm(const char *s); extern long __strlen_user_asm(const char *s); extern long __strnlen_kernel_nocheck_asm(const char *s); extern long __strnlen_kernel_asm(const char *s); @@ -62,9 +60,7 @@ EXPORT_SYMBOL(__strncpy_from_kernel_nocheck_asm); EXPORT_SYMBOL(__strncpy_from_kernel_asm); EXPORT_SYMBOL(__strncpy_from_user_nocheck_asm); EXPORT_SYMBOL(__strncpy_from_user_asm); -EXPORT_SYMBOL(__strlen_kernel_nocheck_asm); EXPORT_SYMBOL(__strlen_kernel_asm); -EXPORT_SYMBOL(__strlen_user_nocheck_asm); EXPORT_SYMBOL(__strlen_user_asm); EXPORT_SYMBOL(__strnlen_kernel_nocheck_asm); EXPORT_SYMBOL(__strnlen_kernel_asm); diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c index a8f9cdc6f8b0..9466184d0039 100644 --- a/arch/mips/kernel/perf_event_mipsxx.c +++ b/arch/mips/kernel/perf_event_mipsxx.c @@ -561,8 +561,8 @@ static int mipspmu_get_irq(void) IRQF_PERCPU | IRQF_NOBALANCING | IRQF_NO_THREAD, "mips_perf_pmu", NULL); if (err) { - pr_warning("Unable to request IRQ%d for MIPS " - "performance counters!\n", mipspmu.irq); + pr_warn("Unable to request IRQ%d for MIPS performance counters!\n", + mipspmu.irq); } } else if (cp0_perfcount_irq < 0) { /* @@ -572,8 +572,7 @@ static int mipspmu_get_irq(void) perf_irq = mipsxx_pmu_handle_shared_irq; err = 0; } else { - pr_warning("The platform hasn't properly defined its " - "interrupt controller.\n"); + pr_warn("The platform hasn't properly defined its interrupt controller\n"); err = -ENOENT; } @@ -1614,22 +1613,13 @@ init_hw_perf_events(void) counters = counters_total_to_per_cpu(counters); #endif -#ifdef MSC01E_INT_BASE - if (cpu_has_veic) { - /* - * Using platform specific interrupt controller defines. 
- */ - irq = MSC01E_INT_BASE + MSC01E_INT_PERFCTR; - } else { -#endif - if ((cp0_perfcount_irq >= 0) && - (cp0_compare_irq != cp0_perfcount_irq)) - irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq; - else - irq = -1; -#ifdef MSC01E_INT_BASE - } -#endif + if (get_c0_perfcount_int) + irq = get_c0_perfcount_int(); + else if ((cp0_perfcount_irq >= 0) && + (cp0_compare_irq != cp0_perfcount_irq)) + irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq; + else + irq = -1; mipspmu.map_raw_event = mipsxx_pmu_map_raw_event; diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index 636b0745d7c7..eb76434828e8 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -42,6 +42,7 @@ #include <asm/isadep.h> #include <asm/inst.h> #include <asm/stacktrace.h> +#include <asm/irq_regs.h> #ifdef CONFIG_HOTPLUG_CPU void arch_cpu_idle_dead(void) @@ -187,21 +188,21 @@ static inline int is_ra_save_ins(union mips_instruction *ip) */ if (mm_insn_16bit(ip->halfword[0])) { mmi.word = (ip->halfword[0] << 16); - return ((mmi.mm16_r5_format.opcode == mm_swsp16_op && - mmi.mm16_r5_format.rt == 31) || - (mmi.mm16_m_format.opcode == mm_pool16c_op && - mmi.mm16_m_format.func == mm_swm16_op)); + return (mmi.mm16_r5_format.opcode == mm_swsp16_op && + mmi.mm16_r5_format.rt == 31) || + (mmi.mm16_m_format.opcode == mm_pool16c_op && + mmi.mm16_m_format.func == mm_swm16_op); } else { mmi.halfword[0] = ip->halfword[1]; mmi.halfword[1] = ip->halfword[0]; - return ((mmi.mm_m_format.opcode == mm_pool32b_op && - mmi.mm_m_format.rd > 9 && - mmi.mm_m_format.base == 29 && - mmi.mm_m_format.func == mm_swm32_func) || - (mmi.i_format.opcode == mm_sw32_op && - mmi.i_format.rs == 29 && - mmi.i_format.rt == 31)); + return (mmi.mm_m_format.opcode == mm_pool32b_op && + mmi.mm_m_format.rd > 9 && + mmi.mm_m_format.base == 29 && + mmi.mm_m_format.func == mm_swm32_func) || + (mmi.i_format.opcode == mm_sw32_op && + mmi.i_format.rs == 29 && + mmi.i_format.rt == 31); } #else /* sw / sd $ra, offset($sp) */ @@ -233,7 +234,7 @@ static inline int is_jump_ins(union mips_instruction *ip) if (ip->r_format.opcode != mm_pool32a_op || ip->r_format.func != mm_pool32axf_op) return 0; - return (((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op); + return ((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op; #else if (ip->j_format.opcode == j_op) return 1; @@ -260,13 +261,13 @@ static inline int is_sp_move_ins(union mips_instruction *ip) union mips_instruction mmi; mmi.word = (ip->halfword[0] << 16); - return ((mmi.mm16_r3_format.opcode == mm_pool16d_op && - mmi.mm16_r3_format.simmediate && mm_addiusp_func) || - (mmi.mm16_r5_format.opcode == mm_pool16d_op && - mmi.mm16_r5_format.rt == 29)); + return (mmi.mm16_r3_format.opcode == mm_pool16d_op && + mmi.mm16_r3_format.simmediate && mm_addiusp_func) || + (mmi.mm16_r5_format.opcode == mm_pool16d_op && + mmi.mm16_r5_format.rt == 29); } - return (ip->mm_i_format.opcode == mm_addiu32_op && - ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29); + return ip->mm_i_format.opcode == mm_addiu32_op && + ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29; #else /* addiu/daddiu sp,sp,-imm */ if (ip->i_format.rs != 29 || ip->i_format.rt != 29) @@ -532,3 +533,20 @@ unsigned long arch_align_stack(unsigned long sp) return sp & ALMASK; } + +static void arch_dump_stack(void *info) +{ + struct pt_regs *regs; + + regs = get_irq_regs(); + + if (regs) + show_regs(regs); + + dump_stack(); +} + +void arch_trigger_all_cpu_backtrace(bool include_self) +{ + smp_call_function(arch_dump_stack, NULL, 1); +} 
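The asm/time.h and perf_event_mipsxx.c hunks above replace the MSC01E/veic-specific interrupt selection with a __weak get_c0_perfcount_int() hook: init_hw_perf_events() now asks the platform for its performance-counter interrupt and only falls back to MIPS_CPU_IRQ_BASE + cp0_perfcount_irq when no hook is defined. The following is a minimal sketch, not part of this patch, of how a platform or irqchip driver might supply a strong definition; EXAMPLE_PERFCOUNT_IRQ is an invented placeholder for whatever Linux IRQ the platform routes the counter overflow to.

#include <asm/time.h>

/* Hypothetical platform code; the IRQ number below is illustrative only. */
#define EXAMPLE_PERFCOUNT_IRQ	56

int get_c0_perfcount_int(void)
{
	/*
	 * A regular (non-weak) definition overrides the __weak declaration
	 * in <asm/time.h>, so init_hw_perf_events() calls this instead of
	 * deriving the IRQ from cp0_perfcount_irq.
	 */
	return EXAMPLE_PERFCOUNT_IRQ;
}

With such a definition in place, mipspmu_get_irq() requests the platform-supplied interrupt directly and the generic perf code no longer needs controller-specific #ifdefs.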
diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c index 5d39bb85bf35..452d4350ce42 100644 --- a/arch/mips/kernel/prom.c +++ b/arch/mips/kernel/prom.c @@ -16,6 +16,7 @@ #include <linux/debugfs.h> #include <linux/of.h> #include <linux/of_fdt.h> +#include <linux/of_platform.h> #include <asm/page.h> #include <asm/prom.h> @@ -54,4 +55,21 @@ void __init __dt_setup_arch(void *bph) mips_set_machine_name(of_flat_dt_get_machine_name()); } + +int __init __dt_register_buses(const char *bus0, const char *bus1) +{ + static struct of_device_id of_ids[3]; + + if (!of_have_populated_dt()) + panic("device tree not present"); + + strlcpy(of_ids[0].compatible, bus0, sizeof(of_ids[0].compatible)); + strlcpy(of_ids[1].compatible, bus1, sizeof(of_ids[1].compatible)); + + if (of_platform_populate(NULL, of_ids, NULL, NULL)) + panic("failed to populate DT"); + + return 0; +} + #endif diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index f3b635f86c39..058929041368 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -82,14 +82,14 @@ static struct resource data_resource = { .name = "Kernel data", }; static void *detect_magic __initdata = detect_memory_region; -void __init add_memory_region(phys_t start, phys_t size, long type) +void __init add_memory_region(phys_addr_t start, phys_addr_t size, long type) { int x = boot_mem_map.nr_map; int i; /* Sanity check */ if (start + size < start) { - pr_warning("Trying to add an invalid memory region, skipped\n"); + pr_warn("Trying to add an invalid memory region, skipped\n"); return; } @@ -127,10 +127,10 @@ void __init add_memory_region(phys_t start, phys_t size, long type) boot_mem_map.nr_map++; } -void __init detect_memory_region(phys_t start, phys_t sz_min, phys_t sz_max) +void __init detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_addr_t sz_max) { void *dm = &detect_magic; - phys_t size; + phys_addr_t size; for (size = sz_min; size < sz_max; size <<= 1) { if (!memcmp(dm, dm + size, sizeof(detect_magic))) @@ -493,7 +493,7 @@ static int usermem __initdata; static int __init early_parse_mem(char *p) { - phys_t start, size; + phys_addr_t start, size; /* * If a user specifies memory size, we @@ -545,9 +545,9 @@ static int __init early_parse_elfcorehdr(char *p) early_param("elfcorehdr", early_parse_elfcorehdr); #endif -static void __init arch_mem_addpart(phys_t mem, phys_t end, int type) +static void __init arch_mem_addpart(phys_addr_t mem, phys_addr_t end, int type) { - phys_t size; + phys_addr_t size; int i; size = end - mem; diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c index 16f1e4f2bf3c..545bf11bd2ed 100644 --- a/arch/mips/kernel/signal.c +++ b/arch/mips/kernel/signal.c @@ -530,7 +530,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) struct mips_abi *abi = current->thread.abi; #ifdef CONFIG_CPU_MICROMIPS void *vdso; - unsigned int tmp = (unsigned int)current->mm->context.vdso; + unsigned long tmp = (unsigned long)current->mm->context.vdso; set_isa16_mode(tmp); vdso = (void *)tmp; diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c index 06bb5ed6d80a..b8bd9340c9c7 100644 --- a/arch/mips/kernel/smp-bmips.c +++ b/arch/mips/kernel/smp-bmips.c @@ -35,6 +35,7 @@ #include <asm/bmips.h> #include <asm/traps.h> #include <asm/barrier.h> +#include <asm/cpu-features.h> static int __maybe_unused max_cpus = 1; @@ -42,6 +43,12 @@ static int __maybe_unused max_cpus = 1; int bmips_smp_enabled = 1; int bmips_cpu_offset; cpumask_t bmips_booted_mask; +unsigned 
long bmips_tp1_irqs = IE_IRQ1; + +#define RESET_FROM_KSEG0 0x80080800 +#define RESET_FROM_KSEG1 0xa0080800 + +static void bmips_set_reset_vec(int cpu, u32 val); #ifdef CONFIG_SMP @@ -194,6 +201,9 @@ static void bmips_boot_secondary(int cpu, struct task_struct *idle) pr_info("SMP: Booting CPU%d...\n", cpu); if (cpumask_test_cpu(cpu, &bmips_booted_mask)) { + /* kseg1 might not exist if this CPU enabled XKS01 */ + bmips_set_reset_vec(cpu, RESET_FROM_KSEG0); + switch (current_cpu_type()) { case CPU_BMIPS4350: case CPU_BMIPS4380: @@ -203,8 +213,9 @@ static void bmips_boot_secondary(int cpu, struct task_struct *idle) bmips5000_send_ipi_single(cpu, 0); break; } - } - else { + } else { + bmips_set_reset_vec(cpu, RESET_FROM_KSEG1); + switch (current_cpu_type()) { case CPU_BMIPS4350: case CPU_BMIPS4380: @@ -213,17 +224,7 @@ static void bmips_boot_secondary(int cpu, struct task_struct *idle) set_c0_brcm_cmt_ctrl(0x01); break; case CPU_BMIPS5000: - if (cpu & 0x01) - write_c0_brcm_action(ACTION_BOOT_THREAD(cpu)); - else { - /* - * core N thread 0 was already booted; just - * pulse the NMI line - */ - bmips_write_zscm_reg(0x210, 0xc0000000); - udelay(10); - bmips_write_zscm_reg(0x210, 0x00); - } + write_c0_brcm_action(ACTION_BOOT_THREAD(cpu)); break; } cpumask_set_cpu(cpu, &bmips_booted_mask); @@ -235,31 +236,12 @@ static void bmips_boot_secondary(int cpu, struct task_struct *idle) */ static void bmips_init_secondary(void) { - /* move NMI vector to kseg0, in case XKS01 is enabled */ - - void __iomem *cbr; - unsigned long old_vec; - unsigned long relo_vector; - int boot_cpu; - switch (current_cpu_type()) { case CPU_BMIPS4350: case CPU_BMIPS4380: - cbr = BMIPS_GET_CBR(); - - boot_cpu = !!(read_c0_brcm_cmt_local() & (1 << 31)); - relo_vector = boot_cpu ? BMIPS_RELO_VECTOR_CONTROL_0 : - BMIPS_RELO_VECTOR_CONTROL_1; - - old_vec = __raw_readl(cbr + relo_vector); - __raw_writel(old_vec & ~0x20000000, cbr + relo_vector); - clear_c0_cause(smp_processor_id() ? C_SW1 : C_SW0); break; case CPU_BMIPS5000: - write_c0_brcm_bootvec(read_c0_brcm_bootvec() & - (smp_processor_id() & 0x01 ? ~0x20000000 : ~0x2000)); - write_c0_brcm_action(ACTION_CLR_IPI(smp_processor_id(), 0)); break; } @@ -276,7 +258,7 @@ static void bmips_smp_finish(void) write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ); irq_enable_hazard(); - set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ1 | IE_IRQ5 | ST0_IE); + set_c0_status(IE_SW0 | IE_SW1 | bmips_tp1_irqs | IE_IRQ5 | ST0_IE); irq_enable_hazard(); } @@ -381,6 +363,7 @@ static int bmips_cpu_disable(void) set_cpu_online(cpu, false); cpu_clear(cpu, cpu_callin_map); + clear_c0_status(IE_IRQ5); local_flush_tlb_all(); local_flush_icache_range(0, ~0); @@ -405,7 +388,8 @@ void __ref play_dead(void) * IRQ handlers; this clears ST0_IE and returns immediately. */ clear_c0_cause(CAUSEF_IV | C_SW0 | C_SW1); - change_c0_status(IE_IRQ5 | IE_IRQ1 | IE_SW0 | IE_SW1 | ST0_IE | ST0_BEV, + change_c0_status( + IE_IRQ5 | bmips_tp1_irqs | IE_SW0 | IE_SW1 | ST0_IE | ST0_BEV, IE_SW0 | IE_SW1 | ST0_IE | ST0_BEV); irq_disable_hazard(); @@ -473,10 +457,61 @@ static inline void bmips_nmi_handler_setup(void) &bmips_smp_int_vec_end); } +struct reset_vec_info { + int cpu; + u32 val; +}; + +static void bmips_set_reset_vec_remote(void *vinfo) +{ + struct reset_vec_info *info = vinfo; + int shift = info->cpu & 0x01 ? 
16 : 0; + u32 mask = ~(0xffff << shift), val = info->val >> 16; + + preempt_disable(); + if (smp_processor_id() > 0) { + smp_call_function_single(0, &bmips_set_reset_vec_remote, + info, 1); + } else { + if (info->cpu & 0x02) { + /* BMIPS5200 "should" use mask/shift, but it's buggy */ + bmips_write_zscm_reg(0xa0, (val << 16) | val); + bmips_read_zscm_reg(0xa0); + } else { + write_c0_brcm_bootvec((read_c0_brcm_bootvec() & mask) | + (val << shift)); + } + } + preempt_enable(); +} + +static void bmips_set_reset_vec(int cpu, u32 val) +{ + struct reset_vec_info info; + + if (current_cpu_type() == CPU_BMIPS5000) { + /* this needs to run from CPU0 (which is always online) */ + info.cpu = cpu; + info.val = val; + bmips_set_reset_vec_remote(&info); + } else { + void __iomem *cbr = BMIPS_GET_CBR(); + + if (cpu == 0) + __raw_writel(val, cbr + BMIPS_RELO_VECTOR_CONTROL_0); + else { + if (current_cpu_type() != CPU_BMIPS4380) + return; + __raw_writel(val, cbr + BMIPS_RELO_VECTOR_CONTROL_1); + } + } + __sync(); + back_to_back_c0_hazard(); +} + void bmips_ebase_setup(void) { unsigned long new_ebase = ebase; - void __iomem __maybe_unused *cbr; BUG_ON(ebase != CKSEG0); @@ -496,15 +531,14 @@ void bmips_ebase_setup(void) &bmips_smp_int_vec, 0x80); __sync(); return; + case CPU_BMIPS3300: case CPU_BMIPS4380: /* * 0x8000_0000: reset/NMI (initially in kseg1) * 0x8000_0400: normal vectors */ new_ebase = 0x80000400; - cbr = BMIPS_GET_CBR(); - __raw_writel(0x80080800, cbr + BMIPS_RELO_VECTOR_CONTROL_0); - __raw_writel(0xa0080800, cbr + BMIPS_RELO_VECTOR_CONTROL_1); + bmips_set_reset_vec(0, RESET_FROM_KSEG0); break; case CPU_BMIPS5000: /* @@ -512,10 +546,8 @@ void bmips_ebase_setup(void) * 0x8000_1000: normal vectors */ new_ebase = 0x80001000; - write_c0_brcm_bootvec(0xa0088008); + bmips_set_reset_vec(0, RESET_FROM_KSEG0); write_c0_ebase(new_ebase); - if (max_cpus > 2) - bmips_write_zscm_reg(0xa0, 0xa008a008); break; default: return; diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c index fc8a51553426..1e0a93c5a3e7 100644 --- a/arch/mips/kernel/smp-cmp.c +++ b/arch/mips/kernel/smp-cmp.c @@ -24,6 +24,7 @@ #include <linux/cpumask.h> #include <linux/interrupt.h> #include <linux/compiler.h> +#include <linux/irqchip/mips-gic.h> #include <linux/atomic.h> #include <asm/cacheflush.h> @@ -37,7 +38,6 @@ #include <asm/mipsmtregs.h> #include <asm/mips_mt.h> #include <asm/amon.h> -#include <asm/gic.h> static void cmp_init_secondary(void) { diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c index e6e16a1d4add..bed7590e475f 100644 --- a/arch/mips/kernel/smp-cps.c +++ b/arch/mips/kernel/smp-cps.c @@ -9,13 +9,13 @@ */ #include <linux/io.h> +#include <linux/irqchip/mips-gic.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/smp.h> #include <linux/types.h> #include <asm/bcache.h> -#include <asm/gic.h> #include <asm/mips-cm.h> #include <asm/mips-cpc.h> #include <asm/mips_mt.h> @@ -273,8 +273,8 @@ static void cps_init_secondary(void) if (cpu_has_mipsmt) dmt(); - change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 | - STATUSF_IP6 | STATUSF_IP7); + change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 | STATUSF_IP4 | + STATUSF_IP5 | STATUSF_IP6 | STATUSF_IP7); } static void cps_smp_finish(void) diff --git a/arch/mips/kernel/smp-gic.c b/arch/mips/kernel/smp-gic.c index 3b21a96d1ccb..5f0ab5bcd01e 100644 --- a/arch/mips/kernel/smp-gic.c +++ b/arch/mips/kernel/smp-gic.c @@ -12,9 +12,9 @@ * option) any later version. 
*/ +#include <linux/irqchip/mips-gic.h> #include <linux/printk.h> -#include <asm/gic.h> #include <asm/mips-cpc.h> #include <asm/smp-ops.h> diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c index 21f23add04f4..ad86951b73bd 100644 --- a/arch/mips/kernel/smp-mt.c +++ b/arch/mips/kernel/smp-mt.c @@ -21,6 +21,7 @@ #include <linux/sched.h> #include <linux/cpumask.h> #include <linux/interrupt.h> +#include <linux/irqchip/mips-gic.h> #include <linux/compiler.h> #include <linux/smp.h> @@ -34,7 +35,6 @@ #include <asm/mipsregs.h> #include <asm/mipsmtregs.h> #include <asm/mips_mt.h> -#include <asm/gic.h> static void __init smvp_copy_vpe_config(void) { @@ -119,7 +119,7 @@ static void vsmp_send_ipi_single(int cpu, unsigned int action) unsigned long flags; int vpflags; -#ifdef CONFIG_IRQ_GIC +#ifdef CONFIG_MIPS_GIC if (gic_present) { gic_send_ipi_single(cpu, action); return; @@ -158,7 +158,7 @@ static void vsmp_send_ipi_mask(const struct cpumask *mask, unsigned int action) static void vsmp_init_secondary(void) { -#ifdef CONFIG_IRQ_GIC +#ifdef CONFIG_MIPS_GIC /* This is Malta specific: IPI,performance and timer interrupts */ if (gic_present) change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 | diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c index 4a4f9dda5658..604b558809c4 100644 --- a/arch/mips/kernel/syscall.c +++ b/arch/mips/kernel/syscall.c @@ -117,6 +117,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new) "2: sc %[tmp], (%[addr]) \n" " beqzl %[tmp], 1b \n" "3: \n" + " .insn \n" " .section .fixup,\"ax\" \n" "4: li %[err], %[efault] \n" " j 3b \n" @@ -142,6 +143,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new) "2: sc %[tmp], (%[addr]) \n" " bnez %[tmp], 4f \n" "3: \n" + " .insn \n" " .subsection 2 \n" "4: b 1b \n" " .previous \n" diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 22b19c275044..ad3d2031c327 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -724,6 +724,50 @@ int process_fpemu_return(int sig, void __user *fault_addr) } } +static int simulate_fp(struct pt_regs *regs, unsigned int opcode, + unsigned long old_epc, unsigned long old_ra) +{ + union mips_instruction inst = { .word = opcode }; + void __user *fault_addr = NULL; + int sig; + + /* If it's obviously not an FP instruction, skip it */ + switch (inst.i_format.opcode) { + case cop1_op: + case cop1x_op: + case lwc1_op: + case ldc1_op: + case swc1_op: + case sdc1_op: + break; + + default: + return -1; + } + + /* + * do_ri skipped over the instruction via compute_return_epc, undo + * that for the FPU emulator. 
+ */ + regs->cp0_epc = old_epc; + regs->regs[31] = old_ra; + + /* Save the FP context to struct thread_struct */ + lose_fpu(1); + + /* Run the emulator */ + sig = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 1, + &fault_addr); + + /* If something went wrong, signal */ + process_fpemu_return(sig, fault_addr); + + /* Restore the hardware register state */ + own_fpu(1); + + return 0; +} + /* * XXX Delayed fp exceptions when doing a lazy ctx switch XXX */ @@ -1016,6 +1060,9 @@ asmlinkage void do_ri(struct pt_regs *regs) if (status < 0) status = simulate_sync(regs, opcode); + + if (status < 0) + status = simulate_fp(regs, opcode, old_epc, old31); } if (status < 0) @@ -1380,12 +1427,19 @@ asmlinkage void do_mcheck(struct pt_regs *regs) show_regs(regs); if (multi_match) { - printk("Index : %0x\n", read_c0_index()); - printk("Pagemask: %0x\n", read_c0_pagemask()); - printk("EntryHi : %0*lx\n", field, read_c0_entryhi()); - printk("EntryLo0: %0*lx\n", field, read_c0_entrylo0()); - printk("EntryLo1: %0*lx\n", field, read_c0_entrylo1()); - printk("\n"); + pr_err("Index : %0x\n", read_c0_index()); + pr_err("Pagemask: %0x\n", read_c0_pagemask()); + pr_err("EntryHi : %0*lx\n", field, read_c0_entryhi()); + pr_err("EntryLo0: %0*lx\n", field, read_c0_entrylo0()); + pr_err("EntryLo1: %0*lx\n", field, read_c0_entrylo1()); + pr_err("Wired : %0x\n", read_c0_wired()); + pr_err("Pagegrain: %0x\n", read_c0_pagegrain()); + if (cpu_has_htw) { + pr_err("PWField : %0*lx\n", field, read_c0_pwfield()); + pr_err("PWSize : %0*lx\n", field, read_c0_pwsize()); + pr_err("PWCtl : %0x\n", read_c0_pwctl()); + } + pr_err("\n"); dump_tlb_all(); } diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c index 0f1af58b036a..ed2a278722a9 100644 --- a/arch/mips/kernel/vdso.c +++ b/arch/mips/kernel/vdso.c @@ -16,9 +16,11 @@ #include <linux/elf.h> #include <linux/vmalloc.h> #include <linux/unistd.h> +#include <linux/random.h> #include <asm/vdso.h> #include <asm/uasm.h> +#include <asm/processor.h> /* * Including <asm/unistd.h> would give use the 64-bit syscall numbers ... 
@@ -67,7 +69,18 @@ subsys_initcall(init_vdso); static unsigned long vdso_addr(unsigned long start) { - return STACK_TOP; + unsigned long offset = 0UL; + + if (current->flags & PF_RANDOMIZE) { + offset = get_random_int(); + offset <<= PAGE_SHIFT; + if (TASK_IS_32BIT_ADDR) + offset &= 0xfffffful; + else + offset &= 0xffffffful; + } + + return STACK_TOP + offset; } int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) diff --git a/arch/mips/lantiq/falcon/sysctrl.c b/arch/mips/lantiq/falcon/sysctrl.c index 468ffa043607..7edcd4946fc1 100644 --- a/arch/mips/lantiq/falcon/sysctrl.c +++ b/arch/mips/lantiq/falcon/sysctrl.c @@ -49,6 +49,7 @@ /* Activation Status Register */ #define ACTS_ASC0_ACT 0x00001000 +#define ACTS_SSC0 0x00002000 #define ACTS_ASC1_ACT 0x00000800 #define ACTS_I2C_ACT 0x00004000 #define ACTS_P0 0x00010000 @@ -147,12 +148,11 @@ static void falcon_gpe_enable(void) if (status & (1 << (GPPC_OFFSET + 1))) return; - if (status_r32(STATUS_CONFIG) == 0) + freq = (status_r32(STATUS_CONFIG) & + GPEFREQ_MASK) >> + GPEFREQ_OFFSET; + if (freq == 0) freq = 1; /* use 625MHz on unfused chip */ - else - freq = (status_r32(STATUS_CONFIG) & - GPEFREQ_MASK) >> - GPEFREQ_OFFSET; /* apply new frequency */ sysctl_w32_mask(SYSCTL_SYS1, 7 << (GPPC_OFFSET + 1), @@ -260,5 +260,6 @@ void __init ltq_soc_init(void) clkdev_add_sys("1e800600.pad", SYSCTL_SYS1, ACTS_PADCTRL4); clkdev_add_sys("1e100b00.serial", SYSCTL_SYS1, ACTS_ASC1_ACT); clkdev_add_sys("1e100c00.serial", SYSCTL_SYS1, ACTS_ASC0_ACT); + clkdev_add_sys("1e100d00.spi", SYSCTL_SYS1, ACTS_SSC0); clkdev_add_sys("1e200000.i2c", SYSCTL_SYS1, ACTS_I2C_ACT); } diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c index 030568a70ac4..6ab10573490d 100644 --- a/arch/mips/lantiq/irq.c +++ b/arch/mips/lantiq/irq.c @@ -70,6 +70,7 @@ static struct resource ltq_eiu_irq[MAX_EIU]; static void __iomem *ltq_icu_membase[MAX_IM]; static void __iomem *ltq_eiu_membase; static struct irq_domain *ltq_domain; +static int ltq_perfcount_irq; int ltq_eiu_get_irq(int exin) { @@ -378,30 +379,6 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent) panic("Failed to remap icu memory"); } - /* the external interrupts are optional and xway only */ - eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu-xway"); - if (eiu_node && !of_address_to_resource(eiu_node, 0, &res)) { - /* find out how many external irq sources we have */ - exin_avail = of_irq_count(eiu_node); - - if (exin_avail > MAX_EIU) - exin_avail = MAX_EIU; - - ret = of_irq_to_resource_table(eiu_node, - ltq_eiu_irq, exin_avail); - if (ret != exin_avail) - panic("failed to load external irq resources"); - - if (request_mem_region(res.start, resource_size(&res), - res.name) < 0) - pr_err("Failed to request eiu memory"); - - ltq_eiu_membase = ioremap_nocache(res.start, - resource_size(&res)); - if (!ltq_eiu_membase) - panic("Failed to remap eiu memory"); - } - /* turn off all irqs by default */ for (i = 0; i < MAX_IM; i++) { /* make sure all irqs are turned off by default */ @@ -449,7 +426,7 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent) #endif /* tell oprofile which irq to use */ - cp0_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ); + ltq_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ); /* * if the timer irq is not one of the mips irqs we need to @@ -458,9 +435,38 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent) if (MIPS_CPU_TIMER_IRQ != 7) irq_create_mapping(ltq_domain, 
MIPS_CPU_TIMER_IRQ); + /* the external interrupts are optional and xway only */ + eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu-xway"); + if (eiu_node && !of_address_to_resource(eiu_node, 0, &res)) { + /* find out how many external irq sources we have */ + exin_avail = of_irq_count(eiu_node); + + if (exin_avail > MAX_EIU) + exin_avail = MAX_EIU; + + ret = of_irq_to_resource_table(eiu_node, + ltq_eiu_irq, exin_avail); + if (ret != exin_avail) + panic("failed to load external irq resources"); + + if (request_mem_region(res.start, resource_size(&res), + res.name) < 0) + pr_err("Failed to request eiu memory"); + + ltq_eiu_membase = ioremap_nocache(res.start, + resource_size(&res)); + if (!ltq_eiu_membase) + panic("Failed to remap eiu memory"); + } + return 0; } +int get_c0_perfcount_int(void) +{ + return ltq_perfcount_irq; +} + unsigned int get_c0_compare_int(void) { return MIPS_CPU_TIMER_IRQ; diff --git a/arch/mips/lantiq/prom.c b/arch/mips/lantiq/prom.c index 7447d322d14e..39ab3e786e59 100644 --- a/arch/mips/lantiq/prom.c +++ b/arch/mips/lantiq/prom.c @@ -36,6 +36,11 @@ const char *get_system_type(void) return soc_info.sys_type; } +int ltq_soc_type(void) +{ + return soc_info.type; +} + void prom_free_prom_memory(void) { } @@ -72,6 +77,8 @@ void __init plat_mem_setup(void) * parsed resulting in our memory appearing */ __dt_setup_arch(__dtb_start); + + strlcpy(arcs_cmdline, boot_command_line, COMMAND_LINE_SIZE); } void __init device_tree_init(void) @@ -97,16 +104,7 @@ void __init prom_init(void) int __init plat_of_setup(void) { - static struct of_device_id of_ids[3]; - - if (!of_have_populated_dt()) - panic("device tree not present"); - - strlcpy(of_ids[0].compatible, soc_info.compatible, - sizeof(of_ids[0].compatible)); - strncpy(of_ids[1].compatible, "simple-bus", - sizeof(of_ids[1].compatible)); - return of_platform_populate(NULL, of_ids, NULL, NULL); + return __dt_register_buses(soc_info.compatible, "simple-bus"); } arch_initcall(plat_of_setup); diff --git a/arch/mips/lantiq/xway/Makefile b/arch/mips/lantiq/xway/Makefile index 087497d97357..a2edc538f477 100644 --- a/arch/mips/lantiq/xway/Makefile +++ b/arch/mips/lantiq/xway/Makefile @@ -1,3 +1,5 @@ obj-y := prom.o sysctrl.o clk.o reset.o dma.o gptu.o dcdc.o +obj-y += vmmc.o + obj-$(CONFIG_XRX200_PHY_FW) += xrx200_phy_fw.o diff --git a/arch/mips/lantiq/xway/dcdc.c b/arch/mips/lantiq/xway/dcdc.c index 7688ac0f06d0..ae8e930f5283 100644 --- a/arch/mips/lantiq/xway/dcdc.c +++ b/arch/mips/lantiq/xway/dcdc.c @@ -46,7 +46,6 @@ static struct platform_driver dcdc_driver = { .probe = dcdc_probe, .driver = { .name = "dcdc-xrx200", - .owner = THIS_MODULE, .of_match_table = dcdc_match, }, }; diff --git a/arch/mips/lantiq/xway/dma.c b/arch/mips/lantiq/xway/dma.c index 78a91fa41944..34a116e840d8 100644 --- a/arch/mips/lantiq/xway/dma.c +++ b/arch/mips/lantiq/xway/dma.c @@ -261,7 +261,6 @@ static struct platform_driver dma_driver = { .probe = ltq_dma_init, .driver = { .name = "dma-xway", - .owner = THIS_MODULE, .of_match_table = dma_match, }, }; diff --git a/arch/mips/lantiq/xway/gptu.c b/arch/mips/lantiq/xway/gptu.c index 850821df924c..f1492b2db017 100644 --- a/arch/mips/lantiq/xway/gptu.c +++ b/arch/mips/lantiq/xway/gptu.c @@ -193,7 +193,6 @@ static struct platform_driver dma_driver = { .probe = gptu_probe, .driver = { .name = "gptu-xway", - .owner = THIS_MODULE, .of_match_table = gptu_match, }, }; diff --git a/arch/mips/lantiq/xway/reset.c b/arch/mips/lantiq/xway/reset.c index 1fa0f175357e..fe68f9ae47c1 100644 --- 
a/arch/mips/lantiq/xway/reset.c +++ b/arch/mips/lantiq/xway/reset.c @@ -14,6 +14,7 @@ #include <linux/delay.h> #include <linux/of_address.h> #include <linux/of_platform.h> +#include <linux/reset-controller.h> #include <asm/reboot.h> @@ -113,10 +114,77 @@ void ltq_reset_once(unsigned int module, ulong u) ltq_rcu_w32(ltq_rcu_r32(RCU_RST_REQ) & ~module, RCU_RST_REQ); } +static int ltq_assert_device(struct reset_controller_dev *rcdev, + unsigned long id) +{ + u32 val; + + if (id < 8) + return -1; + + val = ltq_rcu_r32(RCU_RST_REQ); + val |= BIT(id); + ltq_rcu_w32(val, RCU_RST_REQ); + + return 0; +} + +static int ltq_deassert_device(struct reset_controller_dev *rcdev, + unsigned long id) +{ + u32 val; + + if (id < 8) + return -1; + + val = ltq_rcu_r32(RCU_RST_REQ); + val &= ~BIT(id); + ltq_rcu_w32(val, RCU_RST_REQ); + + return 0; +} + +static int ltq_reset_device(struct reset_controller_dev *rcdev, + unsigned long id) +{ + ltq_assert_device(rcdev, id); + return ltq_deassert_device(rcdev, id); +} + +static struct reset_control_ops reset_ops = { + .reset = ltq_reset_device, + .assert = ltq_assert_device, + .deassert = ltq_deassert_device, +}; + +static struct reset_controller_dev reset_dev = { + .ops = &reset_ops, + .owner = THIS_MODULE, + .nr_resets = 32, + .of_reset_n_cells = 1, +}; + +void ltq_rst_init(void) +{ + reset_dev.of_node = of_find_compatible_node(NULL, NULL, + "lantiq,xway-reset"); + if (!reset_dev.of_node) + pr_err("Failed to find reset controller node"); + else + reset_controller_register(&reset_dev); +} + static void ltq_machine_restart(char *command) { + u32 val = ltq_rcu_r32(RCU_RST_REQ); + + if (of_device_is_compatible(ltq_rcu_np, "lantiq,rcu-xrx200")) + val |= RCU_RD_GPHY1_XRX200 | RCU_RD_GPHY0_XRX200; + + val |= RCU_RD_SRST; + local_irq_disable(); - ltq_rcu_w32(ltq_rcu_r32(RCU_RST_REQ) | RCU_RD_SRST, RCU_RST_REQ); + ltq_rcu_w32(val, RCU_RST_REQ); unreachable(); } diff --git a/arch/mips/lantiq/xway/vmmc.c b/arch/mips/lantiq/xway/vmmc.c new file mode 100644 index 000000000000..696cd57f6f13 --- /dev/null +++ b/arch/mips/lantiq/xway/vmmc.c @@ -0,0 +1,69 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * Copyright (C) 2012 John Crispin <blogic@openwrt.org> + */ + +#include <linux/module.h> +#include <linux/of_platform.h> +#include <linux/of_gpio.h> +#include <linux/dma-mapping.h> + +#include <lantiq_soc.h> + +static unsigned int *cp1_base; + +unsigned int *ltq_get_cp1_base(void) +{ + if (!cp1_base) + panic("no cp1 base was set\n"); + + return cp1_base; +} +EXPORT_SYMBOL(ltq_get_cp1_base); + +static int vmmc_probe(struct platform_device *pdev) +{ +#define CP1_SIZE (1 << 20) + int gpio_count; + dma_addr_t dma; + + cp1_base = + (void *) CPHYSADDR(dma_alloc_coherent(NULL, CP1_SIZE, + &dma, GFP_ATOMIC)); + + gpio_count = of_gpio_count(pdev->dev.of_node); + while (gpio_count > 0) { + enum of_gpio_flags flags; + int gpio = of_get_gpio_flags(pdev->dev.of_node, + --gpio_count, &flags); + if (gpio_request(gpio, "vmmc-relay")) + continue; + dev_info(&pdev->dev, "requested GPIO %d\n", gpio); + gpio_direction_output(gpio, + (flags & OF_GPIO_ACTIVE_LOW) ? 
(0) : (1)); + } + + dev_info(&pdev->dev, "reserved %dMB at 0x%p", CP1_SIZE >> 20, cp1_base); + + return 0; +} + +static const struct of_device_id vmmc_match[] = { + { .compatible = "lantiq,vmmc-xway" }, + {}, +}; +MODULE_DEVICE_TABLE(of, vmmc_match); + +static struct platform_driver vmmc_driver = { + .probe = vmmc_probe, + .driver = { + .name = "lantiq,vmmc", + .owner = THIS_MODULE, + .of_match_table = vmmc_match, + }, +}; + +module_platform_driver(vmmc_driver); diff --git a/arch/mips/lantiq/xway/xrx200_phy_fw.c b/arch/mips/lantiq/xway/xrx200_phy_fw.c index d4d9d31f152e..199094a40c15 100644 --- a/arch/mips/lantiq/xway/xrx200_phy_fw.c +++ b/arch/mips/lantiq/xway/xrx200_phy_fw.c @@ -24,7 +24,28 @@ static dma_addr_t xway_gphy_load(struct platform_device *pdev) void *fw_addr; size_t size; - if (of_property_read_string(pdev->dev.of_node, "firmware", &fw_name)) { + if (of_get_property(pdev->dev.of_node, "firmware1", NULL) || + of_get_property(pdev->dev.of_node, "firmware2", NULL)) { + switch (ltq_soc_type()) { + case SOC_TYPE_VR9: + if (of_property_read_string(pdev->dev.of_node, + "firmware1", &fw_name)) { + dev_err(&pdev->dev, + "failed to load firmware filename\n"); + return 0; + } + break; + case SOC_TYPE_VR9_2: + if (of_property_read_string(pdev->dev.of_node, + "firmware2", &fw_name)) { + dev_err(&pdev->dev, + "failed to load firmware filename\n"); + return 0; + } + break; + } + } else if (of_property_read_string(pdev->dev.of_node, + "firmware", &fw_name)) { dev_err(&pdev->dev, "failed to load firmware filename\n"); return 0; } @@ -85,7 +106,6 @@ static struct platform_driver xway_phy_driver = { .probe = xway_phy_fw_probe, .driver = { .name = "phy-xrx200", - .owner = THIS_MODULE, .of_match_table = xway_phy_match, }, }; diff --git a/arch/mips/lib/iomap.c b/arch/mips/lib/iomap.c index e3acb2dad33a..8e7e378ce51c 100644 --- a/arch/mips/lib/iomap.c +++ b/arch/mips/lib/iomap.c @@ -97,14 +97,14 @@ EXPORT_SYMBOL(iowrite32be); /* * These are the "repeat MMIO read/write" functions. - * Note the "__raw" accesses, since we don't want to - * convert to CPU byte order. We write in "IO byte - * order" (we also don't have IO barriers). + * Note the "__mem" accesses, since we want to convert + * to CPU byte order if the host bus happens to not match the + * endianness of PCI/ISA (see mach-generic/mangle-port.h). 
*/ static inline void mmio_insb(void __iomem *addr, u8 *dst, int count) { while (--count >= 0) { - u8 data = __raw_readb(addr); + u8 data = __mem_readb(addr); *dst = data; dst++; } @@ -113,7 +113,7 @@ static inline void mmio_insb(void __iomem *addr, u8 *dst, int count) static inline void mmio_insw(void __iomem *addr, u16 *dst, int count) { while (--count >= 0) { - u16 data = __raw_readw(addr); + u16 data = __mem_readw(addr); *dst = data; dst++; } @@ -122,7 +122,7 @@ static inline void mmio_insw(void __iomem *addr, u16 *dst, int count) static inline void mmio_insl(void __iomem *addr, u32 *dst, int count) { while (--count >= 0) { - u32 data = __raw_readl(addr); + u32 data = __mem_readl(addr); *dst = data; dst++; } @@ -131,7 +131,7 @@ static inline void mmio_insl(void __iomem *addr, u32 *dst, int count) static inline void mmio_outsb(void __iomem *addr, const u8 *src, int count) { while (--count >= 0) { - __raw_writeb(*src, addr); + __mem_writeb(*src, addr); src++; } } @@ -139,7 +139,7 @@ static inline void mmio_outsb(void __iomem *addr, const u8 *src, int count) static inline void mmio_outsw(void __iomem *addr, const u16 *src, int count) { while (--count >= 0) { - __raw_writew(*src, addr); + __mem_writew(*src, addr); src++; } } @@ -147,7 +147,7 @@ static inline void mmio_outsw(void __iomem *addr, const u16 *src, int count) static inline void mmio_outsl(void __iomem *addr, const u32 *src, int count) { while (--count >= 0) { - __raw_writel(*src, addr); + __mem_writel(*src, addr); src++; } } diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S index 7b0e5462ca51..c8fe6b1968fb 100644 --- a/arch/mips/lib/memset.S +++ b/arch/mips/lib/memset.S @@ -114,8 +114,7 @@ R10KCBARRIER(0(ra)) #ifdef __MIPSEB__ EX(LONG_S_L, a1, (a0), .Lfirst_fixup\@) /* make word/dword aligned */ -#endif -#ifdef __MIPSEL__ +#else EX(LONG_S_R, a1, (a0), .Lfirst_fixup\@) /* make word/dword aligned */ #endif PTR_SUBU a0, t0 /* long align ptr */ @@ -164,8 +163,7 @@ R10KCBARRIER(0(ra)) #ifdef __MIPSEB__ EX(LONG_S_R, a1, -1(a0), .Llast_fixup\@) -#endif -#ifdef __MIPSEL__ +#else EX(LONG_S_L, a1, -1(a0), .Llast_fixup\@) #endif 1: jr ra diff --git a/arch/mips/lib/mips-atomic.c b/arch/mips/lib/mips-atomic.c index 57bcdaf1f1c8..be777d9a3f85 100644 --- a/arch/mips/lib/mips-atomic.c +++ b/arch/mips/lib/mips-atomic.c @@ -42,15 +42,11 @@ notrace void arch_local_irq_disable(void) __asm__ __volatile__( " .set push \n" " .set noat \n" -#if defined(CONFIG_CPU_MIPSR2) - /* see irqflags.h for inline function */ -#else " mfc0 $1,$12 \n" " ori $1,0x1f \n" " xori $1,0x1f \n" " .set noreorder \n" " mtc0 $1,$12 \n" -#endif " " __stringify(__irq_disable_hazard) " \n" " .set pop \n" : /* no outputs */ @@ -72,15 +68,11 @@ notrace unsigned long arch_local_irq_save(void) " .set push \n" " .set reorder \n" " .set noat \n" -#if defined(CONFIG_CPU_MIPSR2) - /* see irqflags.h for inline function */ -#else " mfc0 %[flags], $12 \n" " ori $1, %[flags], 0x1f \n" " xori $1, 0x1f \n" " .set noreorder \n" " mtc0 $1, $12 \n" -#endif " " __stringify(__irq_disable_hazard) " \n" " .set pop \n" : [flags] "=r" (flags) @@ -103,18 +95,12 @@ notrace void arch_local_irq_restore(unsigned long flags) " .set push \n" " .set noreorder \n" " .set noat \n" -#if defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU) - /* see irqflags.h for inline function */ -#elif defined(CONFIG_CPU_MIPSR2) - /* see irqflags.h for inline function */ -#else " mfc0 $1, $12 \n" " andi %[flags], 1 \n" " ori $1, 0x1f \n" " xori $1, 0x1f \n" " or %[flags], $1 \n" " mtc0 %[flags], $12 \n" -#endif " " 
__stringify(__irq_disable_hazard) " \n" " .set pop \n" : [flags] "=r" (__tmp1) @@ -136,18 +122,12 @@ notrace void __arch_local_irq_restore(unsigned long flags) " .set push \n" " .set noreorder \n" " .set noat \n" -#if defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU) - /* see irqflags.h for inline function */ -#elif defined(CONFIG_CPU_MIPSR2) - /* see irqflags.h for inline function */ -#else " mfc0 $1, $12 \n" " andi %[flags], 1 \n" " ori $1, 0x1f \n" " xori $1, 0x1f \n" " or %[flags], $1 \n" " mtc0 %[flags], $12 \n" -#endif " " __stringify(__irq_disable_hazard) " \n" " .set pop \n" : [flags] "=r" (__tmp1) diff --git a/arch/mips/lib/r3k_dump_tlb.c b/arch/mips/lib/r3k_dump_tlb.c index 1ef365ab3cd3..975a13855116 100644 --- a/arch/mips/lib/r3k_dump_tlb.c +++ b/arch/mips/lib/r3k_dump_tlb.c @@ -9,6 +9,7 @@ #include <linux/mm.h> #include <asm/mipsregs.h> +#include <asm/mmu_context.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/tlbdebug.h> @@ -21,7 +22,7 @@ static void dump_tlb(int first, int last) unsigned int asid; unsigned long entryhi, entrylo0; - asid = read_c0_entryhi() & 0xfc0; + asid = read_c0_entryhi() & ASID_MASK; for (i = first; i <= last; i++) { write_c0_index(i<<8); @@ -34,8 +35,8 @@ static void dump_tlb(int first, int last) entrylo0 = read_c0_entrylo0(); /* Unused entries have a virtual address of KSEG0. */ - if ((entryhi & 0xfffff000) != 0x80000000 - && (entryhi & 0xfc0) == asid) { + if ((entryhi & PAGE_MASK) != KSEG0 + && (entryhi & ASID_MASK) == asid) { /* * Only print entries in use */ @@ -43,8 +44,8 @@ static void dump_tlb(int first, int last) printk("va=%08lx asid=%08lx" " [pa=%06lx n=%d d=%d v=%d g=%d]", - (entryhi & 0xfffff000), - entryhi & 0xfc0, + entryhi & PAGE_MASK, + entryhi & ASID_MASK, entrylo0 & PAGE_MASK, (entrylo0 & (1 << 11)) ? 1 : 0, (entrylo0 & (1 << 10)) ? 1 : 0, diff --git a/arch/mips/lib/strlen_user.S b/arch/mips/lib/strlen_user.S index bef65c98df59..929bbacd697e 100644 --- a/arch/mips/lib/strlen_user.S +++ b/arch/mips/lib/strlen_user.S @@ -28,7 +28,6 @@ LEAF(__strlen_\func\()_asm) and v0, a0 bnez v0, .Lfault\@ -FEXPORT(__strlen_\func\()_nocheck_asm) move v0, a0 .ifeqs "\func", "kernel" 1: EX(lbu, v1, (v0), .Lfault\@) @@ -48,9 +47,7 @@ FEXPORT(__strlen_\func\()_nocheck_asm) #ifndef CONFIG_EVA /* Set aliases */ .global __strlen_user_asm - .global __strlen_user_nocheck_asm .set __strlen_user_asm, __strlen_kernel_asm - .set __strlen_user_nocheck_asm, __strlen_kernel_nocheck_asm #endif __BUILD_STRLEN_ASM kernel diff --git a/arch/mips/loongson/Kconfig b/arch/mips/loongson/Kconfig index 1b91fc6a921b..156de85b82cd 100644 --- a/arch/mips/loongson/Kconfig +++ b/arch/mips/loongson/Kconfig @@ -86,6 +86,7 @@ config LOONGSON_MACH3X select LOONGSON_MC146818 select ZONE_DMA32 select LEFI_FIRMWARE_INTERFACE + select PHYS48_TO_HT40 help Generic Loongson 3 family machines utilize the 3A/3B revision of Loongson processor and RS780/SBX00 chipset. @@ -107,6 +108,18 @@ config CS5536_MFGPT If unsure, say Yes. +config RS780_HPET + bool "RS780/SBX00 HPET Timer" + depends on LOONGSON_MACH3X + select MIPS_EXTERNAL_TIMER + help + This option enables the HPET timer of AMD RS780/SBX00. + + If you want to enable the Loongson3 CPUFreq driver, please enable + this option first; otherwise you will get wrong system time. + + If unsure, say Yes.
+ config LOONGSON_SUSPEND bool default y @@ -131,6 +144,10 @@ config SWIOTLB select NEED_SG_DMA_LENGTH select NEED_DMA_MAP_STATE +config PHYS48_TO_HT40 + bool + default y if CPU_LOONGSON3 + config LOONGSON_MC146818 bool default n diff --git a/arch/mips/loongson/common/cs5536/cs5536_pci.c b/arch/mips/loongson/common/cs5536/cs5536_pci.c index 81bed9d18061..b739723205f8 100644 --- a/arch/mips/loongson/common/cs5536/cs5536_pci.c +++ b/arch/mips/loongson/common/cs5536/cs5536_pci.c @@ -21,6 +21,7 @@ */ #include <linux/types.h> +#include <cs5536/cs5536_pci.h> #include <cs5536/cs5536_vsm.h> enum { @@ -35,21 +36,21 @@ enum { }; static const cs5536_pci_vsm_write vsm_conf_write[] = { - [CS5536_ISA_FUNC] pci_isa_write_reg, - [reserved_func] NULL, - [CS5536_IDE_FUNC] pci_ide_write_reg, - [CS5536_ACC_FUNC] pci_acc_write_reg, - [CS5536_OHCI_FUNC] pci_ohci_write_reg, - [CS5536_EHCI_FUNC] pci_ehci_write_reg, + [CS5536_ISA_FUNC] = pci_isa_write_reg, + [reserved_func] = NULL, + [CS5536_IDE_FUNC] = pci_ide_write_reg, + [CS5536_ACC_FUNC] = pci_acc_write_reg, + [CS5536_OHCI_FUNC] = pci_ohci_write_reg, + [CS5536_EHCI_FUNC] = pci_ehci_write_reg, }; static const cs5536_pci_vsm_read vsm_conf_read[] = { - [CS5536_ISA_FUNC] pci_isa_read_reg, - [reserved_func] NULL, - [CS5536_IDE_FUNC] pci_ide_read_reg, - [CS5536_ACC_FUNC] pci_acc_read_reg, - [CS5536_OHCI_FUNC] pci_ohci_read_reg, - [CS5536_EHCI_FUNC] pci_ehci_read_reg, + [CS5536_ISA_FUNC] = pci_isa_read_reg, + [reserved_func] = NULL, + [CS5536_IDE_FUNC] = pci_ide_read_reg, + [CS5536_ACC_FUNC] = pci_acc_read_reg, + [CS5536_OHCI_FUNC] = pci_ohci_read_reg, + [CS5536_EHCI_FUNC] = pci_ehci_read_reg, }; /* diff --git a/arch/mips/loongson/common/dma-swiotlb.c b/arch/mips/loongson/common/dma-swiotlb.c index c2be01f91575..2c6b989c1bc4 100644 --- a/arch/mips/loongson/common/dma-swiotlb.c +++ b/arch/mips/loongson/common/dma-swiotlb.c @@ -105,11 +105,25 @@ static int loongson_dma_set_mask(struct device *dev, u64 mask) dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) { + long nid; +#ifdef CONFIG_PHYS48_TO_HT40 + /* We extract 2bit node id (bit 44~47, only bit 44~45 used now) from + * Loongson-3's 48bit address space and embed it into 40bit */ + nid = (paddr >> 44) & 0x3; + paddr = ((nid << 44) ^ paddr) | (nid << 37); +#endif return paddr; } phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) { + long nid; +#ifdef CONFIG_PHYS48_TO_HT40 + /* We extract 2bit node id (bit 44~47, only bit 44~45 used now) from + * Loongson-3's 48bit address space and embed it into 40bit */ + nid = (daddr >> 37) & 0x3; + daddr = ((nid << 37) ^ daddr) | (nid << 44); +#endif return daddr; } diff --git a/arch/mips/loongson/common/early_printk.c b/arch/mips/loongson/common/early_printk.c index ced461b39069..6ca632e529dc 100644 --- a/arch/mips/loongson/common/early_printk.c +++ b/arch/mips/loongson/common/early_printk.c @@ -30,7 +30,7 @@ void prom_putchar(char c) int timeout; unsigned char *uart_base; - uart_base = (unsigned char *)_loongson_uart_base; + uart_base = (unsigned char *)_loongson_uart_base[0]; timeout = 1024; while (((serial_in(uart_base, UART_LSR) & UART_LSR_THRE) == 0) && diff --git a/arch/mips/loongson/common/env.c b/arch/mips/loongson/common/env.c index f15228550a22..045ea3d47c87 100644 --- a/arch/mips/loongson/common/env.c +++ b/arch/mips/loongson/common/env.c @@ -21,6 +21,7 @@ #include <asm/bootinfo.h> #include <loongson.h> #include <boot_param.h> +#include <workarounds.h> u32 cpu_clock_freq; EXPORT_SYMBOL(cpu_clock_freq); @@ -31,7 +32,6 @@ u64 
loongson_chipcfg[MAX_PACKAGES] = {0xffffffffbfc00180}; u64 loongson_freqctrl[MAX_PACKAGES]; unsigned long long smp_group[4]; -int cpuhotplug_workaround = 0; #define parse_even_earlier(res, option, p) \ do { \ @@ -67,6 +67,7 @@ void __init prom_init_env(void) #else struct boot_params *boot_p; struct loongson_params *loongson_p; + struct system_loongson *esys; struct efi_cpuinfo_loongson *ecpu; struct irq_source_routing_table *eirq_source; @@ -74,6 +75,8 @@ void __init prom_init_env(void) boot_p = (struct boot_params *)fw_arg2; loongson_p = &(boot_p->efi.smbios.lp); + esys = (struct system_loongson *) + ((u64)loongson_p + loongson_p->system_offset); ecpu = (struct efi_cpuinfo_loongson *) ((u64)loongson_p + loongson_p->cpu_offset); eirq_source = (struct irq_source_routing_table *) @@ -95,6 +98,7 @@ void __init prom_init_env(void) loongson_chipcfg[2] = 0x900020001fe00180; loongson_chipcfg[3] = 0x900030001fe00180; loongson_sysconf.ht_control_base = 0x90000EFDFB000000; + loongson_sysconf.workarounds = WORKAROUND_CPUFREQ; } else if (ecpu->cputype == Loongson_3B) { loongson_sysconf.cores_per_node = 4; /* One chip has 2 nodes */ loongson_sysconf.cores_per_package = 8; @@ -111,7 +115,7 @@ void __init prom_init_env(void) loongson_freqctrl[2] = 0x900040001fe001d0; loongson_freqctrl[3] = 0x900060001fe001d0; loongson_sysconf.ht_control_base = 0x90001EFDFB000000; - cpuhotplug_workaround = 1; + loongson_sysconf.workarounds = WORKAROUND_CPUHOTPLUG; } else { loongson_sysconf.cores_per_node = 1; loongson_sysconf.cores_per_package = 1; @@ -119,6 +123,8 @@ void __init prom_init_env(void) } loongson_sysconf.nr_cpus = ecpu->nr_cpus; + loongson_sysconf.boot_cpu_id = ecpu->cpu_startup_core_id; + loongson_sysconf.reserved_cpus_mask = ecpu->reserved_cores_mask; if (ecpu->nr_cpus > NR_CPUS || ecpu->nr_cpus == 0) loongson_sysconf.nr_cpus = NR_CPUS; loongson_sysconf.nr_nodes = (loongson_sysconf.nr_cpus + @@ -141,6 +147,24 @@ void __init prom_init_env(void) pr_debug("Shutdown Addr: %llx, Restart Addr: %llx, VBIOS Addr: %llx\n", loongson_sysconf.poweroff_addr, loongson_sysconf.restart_addr, loongson_sysconf.vgabios_addr); + + memset(loongson_sysconf.ecname, 0, 32); + if (esys->has_ec) + memcpy(loongson_sysconf.ecname, esys->ec_name, 32); + loongson_sysconf.workarounds |= esys->workarounds; + + loongson_sysconf.nr_uarts = esys->nr_uarts; + if (esys->nr_uarts < 1 || esys->nr_uarts > MAX_UARTS) + loongson_sysconf.nr_uarts = 1; + memcpy(loongson_sysconf.uarts, esys->uarts, + sizeof(struct uart_device) * loongson_sysconf.nr_uarts); + + loongson_sysconf.nr_sensors = esys->nr_sensors; + if (loongson_sysconf.nr_sensors > MAX_SENSORS) + loongson_sysconf.nr_sensors = 0; + if (loongson_sysconf.nr_sensors) + memcpy(loongson_sysconf.sensors, esys->sensors, + sizeof(struct sensor_device) * loongson_sysconf.nr_sensors); #endif if (cpu_clock_freq == 0) { processor_id = (¤t_cpu_data)->processor_id; diff --git a/arch/mips/loongson/common/gpio.c b/arch/mips/loongson/common/gpio.c index 21869908aaa4..29dbaa253061 100644 --- a/arch/mips/loongson/common/gpio.c +++ b/arch/mips/loongson/common/gpio.c @@ -37,7 +37,7 @@ int gpio_get_value(unsigned gpio) val = LOONGSON_GPIODATA; spin_unlock(&gpio_lock); - return ((val & mask) != 0); + return (val & mask) != 0; } EXPORT_SYMBOL(gpio_get_value); diff --git a/arch/mips/loongson/common/init.c b/arch/mips/loongson/common/init.c index f6af3aba4c86..9b987fe98b5b 100644 --- a/arch/mips/loongson/common/init.c +++ b/arch/mips/loongson/common/init.c @@ -9,6 +9,7 @@ */ #include <linux/bootmem.h> +#include 
<asm/bootinfo.h> #include <asm/smp-ops.h> #include <loongson.h> diff --git a/arch/mips/loongson/common/machtype.c b/arch/mips/loongson/common/machtype.c index 1a4797984b8d..f2807bc662a3 100644 --- a/arch/mips/loongson/common/machtype.c +++ b/arch/mips/loongson/common/machtype.c @@ -19,19 +19,16 @@ #define MACHTYPE_LEN 50 static const char *system_types[] = { - [MACH_LOONGSON_UNKNOWN] "unknown loongson machine", - [MACH_LEMOTE_FL2E] "lemote-fuloong-2e-box", - [MACH_LEMOTE_FL2F] "lemote-fuloong-2f-box", - [MACH_LEMOTE_ML2F7] "lemote-mengloong-2f-7inches", - [MACH_LEMOTE_YL2F89] "lemote-yeeloong-2f-8.9inches", - [MACH_DEXXON_GDIUM2F10] "dexxon-gdium-2f", - [MACH_LEMOTE_NAS] "lemote-nas-2f", - [MACH_LEMOTE_LL2F] "lemote-lynloong-2f", - [MACH_LEMOTE_A1004] "lemote-3a-notebook-a1004", - [MACH_LEMOTE_A1101] "lemote-3a-itx-a1101", - [MACH_LEMOTE_A1201] "lemote-2gq-notebook-a1201", - [MACH_LEMOTE_A1205] "lemote-2gq-aio-a1205", - [MACH_LOONGSON_END] NULL, + [MACH_LOONGSON_UNKNOWN] = "unknown loongson machine", + [MACH_LEMOTE_FL2E] = "lemote-fuloong-2e-box", + [MACH_LEMOTE_FL2F] = "lemote-fuloong-2f-box", + [MACH_LEMOTE_ML2F7] = "lemote-mengloong-2f-7inches", + [MACH_LEMOTE_YL2F89] = "lemote-yeeloong-2f-8.9inches", + [MACH_DEXXON_GDIUM2F10] = "dexxon-gdium-2f", + [MACH_LEMOTE_NAS] = "lemote-nas-2f", + [MACH_LEMOTE_LL2F] = "lemote-lynloong-2f", + [MACH_LOONGSON_GENERIC] = "generic-loongson-machine", + [MACH_LOONGSON_END] = NULL, }; const char *get_system_type(void) diff --git a/arch/mips/loongson/common/rtc.c b/arch/mips/loongson/common/rtc.c index a90d87c01555..b5709af09f7f 100644 --- a/arch/mips/loongson/common/rtc.c +++ b/arch/mips/loongson/common/rtc.c @@ -14,7 +14,7 @@ #include <linux/platform_device.h> #include <linux/mc146818rtc.h> -struct resource loongson_rtc_resources[] = { +static struct resource loongson_rtc_resources[] = { { .start = RTC_PORT(0), .end = RTC_PORT(1), diff --git a/arch/mips/loongson/common/serial.c b/arch/mips/loongson/common/serial.c index bd2b7095b6dc..c23fa1373729 100644 --- a/arch/mips/loongson/common/serial.c +++ b/arch/mips/loongson/common/serial.c @@ -38,20 +38,17 @@ .regshift = 0, \ } -static struct plat_serial8250_port uart8250_data[][2] = { - [MACH_LOONGSON_UNKNOWN] {}, - [MACH_LEMOTE_FL2E] {PORT(4, 1843200), {} }, - [MACH_LEMOTE_FL2F] {PORT(3, 1843200), {} }, - [MACH_LEMOTE_ML2F7] {PORT_M(3, 3686400), {} }, - [MACH_LEMOTE_YL2F89] {PORT_M(3, 3686400), {} }, - [MACH_DEXXON_GDIUM2F10] {PORT_M(3, 3686400), {} }, - [MACH_LEMOTE_NAS] {PORT_M(3, 3686400), {} }, - [MACH_LEMOTE_LL2F] {PORT(3, 1843200), {} }, - [MACH_LEMOTE_A1004] {PORT_M(2, 33177600), {} }, - [MACH_LEMOTE_A1101] {PORT_M(2, 25000000), {} }, - [MACH_LEMOTE_A1201] {PORT_M(2, 25000000), {} }, - [MACH_LEMOTE_A1205] {PORT_M(2, 25000000), {} }, - [MACH_LOONGSON_END] {}, +static struct plat_serial8250_port uart8250_data[][MAX_UARTS + 1] = { + [MACH_LOONGSON_UNKNOWN] = {}, + [MACH_LEMOTE_FL2E] = {PORT(4, 1843200), {} }, + [MACH_LEMOTE_FL2F] = {PORT(3, 1843200), {} }, + [MACH_LEMOTE_ML2F7] = {PORT_M(3, 3686400), {} }, + [MACH_LEMOTE_YL2F89] = {PORT_M(3, 3686400), {} }, + [MACH_DEXXON_GDIUM2F10] = {PORT_M(3, 3686400), {} }, + [MACH_LEMOTE_NAS] = {PORT_M(3, 3686400), {} }, + [MACH_LEMOTE_LL2F] = {PORT(3, 1843200), {} }, + [MACH_LOONGSON_GENERIC] = {PORT_M(2, 25000000), {} }, + [MACH_LOONGSON_END] = {}, }; static struct platform_device uart8250_device = { @@ -61,17 +58,52 @@ static struct platform_device uart8250_device = { static int __init serial_init(void) { + int i; unsigned char iotype; iotype = 
uart8250_data[mips_machtype][0].iotype; - if (UPIO_MEM == iotype) + if (UPIO_MEM == iotype) { + uart8250_data[mips_machtype][0].mapbase = + loongson_uart_base[0]; uart8250_data[mips_machtype][0].membase = - (void __iomem *)_loongson_uart_base; + (void __iomem *)_loongson_uart_base[0]; + } else if (UPIO_PORT == iotype) uart8250_data[mips_machtype][0].iobase = - loongson_uart_base - LOONGSON_PCIIO_BASE; + loongson_uart_base[0] - LOONGSON_PCIIO_BASE; + if (loongson_sysconf.uarts[0].uartclk) + uart8250_data[mips_machtype][0].uartclk = + loongson_sysconf.uarts[0].uartclk; + + for (i = 1; i < loongson_sysconf.nr_uarts; i++) { + iotype = loongson_sysconf.uarts[i].iotype; + uart8250_data[mips_machtype][i].iotype = iotype; + loongson_uart_base[i] = loongson_sysconf.uarts[i].uart_base; + + if (UPIO_MEM == iotype) { + uart8250_data[mips_machtype][i].irq = + MIPS_CPU_IRQ_BASE + loongson_sysconf.uarts[i].int_offset; + uart8250_data[mips_machtype][i].mapbase = + loongson_uart_base[i]; + uart8250_data[mips_machtype][i].membase = + ioremap_nocache(loongson_uart_base[i], 8); + } else if (UPIO_PORT == iotype) { + uart8250_data[mips_machtype][i].irq = + loongson_sysconf.uarts[i].int_offset; + uart8250_data[mips_machtype][i].iobase = + loongson_uart_base[i] - LOONGSON_PCIIO_BASE; + } + + uart8250_data[mips_machtype][i].uartclk = + loongson_sysconf.uarts[i].uartclk; + uart8250_data[mips_machtype][i].flags = + UPF_BOOT_AUTOCONF | UPF_SKIP_TEST; + } + + memset(&uart8250_data[mips_machtype][loongson_sysconf.nr_uarts], + 0, sizeof(struct plat_serial8250_port)); uart8250_device.dev.platform_data = uart8250_data[mips_machtype]; return platform_device_register(&uart8250_device); diff --git a/arch/mips/loongson/common/setup.c b/arch/mips/loongson/common/setup.c index bb4ac922e47a..d477dd6bb326 100644 --- a/arch/mips/loongson/common/setup.c +++ b/arch/mips/loongson/common/setup.c @@ -10,6 +10,7 @@ #include <linux/module.h> #include <asm/wbflush.h> +#include <asm/bootinfo.h> #include <loongson.h> diff --git a/arch/mips/loongson/common/time.c b/arch/mips/loongson/common/time.c index 262a1f65b05e..e1a5382ad47e 100644 --- a/arch/mips/loongson/common/time.c +++ b/arch/mips/loongson/common/time.c @@ -12,6 +12,7 @@ */ #include <asm/mc146818-time.h> #include <asm/time.h> +#include <asm/hpet.h> #include <loongson.h> #include <cs5536/cs5536_mfgpt.h> @@ -21,7 +22,11 @@ void __init plat_time_init(void) /* setup mips r4k timer */ mips_hpt_frequency = cpu_clock_freq / 2; +#ifdef CONFIG_RS780_HPET + setup_hpet_timer(); +#else setup_mfgpt0_timer(); +#endif } void read_persistent_clock(struct timespec *ts) diff --git a/arch/mips/loongson/common/uart_base.c b/arch/mips/loongson/common/uart_base.c index 1e1eeea73fde..9de559d58e1f 100644 --- a/arch/mips/loongson/common/uart_base.c +++ b/arch/mips/loongson/common/uart_base.c @@ -13,22 +13,27 @@ #include <loongson.h> -/* ioremapped */ -unsigned long _loongson_uart_base; -EXPORT_SYMBOL(_loongson_uart_base); /* raw */ -unsigned long loongson_uart_base; +unsigned long loongson_uart_base[MAX_UARTS] = {}; +/* ioremapped */ +unsigned long _loongson_uart_base[MAX_UARTS] = {}; + EXPORT_SYMBOL(loongson_uart_base); +EXPORT_SYMBOL(_loongson_uart_base); void prom_init_loongson_uart_base(void) { switch (mips_machtype) { + case MACH_LOONGSON_GENERIC: + /* The CPU provided serial port (CPU) */ + loongson_uart_base[0] = LOONGSON_REG_BASE + 0x1e0; + break; case MACH_LEMOTE_FL2E: - loongson_uart_base = LOONGSON_PCIIO_BASE + 0x3f8; + loongson_uart_base[0] = LOONGSON_PCIIO_BASE + 0x3f8; break; case 
MACH_LEMOTE_FL2F: case MACH_LEMOTE_LL2F: - loongson_uart_base = LOONGSON_PCIIO_BASE + 0x2f8; + loongson_uart_base[0] = LOONGSON_PCIIO_BASE + 0x2f8; break; case MACH_LEMOTE_ML2F7: case MACH_LEMOTE_YL2F89: @@ -36,17 +41,10 @@ void prom_init_loongson_uart_base(void) case MACH_LEMOTE_NAS: default: /* The CPU provided serial port (LPC) */ - loongson_uart_base = LOONGSON_LIO1_BASE + 0x3f8; - break; - case MACH_LEMOTE_A1004: - case MACH_LEMOTE_A1101: - case MACH_LEMOTE_A1201: - case MACH_LEMOTE_A1205: - /* The CPU provided serial port (CPU) */ - loongson_uart_base = LOONGSON_REG_BASE + 0x1e0; + loongson_uart_base[0] = LOONGSON_LIO1_BASE + 0x3f8; break; } - _loongson_uart_base = - (unsigned long)ioremap_nocache(loongson_uart_base, 8); + _loongson_uart_base[0] = + (unsigned long)ioremap_nocache(loongson_uart_base[0], 8); } diff --git a/arch/mips/loongson/lemote-2f/irq.c b/arch/mips/loongson/lemote-2f/irq.c index 6f8682e44483..cab5f43e0e29 100644 --- a/arch/mips/loongson/lemote-2f/irq.c +++ b/arch/mips/loongson/lemote-2f/irq.c @@ -93,13 +93,13 @@ static irqreturn_t ip6_action(int cpl, void *dev_id) return IRQ_HANDLED; } -struct irqaction ip6_irqaction = { +static struct irqaction ip6_irqaction = { .handler = ip6_action, .name = "cascade", .flags = IRQF_SHARED | IRQF_NO_THREAD, }; -struct irqaction cascade_irqaction = { +static struct irqaction cascade_irqaction = { .handler = no_action, .name = "cascade", .flags = IRQF_NO_THREAD, diff --git a/arch/mips/loongson/lemote-2f/reset.c b/arch/mips/loongson/lemote-2f/reset.c index 79ac694fe744..a26ca7fcd7e0 100644 --- a/arch/mips/loongson/lemote-2f/reset.c +++ b/arch/mips/loongson/lemote-2f/reset.c @@ -76,7 +76,7 @@ static void fl2f_shutdown(void) /* reset support for yeeloong2f and mengloong2f notebook */ -void ml2f_reboot(void) +static void ml2f_reboot(void) { reset_cpu(); diff --git a/arch/mips/loongson/loongson-3/Makefile b/arch/mips/loongson/loongson-3/Makefile index b4df775b9f30..622fead5ebc9 100644 --- a/arch/mips/loongson/loongson-3/Makefile +++ b/arch/mips/loongson/loongson-3/Makefile @@ -1,8 +1,10 @@ # # Makefile for Loongson-3 family machines # -obj-y += irq.o cop2-ex.o +obj-y += irq.o cop2-ex.o platform.o obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_NUMA) += numa.o + +obj-$(CONFIG_RS780_HPET) += hpet.o diff --git a/arch/mips/loongson/loongson-3/hpet.c b/arch/mips/loongson/loongson-3/hpet.c new file mode 100644 index 000000000000..e898d68668a9 --- /dev/null +++ b/arch/mips/loongson/loongson-3/hpet.c @@ -0,0 +1,257 @@ +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/percpu.h> +#include <linux/delay.h> +#include <linux/spinlock.h> +#include <linux/interrupt.h> + +#include <asm/hpet.h> +#include <asm/time.h> + +#define SMBUS_CFG_BASE (loongson_sysconf.ht_control_base + 0x0300a000) +#define SMBUS_PCI_REG40 0x40 +#define SMBUS_PCI_REG64 0x64 +#define SMBUS_PCI_REGB4 0xb4 + +static DEFINE_SPINLOCK(hpet_lock); +DEFINE_PER_CPU(struct clock_event_device, hpet_clockevent_device); + +static unsigned int smbus_read(int offset) +{ + return *(volatile unsigned int *)(SMBUS_CFG_BASE + offset); +} + +static void smbus_write(int offset, int data) +{ + *(volatile unsigned int *)(SMBUS_CFG_BASE + offset) = data; +} + +static void smbus_enable(int offset, int bit) +{ + unsigned int cfg = smbus_read(offset); + + cfg |= bit; + smbus_write(offset, cfg); +} + +static int hpet_read(int offset) +{ + return *(volatile unsigned int *)(HPET_MMIO_ADDR + offset); +} + +static void hpet_write(int offset, int data) +{ + *(volatile unsigned int *)(HPET_MMIO_ADDR + 
offset) = data; +} + +static void hpet_start_counter(void) +{ + unsigned int cfg = hpet_read(HPET_CFG); + + cfg |= HPET_CFG_ENABLE; + hpet_write(HPET_CFG, cfg); +} + +static void hpet_stop_counter(void) +{ + unsigned int cfg = hpet_read(HPET_CFG); + + cfg &= ~HPET_CFG_ENABLE; + hpet_write(HPET_CFG, cfg); +} + +static void hpet_reset_counter(void) +{ + hpet_write(HPET_COUNTER, 0); + hpet_write(HPET_COUNTER + 4, 0); +} + +static void hpet_restart_counter(void) +{ + hpet_stop_counter(); + hpet_reset_counter(); + hpet_start_counter(); +} + +static void hpet_enable_legacy_int(void) +{ + /* Do nothing on Loongson-3 */ +} + +static void hpet_set_mode(enum clock_event_mode mode, + struct clock_event_device *evt) +{ + int cfg = 0; + + spin_lock(&hpet_lock); + switch (mode) { + case CLOCK_EVT_MODE_PERIODIC: + pr_info("set clock event to periodic mode!\n"); + /* stop counter */ + hpet_stop_counter(); + + /* enable timer0 to generate a periodic interrupt */ + cfg = hpet_read(HPET_T0_CFG); + cfg &= ~HPET_TN_LEVEL; + cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC | + HPET_TN_SETVAL | HPET_TN_32BIT; + hpet_write(HPET_T0_CFG, cfg); + + /* set the comparator */ + hpet_write(HPET_T0_CMP, HPET_COMPARE_VAL); + udelay(1); + hpet_write(HPET_T0_CMP, HPET_COMPARE_VAL); + + /* start counter */ + hpet_start_counter(); + break; + case CLOCK_EVT_MODE_SHUTDOWN: + case CLOCK_EVT_MODE_UNUSED: + cfg = hpet_read(HPET_T0_CFG); + cfg &= ~HPET_TN_ENABLE; + hpet_write(HPET_T0_CFG, cfg); + break; + case CLOCK_EVT_MODE_ONESHOT: + pr_info("set clock event to one shot mode!\n"); + cfg = hpet_read(HPET_T0_CFG); + /* set timer0 type + * 1 : periodic interrupt + * 0 : non-periodic(oneshot) interrupt + */ + cfg &= ~HPET_TN_PERIODIC; + cfg |= HPET_TN_ENABLE | HPET_TN_32BIT; + hpet_write(HPET_T0_CFG, cfg); + break; + case CLOCK_EVT_MODE_RESUME: + hpet_enable_legacy_int(); + break; + } + spin_unlock(&hpet_lock); +} + +static int hpet_next_event(unsigned long delta, + struct clock_event_device *evt) +{ + unsigned int cnt; + int res; + + cnt = hpet_read(HPET_COUNTER); + cnt += delta; + hpet_write(HPET_T0_CMP, cnt); + + res = ((int)(hpet_read(HPET_COUNTER) - cnt) > 0) ? -ETIME : 0; + return res; +} + +static irqreturn_t hpet_irq_handler(int irq, void *data) +{ + int is_irq; + struct clock_event_device *cd; + unsigned int cpu = smp_processor_id(); + + is_irq = hpet_read(HPET_STATUS); + if (is_irq & HPET_T0_IRS) { + /* clear the TIMER0 irq status register */ + hpet_write(HPET_STATUS, HPET_T0_IRS); + cd = &per_cpu(hpet_clockevent_device, cpu); + cd->event_handler(cd); + return IRQ_HANDLED; + } + return IRQ_NONE; +} + +static struct irqaction hpet_irq = { + .handler = hpet_irq_handler, + .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_TIMER, + .name = "hpet", +}; + +/* + * HPET address assignment and irq setup should be done in the BIOS, + * but PMON doesn't do this, so we just set it up here directly. + * The operation below would be the normal way; unfortunately, hpet_setup() + * runs before PCI is initialized. + * + * { + * struct pci_dev *pdev; + * + * pdev = pci_get_device(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS, NULL); + * pci_write_config_word(pdev, SMBUS_PCI_REGB4, HPET_ADDR); + * + * ...
+ * } + */ +static void hpet_setup(void) +{ + /* set hpet base address */ + smbus_write(SMBUS_PCI_REGB4, HPET_ADDR); + + /* enable decoding of access to HPET MMIO */ + smbus_enable(SMBUS_PCI_REG40, (1 << 28)); + + /* HPET irq enable */ + smbus_enable(SMBUS_PCI_REG64, (1 << 10)); + + hpet_enable_legacy_int(); +} + +void __init setup_hpet_timer(void) +{ + unsigned int cpu = smp_processor_id(); + struct clock_event_device *cd; + + hpet_setup(); + + cd = &per_cpu(hpet_clockevent_device, cpu); + cd->name = "hpet"; + cd->rating = 320; + cd->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT; + cd->set_mode = hpet_set_mode; + cd->set_next_event = hpet_next_event; + cd->irq = HPET_T0_IRQ; + cd->cpumask = cpumask_of(cpu); + clockevent_set_clock(cd, HPET_FREQ); + cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd); + cd->min_delta_ns = 5000; + + clockevents_register_device(cd); + setup_irq(HPET_T0_IRQ, &hpet_irq); + pr_info("hpet clock event device registered\n"); +} + +static cycle_t hpet_read_counter(struct clocksource *cs) +{ + return (cycle_t)hpet_read(HPET_COUNTER); +} + +static void hpet_suspend(struct clocksource *cs) +{ +} + +static void hpet_resume(struct clocksource *cs) +{ + hpet_setup(); + hpet_restart_counter(); +} + +static struct clocksource csrc_hpet = { + .name = "hpet", + /* mips clocksource rating is less than 300, so hpet is better. */ + .rating = 300, + .read = hpet_read_counter, + .mask = CLOCKSOURCE_MASK(32), + /* oneshot mode works normally with this flag */ + .flags = CLOCK_SOURCE_IS_CONTINUOUS, + .suspend = hpet_suspend, + .resume = hpet_resume, + .mult = 0, + .shift = 10, +}; + +int __init init_hpet_clocksource(void) +{ + csrc_hpet.mult = clocksource_hz2mult(HPET_FREQ, csrc_hpet.shift); + return clocksource_register_hz(&csrc_hpet, HPET_FREQ); +} + +arch_initcall(init_hpet_clocksource); diff --git a/arch/mips/loongson/loongson-3/irq.c b/arch/mips/loongson/loongson-3/irq.c index ca1c62af5188..21221edda7a9 100644 --- a/arch/mips/loongson/loongson-3/irq.c +++ b/arch/mips/loongson/loongson-3/irq.c @@ -9,7 +9,7 @@ #include "smp.h" -unsigned int ht_irq[] = {1, 3, 4, 5, 6, 7, 8, 12, 14, 15}; +unsigned int ht_irq[] = {0, 1, 3, 4, 5, 6, 7, 8, 12, 14, 15}; static void ht_irqdispatch(void) { @@ -55,8 +55,8 @@ static inline void mask_loongson_irq(struct irq_data *d) /* Workaround: UART IRQ may deliver to any core */ if (d->irq == LOONGSON_UART_IRQ) { int cpu = smp_processor_id(); - int node_id = cpu / loongson_sysconf.cores_per_node; - int core_id = cpu % loongson_sysconf.cores_per_node; + int node_id = cpu_logical_map(cpu) / loongson_sysconf.cores_per_node; + int core_id = cpu_logical_map(cpu) % loongson_sysconf.cores_per_node; u64 intenclr_addr = smp_group[node_id] | (u64)(&LOONGSON_INT_ROUTER_INTENCLR); u64 introuter_lpc_addr = smp_group[node_id] | @@ -72,8 +72,8 @@ static inline void unmask_loongson_irq(struct irq_data *d) /* Workaround: UART IRQ may deliver to any core */ if (d->irq == LOONGSON_UART_IRQ) { int cpu = smp_processor_id(); - int node_id = cpu / loongson_sysconf.cores_per_node; - int core_id = cpu % loongson_sysconf.cores_per_node; + int node_id = cpu_logical_map(cpu) / loongson_sysconf.cores_per_node; + int core_id = cpu_logical_map(cpu) % loongson_sysconf.cores_per_node; u64 intenset_addr = smp_group[node_id] | (u64)(&LOONGSON_INT_ROUTER_INTENSET); u64 introuter_lpc_addr = smp_group[node_id] | @@ -102,10 +102,12 @@ void irq_router_init(void) int i; /* route LPC int to cpu core0 int 0 */ -
LOONGSON_INT_ROUTER_LPC = + LOONGSON_INT_COREx_INTy(loongson_sysconf.boot_cpu_id, 0); /* route HT1 int0 ~ int7 to cpu core0 INT1*/ for (i = 0; i < 8; i++) - LOONGSON_INT_ROUTER_HT1(i) = LOONGSON_INT_CORE0_INT1; + LOONGSON_INT_ROUTER_HT1(i) = + LOONGSON_INT_COREx_INTy(loongson_sysconf.boot_cpu_id, 1); /* enable HT1 interrupt */ LOONGSON_HT1_INTN_EN(0) = 0xffffffff; /* enable router interrupt intenset */ diff --git a/arch/mips/loongson/loongson-3/numa.c b/arch/mips/loongson/loongson-3/numa.c index 42323bcc5d28..6cae0e75de27 100644 --- a/arch/mips/loongson/loongson-3/numa.c +++ b/arch/mips/loongson/loongson-3/numa.c @@ -224,7 +224,7 @@ static void __init node_mem_init(unsigned int node) static __init void prom_meminit(void) { - unsigned int node, cpu; + unsigned int node, cpu, active_cpu = 0; cpu_node_probe(); init_topology_matrix(); @@ -240,8 +240,14 @@ static __init void prom_meminit(void) node = cpu / loongson_sysconf.cores_per_node; if (node >= num_online_nodes()) node = 0; - pr_info("NUMA: set cpumask cpu %d on node %d\n", cpu, node); - cpu_set(cpu, __node_data[(node)]->cpumask); + + if (loongson_sysconf.reserved_cpus_mask & (1<<cpu)) + continue; + + cpu_set(active_cpu, __node_data[(node)]->cpumask); + pr_info("NUMA: set cpumask cpu %d on node %d\n", active_cpu, node); + + active_cpu++; } } diff --git a/arch/mips/loongson/loongson-3/platform.c b/arch/mips/loongson/loongson-3/platform.c new file mode 100644 index 000000000000..25a97cc0ee33 --- /dev/null +++ b/arch/mips/loongson/loongson-3/platform.c @@ -0,0 +1,43 @@ +/* + * Copyright (C) 2009 Lemote Inc. + * Author: Wu Zhangjin, wuzhangjin@gmail.com + * Xiang Yu, xiangy@lemote.com + * Chen Huacai, chenhc@lemote.com + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#include <linux/err.h> +#include <linux/slab.h> +#include <linux/platform_device.h> +#include <asm/bootinfo.h> +#include <boot_param.h> +#include <loongson_hwmon.h> +#include <workarounds.h> + +static int __init loongson3_platform_init(void) +{ + int i; + struct platform_device *pdev; + + if (loongson_sysconf.ecname[0] != '\0') + platform_device_register_simple(loongson_sysconf.ecname, -1, NULL, 0); + + for (i = 0; i < loongson_sysconf.nr_sensors; i++) { + if (loongson_sysconf.sensors[i].type > SENSOR_FAN) + continue; + + pdev = kzalloc(sizeof(struct platform_device), GFP_KERNEL); + pdev->name = loongson_sysconf.sensors[i].name; + pdev->id = loongson_sysconf.sensors[i].id; + pdev->dev.platform_data = &loongson_sysconf.sensors[i]; + platform_device_register(pdev); + } + + return 0; +} + +arch_initcall(loongson3_platform_init); diff --git a/arch/mips/loongson/loongson-3/smp.c b/arch/mips/loongson/loongson-3/smp.c index d8c63af6c7cc..e2eb688b5434 100644 --- a/arch/mips/loongson/loongson-3/smp.c +++ b/arch/mips/loongson/loongson-3/smp.c @@ -25,6 +25,7 @@ #include <asm/tlbflush.h> #include <asm/cacheflush.h> #include <loongson.h> +#include <workarounds.h> #include "smp.h" @@ -239,7 +240,7 @@ static void ipi_mailbox_buf_init(void) */ static void loongson3_send_ipi_single(int cpu, unsigned int action) { - loongson3_ipi_write32((u32)action, ipi_set0_regs[cpu]); + loongson3_ipi_write32((u32)action, ipi_set0_regs[cpu_logical_map(cpu)]); } static void @@ -248,7 +249,7 @@ loongson3_send_ipi_mask(const struct cpumask *mask, unsigned int action) unsigned int i; for_each_cpu(i, mask) - loongson3_ipi_write32((u32)action, ipi_set0_regs[i]); + loongson3_ipi_write32((u32)action, ipi_set0_regs[cpu_logical_map(i)]); } void loongson3_ipi_interrupt(struct pt_regs *regs) @@ -257,10 +258,10 @@ void loongson3_ipi_interrupt(struct pt_regs *regs) unsigned int action, c0count; /* Load the ipi register to figure out what we're supposed to do */ - action = loongson3_ipi_read32(ipi_status0_regs[cpu]); + action = loongson3_ipi_read32(ipi_status0_regs[cpu_logical_map(cpu)]); /* Clear the ipi register to clear the interrupt */ - loongson3_ipi_write32((u32)action, ipi_clear0_regs[cpu]); + loongson3_ipi_write32((u32)action, ipi_clear0_regs[cpu_logical_map(cpu)]); if (action & SMP_RESCHEDULE_YOURSELF) scheduler_ipi(); @@ -291,12 +292,14 @@ static void loongson3_init_secondary(void) /* Set interrupt mask, but don't enable */ change_c0_status(ST0_IM, imask); - for (i = 0; i < loongson_sysconf.nr_cpus; i++) - loongson3_ipi_write32(0xffffffff, ipi_en0_regs[i]); + for (i = 0; i < num_possible_cpus(); i++) + loongson3_ipi_write32(0xffffffff, ipi_en0_regs[cpu_logical_map(i)]); - cpu_data[cpu].package = cpu / loongson_sysconf.cores_per_package; - cpu_data[cpu].core = cpu % loongson_sysconf.cores_per_package; per_cpu(cpu_state, cpu) = CPU_ONLINE; + cpu_data[cpu].core = + cpu_logical_map(cpu) % loongson_sysconf.cores_per_package; + cpu_data[cpu].package = + cpu_logical_map(cpu) / loongson_sysconf.cores_per_package; i = 0; __this_cpu_write(core0_c0count, 0); @@ -314,37 +317,50 @@ static void loongson3_init_secondary(void) static void loongson3_smp_finish(void) { + int cpu = smp_processor_id(); + write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ); local_irq_enable(); loongson3_ipi_write64(0, - (void *)(ipi_mailbox_buf[smp_processor_id()]+0x0)); + (void *)(ipi_mailbox_buf[cpu_logical_map(cpu)]+0x0)); pr_info("CPU#%d finished, CP0_ST=%x\n", smp_processor_id(), read_c0_status()); } static void __init loongson3_smp_setup(void) 
{ - int i, num; + int i = 0, num = 0; /* i: physical id, num: logical id */ init_cpu_possible(cpu_none_mask); - set_cpu_possible(0, true); - - __cpu_number_map[0] = 0; - __cpu_logical_map[0] = 0; /* For unified kernel, NR_CPUS is the maximum possible value, * loongson_sysconf.nr_cpus is the really present value */ - for (i = 1, num = 0; i < loongson_sysconf.nr_cpus; i++) { - set_cpu_possible(i, true); - __cpu_number_map[i] = ++num; - __cpu_logical_map[num] = i; + while (i < loongson_sysconf.nr_cpus) { + if (loongson_sysconf.reserved_cpus_mask & (1<<i)) { + /* Reserved physical CPU cores */ + __cpu_number_map[i] = -1; + } else { + __cpu_number_map[i] = num; + __cpu_logical_map[num] = i; + set_cpu_possible(num, true); + num++; + } + i++; } + pr_info("Detected %i available CPU(s)\n", num); + + while (num < loongson_sysconf.nr_cpus) { + __cpu_logical_map[num] = -1; + num++; + } + ipi_set0_regs_init(); ipi_clear0_regs_init(); ipi_status0_regs_init(); ipi_en0_regs_init(); ipi_mailbox_buf_init(); - pr_info("Detected %i available secondary CPU(s)\n", num); + cpu_data[0].core = cpu_logical_map(0) % loongson_sysconf.cores_per_package; + cpu_data[0].package = cpu_logical_map(0) / loongson_sysconf.cores_per_package; } static void __init loongson3_prepare_cpus(unsigned int max_cpus) @@ -371,10 +387,14 @@ static void loongson3_boot_secondary(int cpu, struct task_struct *idle) pr_debug("CPU#%d, func_pc=%lx, sp=%lx, gp=%lx\n", cpu, startargs[0], startargs[1], startargs[2]); - loongson3_ipi_write64(startargs[3], (void *)(ipi_mailbox_buf[cpu]+0x18)); - loongson3_ipi_write64(startargs[2], (void *)(ipi_mailbox_buf[cpu]+0x10)); - loongson3_ipi_write64(startargs[1], (void *)(ipi_mailbox_buf[cpu]+0x8)); - loongson3_ipi_write64(startargs[0], (void *)(ipi_mailbox_buf[cpu]+0x0)); + loongson3_ipi_write64(startargs[3], + (void *)(ipi_mailbox_buf[cpu_logical_map(cpu)]+0x18)); + loongson3_ipi_write64(startargs[2], + (void *)(ipi_mailbox_buf[cpu_logical_map(cpu)]+0x10)); + loongson3_ipi_write64(startargs[1], + (void *)(ipi_mailbox_buf[cpu_logical_map(cpu)]+0x8)); + loongson3_ipi_write64(startargs[0], + (void *)(ipi_mailbox_buf[cpu_logical_map(cpu)]+0x0)); } #ifdef CONFIG_HOTPLUG_CPU @@ -568,7 +588,7 @@ void loongson3_disable_clock(int cpu) if (loongson_sysconf.cputype == Loongson_3A) { LOONGSON_CHIPCFG(package_id) &= ~(1 << (12 + core_id)); } else if (loongson_sysconf.cputype == Loongson_3B) { - if (!cpuhotplug_workaround) + if (!(loongson_sysconf.workarounds & WORKAROUND_CPUHOTPLUG)) LOONGSON_FREQCTRL(package_id) &= ~(1 << (core_id * 4 + 3)); } } @@ -581,7 +601,7 @@ void loongson3_enable_clock(int cpu) if (loongson_sysconf.cputype == Loongson_3A) { LOONGSON_CHIPCFG(package_id) |= 1 << (12 + core_id); } else if (loongson_sysconf.cputype == Loongson_3B) { - if (!cpuhotplug_workaround) + if (!(loongson_sysconf.workarounds & WORKAROUND_CPUHOTPLUG)) LOONGSON_FREQCTRL(package_id) |= 1 << (core_id * 4 + 3); } } diff --git a/arch/mips/loongson1/Kconfig b/arch/mips/loongson1/Kconfig index e23c25d09963..a2b796eaf3c3 100644 --- a/arch/mips/loongson1/Kconfig +++ b/arch/mips/loongson1/Kconfig @@ -5,8 +5,8 @@ choice config LOONGSON1_LS1B bool "Loongson LS1B board" - select CEVT_R4K - select CSRC_R4K + select CEVT_R4K if !MIPS_EXTERNAL_TIMER + select CSRC_R4K if !MIPS_EXTERNAL_TIMER select SYS_HAS_CPU_LOONGSON1B select DMA_NONCOHERENT select BOOT_ELF32 @@ -16,8 +16,46 @@ config LOONGSON1_LS1B select SYS_SUPPORTS_HIGHMEM select SYS_SUPPORTS_MIPS16 select SYS_HAS_EARLY_PRINTK + select USE_GENERIC_EARLY_PRINTK_8250 select COMMON_CLK 
endchoice +menuconfig CEVT_CSRC_LS1X + bool "Use PWM Timer for clockevent/clocksource" + select MIPS_EXTERNAL_TIMER + depends on CPU_LOONGSON1 + help + This option changes the default clockevent/clocksource to PWM Timer, + and is required by Loongson1 CPUFreq support. + + If unsure, say N. + +choice + prompt "Select clockevent/clocksource" + depends on CEVT_CSRC_LS1X + default TIMER_USE_PWM0 + +config TIMER_USE_PWM0 + bool "Use PWM Timer 0" + help + Use PWM Timer 0 as the default clockevent/clocksource. + +config TIMER_USE_PWM1 + bool "Use PWM Timer 1" + help + Use PWM Timer 1 as the default clockevent/clocksource. + +config TIMER_USE_PWM2 + bool "Use PWM Timer 2" + help + Use PWM Timer 2 as the default clockevent/clocksource. + +config TIMER_USE_PWM3 + bool "Use PWM Timer 3" + help + Use PWM Timer 3 as the default clockevent/clocksource. + +endchoice + endif # MACH_LOONGSON1 diff --git a/arch/mips/loongson1/common/Makefile b/arch/mips/loongson1/common/Makefile index b2797709ef5b..723b4ce3b8f0 100644 --- a/arch/mips/loongson1/common/Makefile +++ b/arch/mips/loongson1/common/Makefile @@ -2,4 +2,4 @@ # Makefile for common code of loongson1 based machines. # -obj-y += clock.o irq.o platform.o prom.o reset.o setup.o +obj-y += time.o irq.o platform.o prom.o reset.o setup.o diff --git a/arch/mips/loongson1/common/clock.c b/arch/mips/loongson1/common/clock.c deleted file mode 100644 index b4437f19c3d9..000000000000 --- a/arch/mips/loongson1/common/clock.c +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright (c) 2011 Zhang, Keguang <keguang.zhang@gmail.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version.
- */ - -#include <linux/clk.h> -#include <linux/err.h> -#include <asm/time.h> -#include <platform.h> - -void __init plat_time_init(void) -{ - struct clk *clk; - - /* Initialize LS1X clocks */ - ls1x_clk_init(); - - /* setup mips r4k timer */ - clk = clk_get(NULL, "cpu"); - if (IS_ERR(clk)) - panic("unable to get cpu clock, err=%ld", PTR_ERR(clk)); - - mips_hpt_frequency = clk_get_rate(clk) / 2; -} diff --git a/arch/mips/loongson1/common/platform.c b/arch/mips/loongson1/common/platform.c index fdf8cb5987a4..ddf1d4cbf31e 100644 --- a/arch/mips/loongson1/common/platform.c +++ b/arch/mips/loongson1/common/platform.c @@ -16,8 +16,10 @@ #include <linux/usb/ehci_pdriver.h> #include <asm-generic/sizes.h> +#include <cpufreq.h> #include <loongson1.h> +/* 8250/16550 compatible UART */ #define LS1X_UART(_id) \ { \ .mapbase = LS1X_UART ## _id ## _BASE, \ @@ -27,7 +29,7 @@ .type = PORT_16550A, \ } -static struct plat_serial8250_port ls1x_serial8250_port[] = { +static struct plat_serial8250_port ls1x_serial8250_pdata[] = { LS1X_UART(0), LS1X_UART(1), LS1X_UART(2), @@ -35,11 +37,11 @@ static struct plat_serial8250_port ls1x_serial8250_port[] = { {}, }; -struct platform_device ls1x_uart_device = { +struct platform_device ls1x_uart_pdev = { .name = "serial8250", .id = PLAT8250_DEV_PLATFORM, .dev = { - .platform_data = ls1x_serial8250_port, + .platform_data = ls1x_serial8250_pdata, }, }; @@ -48,16 +50,97 @@ void __init ls1x_serial_setup(struct platform_device *pdev) struct clk *clk; struct plat_serial8250_port *p; - clk = clk_get(NULL, pdev->name); - if (IS_ERR(clk)) - panic("unable to get %s clock, err=%ld", - pdev->name, PTR_ERR(clk)); + clk = clk_get(&pdev->dev, pdev->name); + if (IS_ERR(clk)) { + pr_err("unable to get %s clock, err=%ld", + pdev->name, PTR_ERR(clk)); + return; + } + clk_prepare_enable(clk); for (p = pdev->dev.platform_data; p->flags != 0; ++p) p->uartclk = clk_get_rate(clk); } +/* CPUFreq */ +static struct plat_ls1x_cpufreq ls1x_cpufreq_pdata = { + .clk_name = "cpu_clk", + .osc_clk_name = "osc_33m_clk", + .max_freq = 266 * 1000, + .min_freq = 33 * 1000, +}; + +struct platform_device ls1x_cpufreq_pdev = { + .name = "ls1x-cpufreq", + .dev = { + .platform_data = &ls1x_cpufreq_pdata, + }, +}; + /* Synopsys Ethernet GMAC */ +static struct stmmac_mdio_bus_data ls1x_mdio_bus_data = { + .phy_mask = 0, +}; + +static struct stmmac_dma_cfg ls1x_eth_dma_cfg = { + .pbl = 1, +}; + +int ls1x_eth_mux_init(struct platform_device *pdev, void *priv) +{ + struct plat_stmmacenet_data *plat_dat = NULL; + u32 val; + + val = __raw_readl(LS1X_MUX_CTRL1); + + plat_dat = dev_get_platdata(&pdev->dev); + if (plat_dat->bus_id) { + __raw_writel(__raw_readl(LS1X_MUX_CTRL0) | GMAC1_USE_UART1 | + GMAC1_USE_UART0, LS1X_MUX_CTRL0); + switch (plat_dat->interface) { + case PHY_INTERFACE_MODE_RGMII: + val &= ~(GMAC1_USE_TXCLK | GMAC1_USE_PWM23); + break; + case PHY_INTERFACE_MODE_MII: + val |= (GMAC1_USE_TXCLK | GMAC1_USE_PWM23); + break; + default: + pr_err("unsupported mii mode %d\n", + plat_dat->interface); + return -ENOTSUPP; + } + val &= ~GMAC1_SHUT; + } else { + switch (plat_dat->interface) { + case PHY_INTERFACE_MODE_RGMII: + val &= ~(GMAC0_USE_TXCLK | GMAC0_USE_PWM01); + break; + case PHY_INTERFACE_MODE_MII: + val |= (GMAC0_USE_TXCLK | GMAC0_USE_PWM01); + break; + default: + pr_err("unsupported mii mode %d\n", + plat_dat->interface); + return -ENOTSUPP; + } + val &= ~GMAC0_SHUT; + } + __raw_writel(val, LS1X_MUX_CTRL1); + + return 0; +} + +static struct plat_stmmacenet_data ls1x_eth0_pdata = { + .bus_id = 0, + .phy_addr 
= -1, + .interface = PHY_INTERFACE_MODE_MII, + .mdio_bus_data = &ls1x_mdio_bus_data, + .dma_cfg = &ls1x_eth_dma_cfg, + .has_gmac = 1, + .tx_coe = 1, + .init = ls1x_eth_mux_init, +}; + static struct resource ls1x_eth0_resources[] = { [0] = { .start = LS1X_GMAC0_BASE, @@ -71,25 +154,47 @@ static struct resource ls1x_eth0_resources[] = { }, }; -static struct stmmac_mdio_bus_data ls1x_mdio_bus_data = { - .phy_mask = 0, +struct platform_device ls1x_eth0_pdev = { + .name = "stmmaceth", + .id = 0, + .num_resources = ARRAY_SIZE(ls1x_eth0_resources), + .resource = ls1x_eth0_resources, + .dev = { + .platform_data = &ls1x_eth0_pdata, + }, }; -static struct plat_stmmacenet_data ls1x_eth_data = { - .bus_id = 0, +static struct plat_stmmacenet_data ls1x_eth1_pdata = { + .bus_id = 1, .phy_addr = -1, + .interface = PHY_INTERFACE_MODE_MII, .mdio_bus_data = &ls1x_mdio_bus_data, + .dma_cfg = &ls1x_eth_dma_cfg, .has_gmac = 1, .tx_coe = 1, + .init = ls1x_eth_mux_init, }; -struct platform_device ls1x_eth0_device = { +static struct resource ls1x_eth1_resources[] = { + [0] = { + .start = LS1X_GMAC1_BASE, + .end = LS1X_GMAC1_BASE + SZ_64K - 1, + .flags = IORESOURCE_MEM, + }, + [1] = { + .name = "macirq", + .start = LS1X_GMAC1_IRQ, + .flags = IORESOURCE_IRQ, + }, +}; + +struct platform_device ls1x_eth1_pdev = { .name = "stmmaceth", - .id = 0, - .num_resources = ARRAY_SIZE(ls1x_eth0_resources), - .resource = ls1x_eth0_resources, + .id = 1, + .num_resources = ARRAY_SIZE(ls1x_eth1_resources), + .resource = ls1x_eth1_resources, .dev = { - .platform_data = &ls1x_eth_data, + .platform_data = &ls1x_eth1_pdata, }, }; @@ -111,7 +216,7 @@ static struct resource ls1x_ehci_resources[] = { static struct usb_ehci_pdata ls1x_ehci_pdata = { }; -struct platform_device ls1x_ehci_device = { +struct platform_device ls1x_ehci_pdev = { .name = "ehci-platform", .id = -1, .num_resources = ARRAY_SIZE(ls1x_ehci_resources), @@ -123,7 +228,7 @@ struct platform_device ls1x_ehci_device = { }; /* Real Time Clock */ -struct platform_device ls1x_rtc_device = { +struct platform_device ls1x_rtc_pdev = { .name = "ls1x-rtc", .id = -1, }; diff --git a/arch/mips/loongson1/common/prom.c b/arch/mips/loongson1/common/prom.c index 2a47af5a55c3..68600980ea49 100644 --- a/arch/mips/loongson1/common/prom.c +++ b/arch/mips/loongson1/common/prom.c @@ -27,7 +27,7 @@ char *prom_getenv(char *envname) i = strlen(envname); while (*env) { - if (strncmp(envname, *env, i) == 0 && *(*env+i) == '=') + if (strncmp(envname, *env, i) == 0 && *(*env + i) == '=') return *env + i + 1; env++; } @@ -49,7 +49,7 @@ void __init prom_init_cmdline(void) for (i = 1; i < prom_argc; i++) { strcpy(c, prom_argv[i]); c += strlen(prom_argv[i]); - if (i < prom_argc-1) + if (i < prom_argc - 1) *c++ = ' '; } *c = 0; @@ -57,6 +57,7 @@ void __init prom_init_cmdline(void) void __init prom_init(void) { + void __iomem *uart_base; prom_argc = fw_arg0; prom_argv = (char **)fw_arg1; prom_envp = (char **)fw_arg2; @@ -65,23 +66,18 @@ void __init prom_init(void) memsize = env_or_default("memsize", DEFAULT_MEMSIZE); highmemsize = env_or_default("highmemsize", 0x0); -} -void __init prom_free_prom_memory(void) -{ + if (strstr(arcs_cmdline, "console=ttyS3")) + uart_base = ioremap_nocache(LS1X_UART3_BASE, 0x0f); + else if (strstr(arcs_cmdline, "console=ttyS2")) + uart_base = ioremap_nocache(LS1X_UART2_BASE, 0x0f); + else if (strstr(arcs_cmdline, "console=ttyS1")) + uart_base = ioremap_nocache(LS1X_UART1_BASE, 0x0f); + else + uart_base = ioremap_nocache(LS1X_UART0_BASE, 0x0f); + 
setup_8250_early_printk_port((unsigned long)uart_base, 0, 0); } -#define PORT(offset) (u8 *)(KSEG1ADDR(LS1X_UART0_BASE + offset)) - -void prom_putchar(char c) +void __init prom_free_prom_memory(void) { - int timeout; - - timeout = 1024; - - while (((readb(PORT(UART_LSR)) & UART_LSR_THRE) == 0) - && (timeout-- > 0)) - ; - - writeb(c, PORT(UART_TX)); } diff --git a/arch/mips/loongson1/common/reset.c b/arch/mips/loongson1/common/reset.c index 547f34b69e4c..c41e4ca56ab4 100644 --- a/arch/mips/loongson1/common/reset.c +++ b/arch/mips/loongson1/common/reset.c @@ -14,12 +14,7 @@ #include <loongson1.h> -static void ls1x_restart(char *command) -{ - __raw_writel(0x1, LS1X_WDT_EN); - __raw_writel(0x5000000, LS1X_WDT_TIMER); - __raw_writel(0x1, LS1X_WDT_SET); -} +static void __iomem *wdt_base; static void ls1x_halt(void) { @@ -29,6 +24,15 @@ static void ls1x_halt(void) } } +static void ls1x_restart(char *command) +{ + __raw_writel(0x1, wdt_base + WDT_EN); + __raw_writel(0x1, wdt_base + WDT_TIMER); + __raw_writel(0x1, wdt_base + WDT_SET); + + ls1x_halt(); +} + static void ls1x_power_off(void) { ls1x_halt(); @@ -36,6 +40,10 @@ static void ls1x_power_off(void) static int __init ls1x_reboot_setup(void) { + wdt_base = ioremap_nocache(LS1X_WDT_BASE, 0x0f); + if (!wdt_base) + panic("Failed to remap watchdog registers"); + _machine_restart = ls1x_restart; _machine_halt = ls1x_halt; pm_power_off = ls1x_power_off; diff --git a/arch/mips/loongson1/common/time.c b/arch/mips/loongson1/common/time.c new file mode 100644 index 000000000000..df0f850d6a5f --- /dev/null +++ b/arch/mips/loongson1/common/time.c @@ -0,0 +1,226 @@ +/* + * Copyright (c) 2014 Zhang, Keguang <keguang.zhang@gmail.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#include <linux/clk.h> +#include <linux/interrupt.h> +#include <asm/time.h> + +#include <loongson1.h> +#include <platform.h> + +#ifdef CONFIG_CEVT_CSRC_LS1X + +#if defined(CONFIG_TIMER_USE_PWM1) +#define LS1X_TIMER_BASE LS1X_PWM1_BASE +#define LS1X_TIMER_IRQ LS1X_PWM1_IRQ + +#elif defined(CONFIG_TIMER_USE_PWM2) +#define LS1X_TIMER_BASE LS1X_PWM2_BASE +#define LS1X_TIMER_IRQ LS1X_PWM2_IRQ + +#elif defined(CONFIG_TIMER_USE_PWM3) +#define LS1X_TIMER_BASE LS1X_PWM3_BASE +#define LS1X_TIMER_IRQ LS1X_PWM3_IRQ + +#else +#define LS1X_TIMER_BASE LS1X_PWM0_BASE +#define LS1X_TIMER_IRQ LS1X_PWM0_IRQ +#endif + +DEFINE_RAW_SPINLOCK(ls1x_timer_lock); + +static void __iomem *timer_base; +static uint32_t ls1x_jiffies_per_tick; + +static inline void ls1x_pwmtimer_set_period(uint32_t period) +{ + __raw_writel(period, timer_base + PWM_HRC); + __raw_writel(period, timer_base + PWM_LRC); +} + +static inline void ls1x_pwmtimer_restart(void) +{ + __raw_writel(0x0, timer_base + PWM_CNT); + __raw_writel(INT_EN | CNT_EN, timer_base + PWM_CTRL); +} + +void __init ls1x_pwmtimer_init(void) +{ + timer_base = ioremap(LS1X_TIMER_BASE, 0xf); + if (!timer_base) + panic("Failed to remap timer registers"); + + ls1x_jiffies_per_tick = DIV_ROUND_CLOSEST(mips_hpt_frequency, HZ); + + ls1x_pwmtimer_set_period(ls1x_jiffies_per_tick); + ls1x_pwmtimer_restart(); +} + +static cycle_t ls1x_clocksource_read(struct clocksource *cs) +{ + unsigned long flags; + int count; + u32 jifs; + static int old_count; + static u32 old_jifs; + + raw_spin_lock_irqsave(&ls1x_timer_lock, flags); + /* + * Although our caller may have the read side of xtime_lock, + * this is now a seqlock, and we are cheating in this routine + * by having side effects on state that we cannot undo if + * there is a collision on the seqlock and our caller has to + * retry. (Namely, old_jifs and old_count.) So we must treat + * jiffies as volatile despite the lock. We read jiffies + * before latching the timer count to guarantee that although + * the jiffies value might be older than the count (that is, + * the counter may underflow between the last point where + * jiffies was incremented and the point where we latch the + * count), it cannot be newer. + */ + jifs = jiffies; + /* read the count */ + count = __raw_readl(timer_base + PWM_CNT); + + /* + * It's possible for count to appear to go the wrong way for this + * reason: + * + * The timer counter underflows, but we haven't handled the resulting + * interrupt and incremented jiffies yet. + * + * Previous attempts to handle these cases intelligently were buggy, so + * we just do the simple thing now. 
+ */ + if (count < old_count && jifs == old_jifs) + count = old_count; + + old_count = count; + old_jifs = jifs; + + raw_spin_unlock_irqrestore(&ls1x_timer_lock, flags); + + return (cycle_t) (jifs * ls1x_jiffies_per_tick) + count; +} + +static struct clocksource ls1x_clocksource = { + .name = "ls1x-pwmtimer", + .read = ls1x_clocksource_read, + .mask = CLOCKSOURCE_MASK(24), + .flags = CLOCK_SOURCE_IS_CONTINUOUS, +}; + +static irqreturn_t ls1x_clockevent_isr(int irq, void *devid) +{ + struct clock_event_device *cd = devid; + + ls1x_pwmtimer_restart(); + cd->event_handler(cd); + + return IRQ_HANDLED; +} + +static void ls1x_clockevent_set_mode(enum clock_event_mode mode, + struct clock_event_device *cd) +{ + raw_spin_lock(&ls1x_timer_lock); + switch (mode) { + case CLOCK_EVT_MODE_PERIODIC: + ls1x_pwmtimer_set_period(ls1x_jiffies_per_tick); + ls1x_pwmtimer_restart(); + case CLOCK_EVT_MODE_RESUME: + __raw_writel(INT_EN | CNT_EN, timer_base + PWM_CTRL); + break; + case CLOCK_EVT_MODE_ONESHOT: + case CLOCK_EVT_MODE_SHUTDOWN: + __raw_writel(__raw_readl(timer_base + PWM_CTRL) & ~CNT_EN, + timer_base + PWM_CTRL); + break; + default: + break; + } + raw_spin_unlock(&ls1x_timer_lock); +} + +static int ls1x_clockevent_set_next(unsigned long evt, + struct clock_event_device *cd) +{ + raw_spin_lock(&ls1x_timer_lock); + ls1x_pwmtimer_set_period(evt); + ls1x_pwmtimer_restart(); + raw_spin_unlock(&ls1x_timer_lock); + + return 0; +} + +static struct clock_event_device ls1x_clockevent = { + .name = "ls1x-pwmtimer", + .features = CLOCK_EVT_FEAT_PERIODIC, + .rating = 300, + .irq = LS1X_TIMER_IRQ, + .set_next_event = ls1x_clockevent_set_next, + .set_mode = ls1x_clockevent_set_mode, +}; + +static struct irqaction ls1x_pwmtimer_irqaction = { + .name = "ls1x-pwmtimer", + .handler = ls1x_clockevent_isr, + .dev_id = &ls1x_clockevent, + .flags = IRQF_PERCPU | IRQF_TIMER, +}; + +static void __init ls1x_time_init(void) +{ + struct clock_event_device *cd = &ls1x_clockevent; + int ret; + + if (!mips_hpt_frequency) + panic("Invalid timer clock rate"); + + ls1x_pwmtimer_init(); + + clockevent_set_clock(cd, mips_hpt_frequency); + cd->max_delta_ns = clockevent_delta2ns(0xffffff, cd); + cd->min_delta_ns = clockevent_delta2ns(0x000300, cd); + cd->cpumask = cpumask_of(smp_processor_id()); + clockevents_register_device(cd); + + ls1x_clocksource.rating = 200 + mips_hpt_frequency / 10000000; + ret = clocksource_register_hz(&ls1x_clocksource, mips_hpt_frequency); + if (ret) + panic(KERN_ERR "Failed to register clocksource: %d\n", ret); + + setup_irq(LS1X_TIMER_IRQ, &ls1x_pwmtimer_irqaction); +} +#endif /* CONFIG_CEVT_CSRC_LS1X */ + +void __init plat_time_init(void) +{ + struct clk *clk = NULL; + + /* initialize LS1X clocks */ + ls1x_clk_init(); + +#ifdef CONFIG_CEVT_CSRC_LS1X + /* setup LS1X PWM timer */ + clk = clk_get(NULL, "ls1x_pwmtimer"); + if (IS_ERR(clk)) + panic("unable to get timer clock, err=%ld", PTR_ERR(clk)); + + mips_hpt_frequency = clk_get_rate(clk); + ls1x_time_init(); +#else + /* setup mips r4k timer */ + clk = clk_get(NULL, "cpu_clk"); + if (IS_ERR(clk)) + panic("unable to get cpu clock, err=%ld", PTR_ERR(clk)); + + mips_hpt_frequency = clk_get_rate(clk) / 2; +#endif /* CONFIG_CEVT_CSRC_LS1X */ +} diff --git a/arch/mips/loongson1/ls1b/board.c b/arch/mips/loongson1/ls1b/board.c index b26b10dac70a..58daeea25739 100644 --- a/arch/mips/loongson1/ls1b/board.c +++ b/arch/mips/loongson1/ls1b/board.c @@ -10,17 +10,19 @@ #include <platform.h> static struct platform_device *ls1b_platform_devices[] __initdata = { - 
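The ls1x_clocksource_read() logic above interpolates between PWM timer interrupts: the returned cycle value is jiffies * ls1x_jiffies_per_tick plus the latched hardware count, and the count is clamped to its previous value when the counter has already wrapped but the corresponding tick has not yet been added to jiffies. A stripped-down user-space model of that guard (hypothetical variable names, not the kernel code):

#include <stdint.h>
#include <stdio.h>

/* hypothetical stand-ins for the kernel state */
static uint32_t ticks_per_jiffy = 1000;
static uint64_t jiffies_now;	/* bumped by the (not shown) tick handler */
static uint32_t hw_count;	/* latched from the PWM count register */

static uint64_t pwm_clocksource_read(void)
{
	static uint32_t old_count;
	static uint64_t old_jiffies;
	uint64_t jifs = jiffies_now;	/* read jiffies before latching the counter */
	uint32_t count = hw_count;

	/*
	 * If the counter already wrapped but the tick interrupt has not
	 * bumped jiffies yet, "count" would appear to jump backwards;
	 * reuse the previous count so the returned value stays monotonic.
	 */
	if (count < old_count && jifs == old_jiffies)
		count = old_count;

	old_count = count;
	old_jiffies = jifs;

	return jifs * ticks_per_jiffy + count;
}

int main(void)
{
	hw_count = 900;
	printf("%llu\n", (unsigned long long)pwm_clocksource_read());	/* 900 */
	hw_count = 10;	/* counter restarted, tick not processed yet */
	printf("%llu\n", (unsigned long long)pwm_clocksource_read());	/* still 900 */
	return 0;
}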
&ls1x_uart_device, - &ls1x_eth0_device, - &ls1x_ehci_device, - &ls1x_rtc_device, + &ls1x_uart_pdev, + &ls1x_cpufreq_pdev, + &ls1x_eth0_pdev, + &ls1x_eth1_pdev, + &ls1x_ehci_pdev, + &ls1x_rtc_pdev, }; static int __init ls1b_platform_init(void) { int err; - ls1x_serial_setup(&ls1x_uart_device); + ls1x_serial_setup(&ls1x_uart_pdev); err = platform_add_devices(ls1b_platform_devices, ARRAY_SIZE(ls1b_platform_devices)); diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c index cac529a405b8..9dfcd7fc1bc3 100644 --- a/arch/mips/math-emu/cp1emu.c +++ b/arch/mips/math-emu/cp1emu.c @@ -643,9 +643,14 @@ static inline int cop1_64bit(struct pt_regs *xcp) return !test_thread_flag(TIF_32BIT_FPREGS); } +static inline bool hybrid_fprs(void) +{ + return test_thread_flag(TIF_HYBRID_FPREGS); +} + #define SIFROMREG(si, x) \ do { \ - if (cop1_64bit(xcp)) \ + if (cop1_64bit(xcp) && !hybrid_fprs()) \ (si) = (int)get_fpr32(&ctx->fpr[x], 0); \ else \ (si) = (int)get_fpr32(&ctx->fpr[(x) & ~1], (x) & 1); \ @@ -653,7 +658,7 @@ do { \ #define SITOREG(si, x) \ do { \ - if (cop1_64bit(xcp)) { \ + if (cop1_64bit(xcp) && !hybrid_fprs()) { \ unsigned i; \ set_fpr32(&ctx->fpr[x], 0, si); \ for (i = 1; i < ARRAY_SIZE(ctx->fpr[x].val32); i++) \ diff --git a/arch/mips/math-emu/ieee754dp.c b/arch/mips/math-emu/ieee754dp.c index fd134675fc2e..068f45a415fc 100644 --- a/arch/mips/math-emu/ieee754dp.c +++ b/arch/mips/math-emu/ieee754dp.c @@ -38,7 +38,7 @@ int ieee754dp_isnan(union ieee754dp x) static inline int ieee754dp_issnan(union ieee754dp x) { assert(ieee754dp_isnan(x)); - return ((DPMANT(x) & DP_MBIT(DP_FBITS-1)) == DP_MBIT(DP_FBITS-1)); + return (DPMANT(x) & DP_MBIT(DP_FBITS - 1)) == DP_MBIT(DP_FBITS - 1); } diff --git a/arch/mips/math-emu/ieee754sp.c b/arch/mips/math-emu/ieee754sp.c index d348efe91445..ba88301579c2 100644 --- a/arch/mips/math-emu/ieee754sp.c +++ b/arch/mips/math-emu/ieee754sp.c @@ -38,7 +38,7 @@ int ieee754sp_isnan(union ieee754sp x) static inline int ieee754sp_issnan(union ieee754sp x) { assert(ieee754sp_isnan(x)); - return (SPMANT(x) & SP_MBIT(SP_FBITS-1)); + return SPMANT(x) & SP_MBIT(SP_FBITS - 1); } diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile index 7f4f93ab22b7..67ede4ef9b8d 100644 --- a/arch/mips/mm/Makefile +++ b/arch/mips/mm/Makefile @@ -4,7 +4,13 @@ obj-y += cache.o dma-default.o extable.o fault.o \ gup.o init.o mmap.o page.o page-funcs.o \ - tlbex.o tlbex-fault.o tlb-funcs.o uasm-mips.o + tlbex.o tlbex-fault.o tlb-funcs.o + +ifdef CONFIG_CPU_MICROMIPS +obj-y += uasm-micromips.o +else +obj-y += uasm-mips.o +endif obj-$(CONFIG_32BIT) += ioremap.o pgtable-32.o obj-$(CONFIG_64BIT) += pgtable-64.o @@ -22,5 +28,3 @@ obj-$(CONFIG_IP22_CPU_SCACHE) += sc-ip22.o obj-$(CONFIG_R5000_CPU_SCACHE) += sc-r5k.o obj-$(CONFIG_RM7000_CPU_SCACHE) += sc-rm7k.o obj-$(CONFIG_MIPS_CPU_SCACHE) += sc-mips.o - -obj-$(CONFIG_SYS_SUPPORTS_MICROMIPS) += uasm-micromips.o diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index fbcd8674ff1d..dd261df005c2 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -917,6 +917,18 @@ static inline void alias_74k_erratum(struct cpuinfo_mips *c) } } +static void b5k_instruction_hazard(void) +{ + __sync(); + __sync(); + __asm__ __volatile__( + " nop; nop; nop; nop; nop; nop; nop; nop\n" + " nop; nop; nop; nop; nop; nop; nop; nop\n" + " nop; nop; nop; nop; nop; nop; nop; nop\n" + " nop; nop; nop; nop; nop; nop; nop; nop\n" + : : : "memory"); +} + static char *way_string[] = { NULL, "direct mapped", "2-way", "3-way", "4-way", "5-way", "6-way", 
"7-way", "8-way" }; @@ -1683,6 +1695,37 @@ void r4k_cache_init(void) coherency_setup(); board_cache_error_setup = r4k_cache_error_setup; + + /* + * Per-CPU overrides + */ + switch (current_cpu_type()) { + case CPU_BMIPS4350: + case CPU_BMIPS4380: + /* No IPI is needed because all CPUs share the same D$ */ + flush_data_cache_page = r4k_blast_dcache_page; + break; + case CPU_BMIPS5000: + /* We lose our superpowers if L2 is disabled */ + if (c->scache.flags & MIPS_CACHE_NOT_PRESENT) + break; + + /* I$ fills from D$ just by emptying the write buffers */ + flush_cache_page = (void *)b5k_instruction_hazard; + flush_cache_range = (void *)b5k_instruction_hazard; + flush_cache_sigtramp = (void *)b5k_instruction_hazard; + local_flush_data_cache_page = (void *)b5k_instruction_hazard; + flush_data_cache_page = (void *)b5k_instruction_hazard; + flush_icache_range = (void *)b5k_instruction_hazard; + local_flush_icache_range = (void *)b5k_instruction_hazard; + + /* Cache aliases are handled in hardware; allow HIGHMEM */ + current_cpu_data.dcache.flags &= ~MIPS_CACHE_ALIASES; + + /* Optimization: an L2 flush implicitly flushes the L1 */ + current_cpu_data.options |= MIPS_CPU_INCLUSIVE_CACHES; + break; + } } static int r4k_cache_pm_notifier(struct notifier_block *self, unsigned long cmd, diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c index 33ba3c558fe4..af5f046e627e 100644 --- a/arch/mips/mm/dma-default.c +++ b/arch/mips/mm/dma-default.c @@ -61,6 +61,11 @@ static inline struct page *dma_addr_to_page(struct device *dev, * Warning on the terminology - Linux calls an uncached area coherent; * MIPS terminology calls memory areas with hardware maintained coherency * coherent. + * + * Note that the R14000 and R16000 should also be checked for in this + * condition. However this function is only called on non-I/O-coherent + * systems and only the R10000 and R12000 are used in such systems, the + * SGI IP28 Indigo² rsp. SGI IP32 aka O2. */ static inline int cpu_needs_post_dma_flush(struct device *dev) { diff --git a/arch/mips/mm/gup.c b/arch/mips/mm/gup.c index 06ce17c2a905..7cba480568c8 100644 --- a/arch/mips/mm/gup.c +++ b/arch/mips/mm/gup.c @@ -17,7 +17,7 @@ static inline pte_t gup_get_pte(pte_t *ptep) { -#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) +#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) pte_t pte; retry: diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index f42e35e42790..448cde372af0 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c @@ -95,7 +95,7 @@ static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot) idx += in_interrupt() ? 
FIX_N_COLOURS : 0; vaddr = __fix_to_virt(FIX_CMAP_END - idx); pte = mk_pte(page, prot); -#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) +#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) entrylo = pte.pte_high; #else entrylo = pte_to_entrylo(pte_val(pte)); diff --git a/arch/mips/mm/ioremap.c b/arch/mips/mm/ioremap.c index 7f840bc08abf..8d5008cbdc0f 100644 --- a/arch/mips/mm/ioremap.c +++ b/arch/mips/mm/ioremap.c @@ -17,9 +17,9 @@ #include <asm/tlbflush.h> static inline void remap_area_pte(pte_t * pte, unsigned long address, - phys_t size, phys_t phys_addr, unsigned long flags) + phys_addr_t size, phys_addr_t phys_addr, unsigned long flags) { - phys_t end; + phys_addr_t end; unsigned long pfn; pgprot_t pgprot = __pgprot(_PAGE_GLOBAL | _PAGE_PRESENT | __READABLE | __WRITEABLE | flags); @@ -43,9 +43,9 @@ static inline void remap_area_pte(pte_t * pte, unsigned long address, } static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, - phys_t size, phys_t phys_addr, unsigned long flags) + phys_addr_t size, phys_addr_t phys_addr, unsigned long flags) { - phys_t end; + phys_addr_t end; address &= ~PGDIR_MASK; end = address + size; @@ -64,8 +64,8 @@ static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, return 0; } -static int remap_area_pages(unsigned long address, phys_t phys_addr, - phys_t size, unsigned long flags) +static int remap_area_pages(unsigned long address, phys_addr_t phys_addr, + phys_addr_t size, unsigned long flags) { int error; pgd_t * dir; @@ -111,13 +111,13 @@ static int remap_area_pages(unsigned long address, phys_t phys_addr, * caller shouldn't need to know that small detail. */ -#define IS_LOW512(addr) (!((phys_t)(addr) & (phys_t) ~0x1fffffffULL)) +#define IS_LOW512(addr) (!((phys_addr_t)(addr) & (phys_addr_t) ~0x1fffffffULL)) -void __iomem * __ioremap(phys_t phys_addr, phys_t size, unsigned long flags) +void __iomem * __ioremap(phys_addr_t phys_addr, phys_addr_t size, unsigned long flags) { struct vm_struct * area; unsigned long offset; - phys_t last_addr; + phys_addr_t last_addr; void * addr; phys_addr = fixup_bigphys_addr(phys_addr, size); diff --git a/arch/mips/mm/sc-r5k.c b/arch/mips/mm/sc-r5k.c index 0216ed6eaa2a..751b5cd18bf2 100644 --- a/arch/mips/mm/sc-r5k.c +++ b/arch/mips/mm/sc-r5k.c @@ -81,7 +81,7 @@ static inline int __init r5k_sc_probe(void) unsigned long config = read_c0_config(); if (config & CONF_SC) - return(0); + return 0; scache_size = (512 * 1024) << ((config & R5K_CONF_SS) >> 20); diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c index c3917e251f59..e90b2e899291 100644 --- a/arch/mips/mm/tlb-r4k.c +++ b/arch/mips/mm/tlb-r4k.c @@ -332,7 +332,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte) { ptep = pte_offset_map(pmdp, address); -#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) +#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) write_c0_entrylo0(ptep->pte_high); ptep++; write_c0_entrylo1(ptep->pte_high); diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index e3328a96e809..3978a3d81366 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -637,7 +637,7 @@ static __maybe_unused void build_convert_pte_to_entrylo(u32 **p, if (cpu_has_rixi) { UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL)); } else { -#ifdef CONFIG_64BIT_PHYS_ADDR +#ifdef CONFIG_PHYS_ADDR_T_64BIT uasm_i_dsrl_safe(p, reg, reg, ilog2(_PAGE_GLOBAL)); #else UASM_i_SRL(p, reg, reg, ilog2(_PAGE_GLOBAL)); @@ -1009,7 +1009,7 @@ static void 
build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep) * 64bit address support (36bit on a 32bit CPU) in a 32bit * Kernel is a special case. Only a few CPUs use it. */ -#ifdef CONFIG_64BIT_PHYS_ADDR +#ifdef CONFIG_PHYS_ADDR_T_64BIT if (cpu_has_64bits) { uasm_i_ld(p, tmp, 0, ptep); /* get even pte */ uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */ @@ -1510,14 +1510,14 @@ static void iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr) { #ifdef CONFIG_SMP -# ifdef CONFIG_64BIT_PHYS_ADDR +# ifdef CONFIG_PHYS_ADDR_T_64BIT if (cpu_has_64bits) uasm_i_lld(p, pte, 0, ptr); else # endif UASM_i_LL(p, pte, 0, ptr); #else -# ifdef CONFIG_64BIT_PHYS_ADDR +# ifdef CONFIG_PHYS_ADDR_T_64BIT if (cpu_has_64bits) uasm_i_ld(p, pte, 0, ptr); else @@ -1530,13 +1530,13 @@ static void iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr, unsigned int mode) { -#ifdef CONFIG_64BIT_PHYS_ADDR +#ifdef CONFIG_PHYS_ADDR_T_64BIT unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY); #endif uasm_i_ori(p, pte, pte, mode); #ifdef CONFIG_SMP -# ifdef CONFIG_64BIT_PHYS_ADDR +# ifdef CONFIG_PHYS_ADDR_T_64BIT if (cpu_has_64bits) uasm_i_scd(p, pte, 0, ptr); else @@ -1548,7 +1548,7 @@ iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr, else uasm_il_beqz(p, r, pte, label_smp_pgtable_change); -# ifdef CONFIG_64BIT_PHYS_ADDR +# ifdef CONFIG_PHYS_ADDR_T_64BIT if (!cpu_has_64bits) { /* no uasm_i_nop needed */ uasm_i_ll(p, pte, sizeof(pte_t) / 2, ptr); @@ -1563,14 +1563,14 @@ iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr, uasm_i_nop(p); # endif #else -# ifdef CONFIG_64BIT_PHYS_ADDR +# ifdef CONFIG_PHYS_ADDR_T_64BIT if (cpu_has_64bits) uasm_i_sd(p, pte, 0, ptr); else # endif UASM_i_SW(p, pte, 0, ptr); -# ifdef CONFIG_64BIT_PHYS_ADDR +# ifdef CONFIG_PHYS_ADDR_T_64BIT if (!cpu_has_64bits) { uasm_i_lw(p, pte, sizeof(pte_t) / 2, ptr); uasm_i_ori(p, pte, pte, hwmode); diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c index 6708a2dbf934..8e02291cfc0c 100644 --- a/arch/mips/mm/uasm-mips.c +++ b/arch/mips/mm/uasm-mips.c @@ -96,9 +96,11 @@ static struct insn insn_table[] = { { insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, { insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD }, { insn_mfc0, M(cop0_op, mfc_op, 0, 0, 0, 0), RT | RD | SET}, + { insn_mfhc0, M(cop0_op, mfhc0_op, 0, 0, 0, 0), RT | RD | SET}, { insn_mfhi, M(spec_op, 0, 0, 0, 0, mfhi_op), RD }, { insn_mflo, M(spec_op, 0, 0, 0, 0, mflo_op), RD }, { insn_mtc0, M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET}, + { insn_mthc0, M(cop0_op, mthc0_op, 0, 0, 0, 0), RT | RD | SET}, { insn_mul, M(spec2_op, 0, 0, 0, 0, mul_op), RS | RT | RD}, { insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, { insn_or, M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD }, diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c index a01b0d6cedd2..4adf30284813 100644 --- a/arch/mips/mm/uasm.c +++ b/arch/mips/mm/uasm.c @@ -51,12 +51,12 @@ enum opcode { insn_dsll32, insn_dsra, insn_dsrl, insn_dsrl32, insn_dsubu, insn_eret, insn_ext, insn_ins, insn_j, insn_jal, insn_jalr, insn_jr, insn_lb, insn_ld, insn_ldx, insn_lh, insn_ll, insn_lld, insn_lui, insn_lw, - insn_lwx, insn_mfc0, insn_mfhi, insn_mflo, insn_mtc0, insn_mul, - insn_or, insn_ori, insn_pref, insn_rfe, insn_rotr, insn_sc, insn_scd, - insn_sd, insn_sll, insn_sllv, insn_slt, insn_sltiu, insn_sltu, insn_sra, - insn_srl, insn_srlv, insn_subu, insn_sw, insn_sync, insn_syscall, - insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, insn_wait, 
insn_wsbh, - insn_xor, insn_xori, insn_yield, + insn_lwx, insn_mfc0, insn_mfhc0, insn_mfhi, insn_mflo, insn_mtc0, + insn_mthc0, insn_mul, insn_or, insn_ori, insn_pref, insn_rfe, + insn_rotr, insn_sc, insn_scd, insn_sd, insn_sll, insn_sllv, insn_slt, + insn_sltiu, insn_sltu, insn_sra, insn_srl, insn_srlv, insn_subu, + insn_sw, insn_sync, insn_syscall, insn_tlbp, insn_tlbr, insn_tlbwi, + insn_tlbwr, insn_wait, insn_wsbh, insn_xor, insn_xori, insn_yield, }; struct insn { @@ -284,9 +284,11 @@ I_u2s3u1(_lld) I_u1s2(_lui) I_u2s3u1(_lw) I_u1u2u3(_mfc0) +I_u1u2u3(_mfhc0) I_u1(_mfhi) I_u1(_mflo) I_u1u2u3(_mtc0) +I_u1u2u3(_mthc0) I_u3u1u2(_mul) I_u2u1u3(_ori) I_u3u1u2(_or) diff --git a/arch/mips/mti-malta/malta-init.c b/arch/mips/mti-malta/malta-init.c index 0f60256d3784..6849f533154f 100644 --- a/arch/mips/mti-malta/malta-init.c +++ b/arch/mips/mti-malta/malta-init.c @@ -111,7 +111,7 @@ static void __init mips_ejtag_setup(void) flush_icache_range((unsigned long)base, (unsigned long)base + 0x80); } -phys_t mips_cpc_default_phys_base(void) +phys_addr_t mips_cpc_default_phys_base(void) { return CPC_BASE_ADDR; } diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c index e4f43baa8f67..d1392f8f5811 100644 --- a/arch/mips/mti-malta/malta-int.c +++ b/arch/mips/mti-malta/malta-int.c @@ -18,6 +18,7 @@ #include <linux/smp.h> #include <linux/interrupt.h> #include <linux/io.h> +#include <linux/irqchip/mips-gic.h> #include <linux/kernel_stat.h> #include <linux/kernel.h> #include <linux/random.h> @@ -33,19 +34,13 @@ #include <asm/mips-boards/generic.h> #include <asm/mips-boards/msc01_pci.h> #include <asm/msc01_ic.h> -#include <asm/gic.h> #include <asm/setup.h> #include <asm/rtlx.h> -static unsigned long _msc01_biu_base; -static unsigned int ipi_map[NR_CPUS]; +static void __iomem *_msc01_biu_base; static DEFINE_RAW_SPINLOCK(mips_irq_lock); -#ifdef CONFIG_MIPS_GIC_IPI -DECLARE_BITMAP(ipi_ints, GIC_NUM_INTRS); -#endif - static inline int mips_pcibios_iack(void) { int irq; @@ -127,24 +122,10 @@ static void malta_hw0_irqdispatch(void) #endif } -static void malta_ipi_irqdispatch(void) +static irqreturn_t i8259_handler(int irq, void *dev_id) { -#ifdef CONFIG_MIPS_GIC_IPI - unsigned long irq; - DECLARE_BITMAP(pending, GIC_NUM_INTRS); - - gic_get_int_mask(pending, ipi_ints); - - irq = find_first_bit(pending, GIC_NUM_INTRS); - - while (irq < GIC_NUM_INTRS) { - do_IRQ(MIPS_GIC_IRQ_BASE + irq); - - irq = find_next_bit(pending, GIC_NUM_INTRS, irq + 1); - } -#endif - if (gic_compare_int()) - do_IRQ(MIPS_GIC_IRQ_BASE); + malta_hw0_irqdispatch(); + return IRQ_HANDLED; } static void corehi_irqdispatch(void) @@ -203,95 +184,10 @@ static void corehi_irqdispatch(void) die("CoreHi interrupt", regs); } -static inline int clz(unsigned long x) -{ - __asm__( - " .set push \n" - " .set mips32 \n" - " clz %0, %1 \n" - " .set pop \n" - : "=r" (x) - : "r" (x)); - - return x; -} - -/* - * Version of ffs that only looks at bits 12..15. 
- */ -static inline unsigned int irq_ffs(unsigned int pending) -{ -#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) - return -clz(pending) + 31 - CAUSEB_IP; -#else - unsigned int a0 = 7; - unsigned int t0; - - t0 = pending & 0xf000; - t0 = t0 < 1; - t0 = t0 << 2; - a0 = a0 - t0; - pending = pending << t0; - - t0 = pending & 0xc000; - t0 = t0 < 1; - t0 = t0 << 1; - a0 = a0 - t0; - pending = pending << t0; - - t0 = pending & 0x8000; - t0 = t0 < 1; - /* t0 = t0 << 2; */ - a0 = a0 - t0; - /* pending = pending << t0; */ - - return a0; -#endif -} - -/* - * IRQs on the Malta board look basically (barring software IRQs which we - * don't use at all and all external interrupt sources are combined together - * on hardware interrupt 0 (MIPS IRQ 2)) like: - * - * MIPS IRQ Source - * -------- ------ - * 0 Software (ignored) - * 1 Software (ignored) - * 2 Combined hardware interrupt (hw0) - * 3 Hardware (ignored) - * 4 Hardware (ignored) - * 5 Hardware (ignored) - * 6 Hardware (ignored) - * 7 R4k timer (what we use) - * - * We handle the IRQ according to _our_ priority which is: - * - * Highest ---- R4k Timer - * Lowest ---- Combined hardware interrupt - * - * then we just return, if multiple IRQs are pending then we will just take - * another exception, big deal. - */ - -asmlinkage void plat_irq_dispatch(void) +static irqreturn_t corehi_handler(int irq, void *dev_id) { - unsigned int pending = read_c0_cause() & read_c0_status() & ST0_IM; - int irq; - - if (unlikely(!pending)) { - spurious_interrupt(); - return; - } - - irq = irq_ffs(pending); - - if (irq == MIPSCPU_INT_I8259A) - malta_hw0_irqdispatch(); - else if (gic_present && ((1 << irq) & ipi_map[smp_processor_id()])) - malta_ipi_irqdispatch(); - else - do_IRQ(MIPS_CPU_IRQ_BASE + irq); + corehi_irqdispatch(); + return IRQ_HANDLED; } #ifdef CONFIG_MIPS_MT_SMP @@ -312,13 +208,6 @@ static void ipi_call_dispatch(void) do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ); } -#endif /* CONFIG_MIPS_MT_SMP */ - -#ifdef CONFIG_MIPS_GIC_IPI - -#define GIC_MIPS_CPU_IPI_RESCHED_IRQ 3 -#define GIC_MIPS_CPU_IPI_CALL_IRQ 4 - static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id) { #ifdef CONFIG_MIPS_VPE_APSP_API_CMP @@ -349,31 +238,16 @@ static struct irqaction irq_call = { .flags = IRQF_PERCPU, .name = "IPI_call" }; -#endif /* CONFIG_MIPS_GIC_IPI */ - -static int gic_resched_int_base; -static int gic_call_int_base; -#define GIC_RESCHED_INT(cpu) (gic_resched_int_base+(cpu)) -#define GIC_CALL_INT(cpu) (gic_call_int_base+(cpu)) - -unsigned int plat_ipi_call_int_xlate(unsigned int cpu) -{ - return GIC_CALL_INT(cpu); -} - -unsigned int plat_ipi_resched_int_xlate(unsigned int cpu) -{ - return GIC_RESCHED_INT(cpu); -} +#endif /* CONFIG_MIPS_MT_SMP */ static struct irqaction i8259irq = { - .handler = no_action, + .handler = i8259_handler, .name = "XT-PIC cascade", .flags = IRQF_NO_THREAD, }; static struct irqaction corehi_irqaction = { - .handler = no_action, + .handler = corehi_handler, .name = "CoreHi", .flags = IRQF_NO_THREAD, }; @@ -399,60 +273,6 @@ static msc_irqmap_t msc_eicirqmap[] __initdata = { static int msc_nr_eicirqs __initdata = ARRAY_SIZE(msc_eicirqmap); -/* - * This GIC specific tabular array defines the association between External - * Interrupts and CPUs/Core Interrupts. The nature of the External - * Interrupts is also defined here - polarity/trigger. 
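The plat_irq_dispatch()/irq_ffs() pair removed above dispatched the highest-numbered pending Cause bit first, so the R4k timer on IP7 always won over the combined hardware interrupt on IP2. A small sketch of the clz-based selection it used, with __builtin_clz standing in for the inline assembly (example values; pending must be non-zero since clz of 0 is undefined):

#include <stdio.h>

#define CAUSEB_IP 8	/* IP0 starts at bit 8 of the Cause register */

/* index of the highest-priority (most significant) pending IP bit */
static int irq_ffs(unsigned int pending)
{
	return 31 - __builtin_clz(pending) - CAUSEB_IP;
}

int main(void)
{
	/* IP7 (timer) and IP2 (combined hw0) both pending: the timer wins */
	printf("%d\n", irq_ffs(0x8400));	/* prints 7 */
	/* only IP2 pending */
	printf("%d\n", irq_ffs(0x0400));	/* prints 2 */
	return 0;
}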
- */ - -#define GIC_CPU_NMI GIC_MAP_TO_NMI_MSK -#define X GIC_UNUSED - -static struct gic_intr_map gic_intr_map[GIC_NUM_INTRS] = { - { X, X, X, X, 0 }, - { X, X, X, X, 0 }, - { X, X, X, X, 0 }, - { 0, GIC_CPU_INT0, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT }, - { 0, GIC_CPU_INT1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT }, - { 0, GIC_CPU_INT2, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT }, - { 0, GIC_CPU_INT3, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT }, - { 0, GIC_CPU_INT4, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT }, - { 0, GIC_CPU_INT3, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT }, - { 0, GIC_CPU_INT3, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT }, - { X, X, X, X, 0 }, - { X, X, X, X, 0 }, - { 0, GIC_CPU_INT3, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT }, - { 0, GIC_CPU_NMI, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT }, - { 0, GIC_CPU_NMI, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT }, - { X, X, X, X, 0 }, - /* The remainder of this table is initialised by fill_ipi_map */ -}; -#undef X - -#ifdef CONFIG_MIPS_GIC_IPI -static void __init fill_ipi_map1(int baseintr, int cpu, int cpupin) -{ - int intr = baseintr + cpu; - gic_intr_map[intr].cpunum = cpu; - gic_intr_map[intr].pin = cpupin; - gic_intr_map[intr].polarity = GIC_POL_POS; - gic_intr_map[intr].trigtype = GIC_TRIG_EDGE; - gic_intr_map[intr].flags = 0; - ipi_map[cpu] |= (1 << (cpupin + 2)); - bitmap_set(ipi_ints, intr, 1); -} - -static void __init fill_ipi_map(void) -{ - int cpu; - - for (cpu = 0; cpu < nr_cpu_ids; cpu++) { - fill_ipi_map1(gic_resched_int_base, cpu, GIC_CPU_INT1); - fill_ipi_map1(gic_call_int_base, cpu, GIC_CPU_INT2); - } -} -#endif - void __init arch_init_ipiirq(int irq, struct irqaction *action) { setup_irq(irq, action); @@ -461,6 +281,8 @@ void __init arch_init_ipiirq(int irq, struct irqaction *action) void __init arch_init_irq(void) { + int corehi_irq, i8259_irq; + init_i8259_irqs(); if (!cpu_has_veic) @@ -471,12 +293,12 @@ void __init arch_init_irq(void) gic_present = 1; } else { if (mips_revision_sconid == MIPS_REVISION_SCON_ROCIT) { - _msc01_biu_base = (unsigned long) - ioremap_nocache(MSC01_BIU_REG_BASE, + _msc01_biu_base = ioremap_nocache(MSC01_BIU_REG_BASE, MSC01_BIU_ADDRSPACE_SZ); - gic_present = (REG(_msc01_biu_base, MSC01_SC_CFG) & - MSC01_SC_CFG_GICPRES_MSK) >> - MSC01_SC_CFG_GICPRES_SHF; + gic_present = + (__raw_readl(_msc01_biu_base + MSC01_SC_CFG_OFS) & + MSC01_SC_CFG_GICPRES_MSK) >> + MSC01_SC_CFG_GICPRES_SHF; } } if (gic_present) @@ -507,63 +329,20 @@ void __init arch_init_irq(void) msc_nr_irqs); } - if (cpu_has_veic) { - set_vi_handler(MSC01E_INT_I8259A, malta_hw0_irqdispatch); - set_vi_handler(MSC01E_INT_COREHI, corehi_irqdispatch); - setup_irq(MSC01E_INT_BASE+MSC01E_INT_I8259A, &i8259irq); - setup_irq(MSC01E_INT_BASE+MSC01E_INT_COREHI, &corehi_irqaction); - } else if (cpu_has_vint) { - set_vi_handler(MIPSCPU_INT_I8259A, malta_hw0_irqdispatch); - set_vi_handler(MIPSCPU_INT_COREHI, corehi_irqdispatch); - setup_irq(MIPS_CPU_IRQ_BASE+MIPSCPU_INT_I8259A, &i8259irq); - setup_irq(MIPS_CPU_IRQ_BASE+MIPSCPU_INT_COREHI, - &corehi_irqaction); - } else { - setup_irq(MIPS_CPU_IRQ_BASE+MIPSCPU_INT_I8259A, &i8259irq); - setup_irq(MIPS_CPU_IRQ_BASE+MIPSCPU_INT_COREHI, - &corehi_irqaction); - } - if (gic_present) { - /* FIXME */ int i; -#if defined(CONFIG_MIPS_GIC_IPI) - gic_call_int_base = GIC_NUM_INTRS - - (NR_CPUS - nr_cpu_ids) * 2 - nr_cpu_ids; - gic_resched_int_base = gic_call_int_base - nr_cpu_ids; - fill_ipi_map(); -#endif - 
gic_init(GIC_BASE_ADDR, GIC_ADDRSPACE_SZ, gic_intr_map, - ARRAY_SIZE(gic_intr_map), MIPS_GIC_IRQ_BASE); + + gic_init(GIC_BASE_ADDR, GIC_ADDRSPACE_SZ, MIPSCPU_INT_GIC, + MIPS_GIC_IRQ_BASE); if (!mips_cm_present()) { /* Enable the GIC */ - i = REG(_msc01_biu_base, MSC01_SC_CFG); - REG(_msc01_biu_base, MSC01_SC_CFG) = - (i | (0x1 << MSC01_SC_CFG_GICENA_SHF)); + i = __raw_readl(_msc01_biu_base + MSC01_SC_CFG_OFS); + __raw_writel(i | (0x1 << MSC01_SC_CFG_GICENA_SHF), + _msc01_biu_base + MSC01_SC_CFG_OFS); pr_debug("GIC Enabled\n"); } -#if defined(CONFIG_MIPS_GIC_IPI) - /* set up ipi interrupts */ - if (cpu_has_vint) { - set_vi_handler(MIPSCPU_INT_IPI0, malta_ipi_irqdispatch); - set_vi_handler(MIPSCPU_INT_IPI1, malta_ipi_irqdispatch); - } - /* Argh.. this really needs sorting out.. */ - pr_info("CPU%d: status register was %08x\n", - smp_processor_id(), read_c0_status()); - write_c0_status(read_c0_status() | STATUSF_IP3 | STATUSF_IP4); - pr_info("CPU%d: status register now %08x\n", - smp_processor_id(), read_c0_status()); - write_c0_status(0x1100dc00); - pr_info("CPU%d: status register frc %08x\n", - smp_processor_id(), read_c0_status()); - for (i = 0; i < nr_cpu_ids; i++) { - arch_init_ipiirq(MIPS_GIC_IRQ_BASE + - GIC_RESCHED_INT(i), &irq_resched); - arch_init_ipiirq(MIPS_GIC_IRQ_BASE + - GIC_CALL_INT(i), &irq_call); - } -#endif + i8259_irq = MIPS_GIC_IRQ_BASE + GIC_INT_I8259A; + corehi_irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_COREHI; } else { #if defined(CONFIG_MIPS_MT_SMP) /* set up ipi interrupts */ @@ -573,12 +352,6 @@ void __init arch_init_irq(void) cpu_ipi_resched_irq = MSC01E_INT_SW0; cpu_ipi_call_irq = MSC01E_INT_SW1; } else { - if (cpu_has_vint) { - set_vi_handler (MIPS_CPU_IPI_RESCHED_IRQ, - ipi_resched_dispatch); - set_vi_handler (MIPS_CPU_IPI_CALL_IRQ, - ipi_call_dispatch); - } cpu_ipi_resched_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ; cpu_ipi_call_irq = MIPS_CPU_IRQ_BASE + @@ -587,7 +360,21 @@ void __init arch_init_irq(void) arch_init_ipiirq(cpu_ipi_resched_irq, &irq_resched); arch_init_ipiirq(cpu_ipi_call_irq, &irq_call); #endif + if (cpu_has_veic) { + set_vi_handler(MSC01E_INT_I8259A, + malta_hw0_irqdispatch); + set_vi_handler(MSC01E_INT_COREHI, + corehi_irqdispatch); + i8259_irq = MSC01E_INT_BASE + MSC01E_INT_I8259A; + corehi_irq = MSC01E_INT_BASE + MSC01E_INT_COREHI; + } else { + i8259_irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_I8259A; + corehi_irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_COREHI; + } } + + setup_irq(i8259_irq, &i8259irq); + setup_irq(corehi_irq, &corehi_irqaction); } void malta_be_init(void) @@ -714,37 +501,3 @@ int malta_be_handler(struct pt_regs *regs, int is_fixup) return retval; } - -void gic_enable_interrupt(int irq_vec) -{ - GIC_SET_INTR_MASK(irq_vec); -} - -void gic_disable_interrupt(int irq_vec) -{ - GIC_CLR_INTR_MASK(irq_vec); -} - -void gic_irq_ack(struct irq_data *d) -{ - int irq = (d->irq - gic_irq_base); - - GIC_CLR_INTR_MASK(irq); - - if (gic_irq_flags[irq] & GIC_TRIG_EDGE) - GICWRITE(GIC_REG(SHARED, GIC_SH_WEDGE), irq); -} - -void gic_finish_irq(struct irq_data *d) -{ - /* Enable interrupts. 
*/ - GIC_SET_INTR_MASK(d->irq - gic_irq_base); -} - -void __init gic_platform_init(int irqs, struct irq_chip *irq_controller) -{ - int i; - - for (i = gic_irq_base; i < (gic_irq_base + irqs); i++) - irq_set_chip(i, irq_controller); -} diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c index 3778a359f3ad..ce02dbdedc62 100644 --- a/arch/mips/mti-malta/malta-time.c +++ b/arch/mips/mti-malta/malta-time.c @@ -24,6 +24,7 @@ #include <linux/sched.h> #include <linux/spinlock.h> #include <linux/interrupt.h> +#include <linux/irqchip/mips-gic.h> #include <linux/timex.h> #include <linux/mc146818rtc.h> @@ -37,7 +38,6 @@ #include <asm/time.h> #include <asm/mc146818-time.h> #include <asm/msc01_ic.h> -#include <asm/gic.h> #include <asm/mips-boards/generic.h> #include <asm/mips-boards/maltaint.h> @@ -46,6 +46,8 @@ static int mips_cpu_timer_irq; static int mips_cpu_perf_irq; extern int cp0_perfcount_irq; +static unsigned int gic_frequency; + static void mips_timer_dispatch(void) { do_IRQ(mips_cpu_timer_irq); @@ -70,9 +72,7 @@ static void __init estimate_frequencies(void) { unsigned long flags; unsigned int count, start; -#ifdef CONFIG_IRQ_GIC - unsigned int giccount = 0, gicstart = 0; -#endif + cycle_t giccount = 0, gicstart = 0; #if defined(CONFIG_KVM_GUEST) && CONFIG_KVM_GUEST_TIMER_FREQ mips_hpt_frequency = CONFIG_KVM_GUEST_TIMER_FREQ * 1000000; @@ -87,32 +87,26 @@ static void __init estimate_frequencies(void) /* Initialize counters. */ start = read_c0_count(); -#ifdef CONFIG_IRQ_GIC if (gic_present) - GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), gicstart); -#endif + gicstart = gic_read_count(); /* Read counter exactly on falling edge of update flag. */ while (CMOS_READ(RTC_REG_A) & RTC_UIP); while (!(CMOS_READ(RTC_REG_A) & RTC_UIP)); count = read_c0_count(); -#ifdef CONFIG_IRQ_GIC if (gic_present) - GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), giccount); -#endif + giccount = gic_read_count(); local_irq_restore(flags); count -= start; mips_hpt_frequency = count; -#ifdef CONFIG_IRQ_GIC if (gic_present) { giccount -= gicstart; gic_frequency = giccount; } -#endif } void read_persistent_clock(struct timespec *ts) @@ -121,35 +115,30 @@ void read_persistent_clock(struct timespec *ts) ts->tv_nsec = 0; } -static void __init plat_perf_setup(void) +int get_c0_perfcount_int(void) { -#ifdef MSC01E_INT_BASE if (cpu_has_veic) { set_vi_handler(MSC01E_INT_PERFCTR, mips_perf_dispatch); mips_cpu_perf_irq = MSC01E_INT_BASE + MSC01E_INT_PERFCTR; - } else -#endif - if (cp0_perfcount_irq >= 0) { - if (cpu_has_vint) - set_vi_handler(cp0_perfcount_irq, mips_perf_dispatch); + } else if (gic_present) { + mips_cpu_perf_irq = gic_get_c0_perfcount_int(); + } else if (cp0_perfcount_irq >= 0) { mips_cpu_perf_irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq; -#ifdef CONFIG_SMP - irq_set_handler(mips_cpu_perf_irq, handle_percpu_irq); -#endif + } else { + mips_cpu_perf_irq = -1; } + + return mips_cpu_perf_irq; } unsigned int get_c0_compare_int(void) { -#ifdef MSC01E_INT_BASE if (cpu_has_veic) { set_vi_handler(MSC01E_INT_CPUCTR, mips_timer_dispatch); mips_cpu_timer_irq = MSC01E_INT_BASE + MSC01E_INT_CPUCTR; - } else -#endif - { - if (cpu_has_vint) - set_vi_handler(cp0_compare_irq, mips_timer_dispatch); + } else if (gic_present) { + mips_cpu_timer_irq = gic_get_c0_compare_int(); + } else { mips_cpu_timer_irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq; } @@ -191,16 +180,14 @@ void __init plat_time_init(void) setup_pit_timer(); #endif -#ifdef CONFIG_IRQ_GIC +#ifdef CONFIG_MIPS_GIC if (gic_present) { freq = 
freqround(gic_frequency, 5000); printk("GIC frequency %d.%02d MHz\n", freq/1000000, (freq%1000000)*100/1000000); -#ifdef CONFIG_CSRC_GIC +#ifdef CONFIG_CLKSRC_MIPS_GIC gic_clocksource_init(gic_frequency); #endif } #endif - - plat_perf_setup(); } diff --git a/arch/mips/mti-sead3/leds-sead3.c b/arch/mips/mti-sead3/leds-sead3.c index 0a168c948b01..3abe47b316aa 100644 --- a/arch/mips/mti-sead3/leds-sead3.c +++ b/arch/mips/mti-sead3/leds-sead3.c @@ -70,7 +70,6 @@ static struct platform_driver sead3_led_driver = { .remove = sead3_led_remove, .driver = { .name = DRVNAME, - .owner = THIS_MODULE, }, }; diff --git a/arch/mips/mti-sead3/sead3-ehci.c b/arch/mips/mti-sead3/sead3-ehci.c index 772fc056a92d..014dd7ba4d68 100644 --- a/arch/mips/mti-sead3/sead3-ehci.c +++ b/arch/mips/mti-sead3/sead3-ehci.c @@ -9,6 +9,9 @@ #include <linux/irq.h> #include <linux/dma-mapping.h> #include <linux/platform_device.h> +#include <linux/irqchip/mips-gic.h> + +#include <asm/mips-boards/sead3int.h> struct resource ehci_resources[] = { { @@ -17,7 +20,6 @@ struct resource ehci_resources[] = { .flags = IORESOURCE_MEM }, { - .start = MIPS_CPU_IRQ_BASE + 2, .flags = IORESOURCE_IRQ } }; @@ -37,6 +39,10 @@ static struct platform_device ehci_device = { static int __init ehci_init(void) { + if (gic_present) + ehci_resources[1].start = MIPS_GIC_IRQ_BASE + GIC_INT_EHCI; + else + ehci_resources[1].start = MIPS_CPU_IRQ_BASE + CPU_INT_EHCI; return platform_device_register(&ehci_device); } diff --git a/arch/mips/mti-sead3/sead3-i2c-drv.c b/arch/mips/mti-sead3/sead3-i2c-drv.c index 1f787a6a7878..2bebf0974e39 100644 --- a/arch/mips/mti-sead3/sead3-i2c-drv.c +++ b/arch/mips/mti-sead3/sead3-i2c-drv.c @@ -380,7 +380,6 @@ static int sead3_i2c_platform_resume(struct platform_device *pdev) static struct platform_driver sead3_i2c_platform_driver = { .driver = { .name = "sead3-i2c", - .owner = THIS_MODULE, }, .probe = sead3_i2c_platform_probe, .remove = sead3_i2c_platform_remove, diff --git a/arch/mips/mti-sead3/sead3-int.c b/arch/mips/mti-sead3/sead3-int.c index 6a560ac03def..e31e17f81eef 100644 --- a/arch/mips/mti-sead3/sead3-int.c +++ b/arch/mips/mti-sead3/sead3-int.c @@ -7,9 +7,9 @@ */ #include <linux/init.h> #include <linux/irq.h> +#include <linux/irqchip/mips-gic.h> #include <linux/io.h> -#include <asm/gic.h> #include <asm/irq_cpu.h> #include <asm/setup.h> @@ -20,138 +20,23 @@ #define SEAD_CONFIG_BASE 0x1b100110 #define SEAD_CONFIG_SIZE 4 -static unsigned long sead3_config_reg; - -/* - * This table defines the setup for each external GIC interrupt. It is - * indexed by interrupt number. 
- */ -#define GIC_CPU_NMI GIC_MAP_TO_NMI_MSK -static struct gic_intr_map gic_intr_map[GIC_NUM_INTRS] = { - { 0, GIC_CPU_INT4, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT }, - { 0, GIC_CPU_INT3, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT }, - { 0, GIC_CPU_INT2, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT }, - { 0, GIC_CPU_INT2, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT }, - { 0, GIC_CPU_INT1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT }, - { 0, GIC_CPU_INT0, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT }, - { 0, GIC_CPU_INT0, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT }, - { 0, GIC_CPU_INT0, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT }, - { 0, GIC_CPU_INT0, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT }, - { GIC_UNUSED, GIC_UNUSED, GIC_UNUSED, GIC_UNUSED, GIC_UNUSED }, - { GIC_UNUSED, GIC_UNUSED, GIC_UNUSED, GIC_UNUSED, GIC_UNUSED }, - { GIC_UNUSED, GIC_UNUSED, GIC_UNUSED, GIC_UNUSED, GIC_UNUSED }, - { GIC_UNUSED, GIC_UNUSED, GIC_UNUSED, GIC_UNUSED, GIC_UNUSED }, - { GIC_UNUSED, GIC_UNUSED, GIC_UNUSED, GIC_UNUSED, GIC_UNUSED }, - { GIC_UNUSED, GIC_UNUSED, GIC_UNUSED, GIC_UNUSED, GIC_UNUSED }, - { GIC_UNUSED, GIC_UNUSED, GIC_UNUSED, GIC_UNUSED, GIC_UNUSED }, -}; - -asmlinkage void plat_irq_dispatch(void) -{ - unsigned int pending = read_c0_cause() & read_c0_status() & ST0_IM; - int irq; - - irq = (fls(pending) - CAUSEB_IP - 1); - if (irq >= 0) - do_IRQ(MIPS_CPU_IRQ_BASE + irq); - else - spurious_interrupt(); -} +static void __iomem *sead3_config_reg; void __init arch_init_irq(void) { - int i; - - if (!cpu_has_veic) { + if (!cpu_has_veic) mips_cpu_irq_init(); - if (cpu_has_vint) { - /* install generic handler */ - for (i = 0; i < 8; i++) - set_vi_handler(i, plat_irq_dispatch); - } - } - - sead3_config_reg = (unsigned long)ioremap_nocache(SEAD_CONFIG_BASE, - SEAD_CONFIG_SIZE); - gic_present = (REG32(sead3_config_reg) & SEAD_CONFIG_GIC_PRESENT_MSK) >> + sead3_config_reg = ioremap_nocache(SEAD_CONFIG_BASE, SEAD_CONFIG_SIZE); + gic_present = (__raw_readl(sead3_config_reg) & + SEAD_CONFIG_GIC_PRESENT_MSK) >> SEAD_CONFIG_GIC_PRESENT_SHF; pr_info("GIC: %spresent\n", (gic_present) ? "" : "not "); pr_info("EIC: %s\n", (current_cpu_data.options & MIPS_CPU_VEIC) ? 
"on" : "off"); if (gic_present) - gic_init(GIC_BASE_ADDR, GIC_ADDRSPACE_SZ, gic_intr_map, - ARRAY_SIZE(gic_intr_map), MIPS_GIC_IRQ_BASE); -} - -void gic_enable_interrupt(int irq_vec) -{ - unsigned int i, irq_source; - - /* enable all the interrupts associated with this vector */ - for (i = 0; i < gic_shared_intr_map[irq_vec].num_shared_intr; i++) { - irq_source = gic_shared_intr_map[irq_vec].intr_list[i]; - GIC_SET_INTR_MASK(irq_source); - } - /* enable all local interrupts associated with this vector */ - if (gic_shared_intr_map[irq_vec].local_intr_mask) { - GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), 0); - GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_SMASK), - gic_shared_intr_map[irq_vec].local_intr_mask); - } + gic_init(GIC_BASE_ADDR, GIC_ADDRSPACE_SZ, CPU_INT_GIC, + MIPS_GIC_IRQ_BASE); } -void gic_disable_interrupt(int irq_vec) -{ - unsigned int i, irq_source; - - /* disable all the interrupts associated with this vector */ - for (i = 0; i < gic_shared_intr_map[irq_vec].num_shared_intr; i++) { - irq_source = gic_shared_intr_map[irq_vec].intr_list[i]; - GIC_CLR_INTR_MASK(irq_source); - } - /* disable all local interrupts associated with this vector */ - if (gic_shared_intr_map[irq_vec].local_intr_mask) { - GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), 0); - GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), - gic_shared_intr_map[irq_vec].local_intr_mask); - } -} - -void gic_irq_ack(struct irq_data *d) -{ - GIC_CLR_INTR_MASK(d->irq - gic_irq_base); -} - -void gic_finish_irq(struct irq_data *d) -{ - unsigned int irq = (d->irq - gic_irq_base); - unsigned int i, irq_source; - - /* Clear edge detectors. */ - for (i = 0; i < gic_shared_intr_map[irq].num_shared_intr; i++) { - irq_source = gic_shared_intr_map[irq].intr_list[i]; - if (gic_irq_flags[irq_source] & GIC_TRIG_EDGE) - GICWRITE(GIC_REG(SHARED, GIC_SH_WEDGE), irq_source); - } - - /* Enable interrupts. */ - GIC_SET_INTR_MASK(irq); -} - -void __init gic_platform_init(int irqs, struct irq_chip *irq_controller) -{ - int i; - - /* - * For non-EIC mode, we want to setup the GIC in pass-through - * mode, as if the GIC didn't exist. Do not map any interrupts - * for an external interrupt controller. 
- */ - if (!cpu_has_veic) - return; - - for (i = gic_irq_base; i < (gic_irq_base + irqs); i++) - irq_set_chip_and_handler(i, irq_controller, handle_percpu_irq); -} diff --git a/arch/mips/mti-sead3/sead3-net.c b/arch/mips/mti-sead3/sead3-net.c index dd11e7eb771c..46176b804576 100644 --- a/arch/mips/mti-sead3/sead3-net.c +++ b/arch/mips/mti-sead3/sead3-net.c @@ -7,9 +7,12 @@ */ #include <linux/module.h> #include <linux/irq.h> +#include <linux/irqchip/mips-gic.h> #include <linux/platform_device.h> #include <linux/smsc911x.h> +#include <asm/mips-boards/sead3int.h> + static struct smsc911x_platform_config sead3_smsc911x_data = { .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW, .irq_type = SMSC911X_IRQ_TYPE_PUSH_PULL, @@ -17,14 +20,13 @@ static struct smsc911x_platform_config sead3_smsc911x_data = { .phy_interface = PHY_INTERFACE_MODE_MII, }; -struct resource sead3_net_resourcess[] = { +struct resource sead3_net_resources[] = { { .start = 0x1f010000, .end = 0x1f01ffff, .flags = IORESOURCE_MEM }, { - .start = MIPS_CPU_IRQ_BASE + 6, .flags = IORESOURCE_IRQ } }; @@ -35,12 +37,16 @@ static struct platform_device sead3_net_device = { .dev = { .platform_data = &sead3_smsc911x_data, }, - .num_resources = ARRAY_SIZE(sead3_net_resourcess), - .resource = sead3_net_resourcess + .num_resources = ARRAY_SIZE(sead3_net_resources), + .resource = sead3_net_resources }; static int __init sead3_net_init(void) { + if (gic_present) + sead3_net_resources[1].start = MIPS_GIC_IRQ_BASE + GIC_INT_NET; + else + sead3_net_resources[1].start = MIPS_CPU_IRQ_BASE + CPU_INT_NET; return platform_device_register(&sead3_net_device); } diff --git a/arch/mips/mti-sead3/sead3-platform.c b/arch/mips/mti-sead3/sead3-platform.c index 6c3b33dbed18..53ee6f1f018d 100644 --- a/arch/mips/mti-sead3/sead3-platform.c +++ b/arch/mips/mti-sead3/sead3-platform.c @@ -7,12 +7,15 @@ */ #include <linux/module.h> #include <linux/init.h> +#include <linux/irqchip/mips-gic.h> #include <linux/serial_8250.h> -#define UART(base, int) \ +#include <asm/mips-boards/sead3int.h> + +#define UART(base) \ { \ .mapbase = base, \ - .irq = int, \ + .irq = -1, \ .uartclk = 14745600, \ .iotype = UPIO_MEM32, \ .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP, \ @@ -20,8 +23,8 @@ } static struct plat_serial8250_port uart8250_data[] = { - UART(0x1f000900, MIPS_CPU_IRQ_BASE + 4), /* ttyS0 = USB */ - UART(0x1f000800, MIPS_CPU_IRQ_BASE + 4), /* ttyS1 = RS232 */ + UART(0x1f000900), /* ttyS0 = USB */ + UART(0x1f000800), /* ttyS1 = RS232 */ { }, }; @@ -35,6 +38,13 @@ static struct platform_device uart8250_device = { static int __init uart8250_init(void) { + if (gic_present) { + uart8250_data[0].irq = MIPS_GIC_IRQ_BASE + GIC_INT_UART0; + uart8250_data[1].irq = MIPS_GIC_IRQ_BASE + GIC_INT_UART1; + } else { + uart8250_data[0].irq = MIPS_CPU_IRQ_BASE + CPU_INT_UART0; + uart8250_data[1].irq = MIPS_CPU_IRQ_BASE + CPU_INT_UART1; + } return platform_device_register(&uart8250_device); } diff --git a/arch/mips/mti-sead3/sead3-serial.c b/arch/mips/mti-sead3/sead3-serial.c deleted file mode 100644 index bc52705bbee4..000000000000 --- a/arch/mips/mti-sead3/sead3-serial.c +++ /dev/null @@ -1,45 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 
- */ -#include <linux/module.h> -#include <linux/init.h> -#include <linux/serial_8250.h> - -#define UART(base, int) \ -{ \ - .mapbase = base, \ - .irq = int, \ - .uartclk = 14745600, \ - .iotype = UPIO_MEM32, \ - .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP, \ - .regshift = 2, \ -} - -static struct plat_serial8250_port uart8250_data[] = { - UART(0x1f000900, MIPS_CPU_IRQ_BASE + 4), /* ttyS0 = USB */ - UART(0x1f000800, MIPS_CPU_IRQ_BASE + 4), /* ttyS1 = RS232 */ - { }, -}; - -static struct platform_device uart8250_device = { - .name = "serial8250", - .id = PLAT8250_DEV_PLATFORM, - .dev = { - .platform_data = uart8250_data, - }, -}; - -static int __init uart8250_init(void) -{ - return platform_device_register(&uart8250_device); -} - -module_init(uart8250_init); - -MODULE_AUTHOR("Chris Dearman <chris@mips.com>"); -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("8250 UART probe driver for the SEAD-3 platform"); diff --git a/arch/mips/mti-sead3/sead3-time.c b/arch/mips/mti-sead3/sead3-time.c index 678d03d53c60..ec1dd2491f96 100644 --- a/arch/mips/mti-sead3/sead3-time.c +++ b/arch/mips/mti-sead3/sead3-time.c @@ -6,6 +6,7 @@ * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. */ #include <linux/init.h> +#include <linux/irqchip/mips-gic.h> #include <asm/cpu.h> #include <asm/setup.h> @@ -13,19 +14,6 @@ #include <asm/irq.h> #include <asm/mips-boards/generic.h> -static int mips_cpu_timer_irq; -static int mips_cpu_perf_irq; - -static void mips_timer_dispatch(void) -{ - do_IRQ(mips_cpu_timer_irq); -} - -static void mips_perf_dispatch(void) -{ - do_IRQ(mips_cpu_perf_irq); -} - static void __iomem *status_reg = (void __iomem *)0xbf000410; /* @@ -81,21 +69,20 @@ void read_persistent_clock(struct timespec *ts) ts->tv_nsec = 0; } -static void __init plat_perf_setup(void) +int get_c0_perfcount_int(void) { - if (cp0_perfcount_irq >= 0) { - if (cpu_has_vint) - set_vi_handler(cp0_perfcount_irq, mips_perf_dispatch); - mips_cpu_perf_irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq; - } + if (gic_present) + return gic_get_c0_compare_int(); + if (cp0_perfcount_irq >= 0) + return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq; + return -1; } unsigned int get_c0_compare_int(void) { - if (cpu_has_vint) - set_vi_handler(cp0_compare_irq, mips_timer_dispatch); - mips_cpu_timer_irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq; - return mips_cpu_timer_irq; + if (gic_present) + return gic_get_c0_compare_int(); + return MIPS_CPU_IRQ_BASE + cp0_compare_irq; } void __init plat_time_init(void) @@ -108,6 +95,4 @@ void __init plat_time_init(void) (est_freq % 1000000) * 100 / 1000000); mips_scroll_message(); - - plat_perf_setup(); } diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c index 9b55143d19db..9fd6834a2172 100644 --- a/arch/mips/net/bpf_jit.c +++ b/arch/mips/net/bpf_jit.c @@ -426,7 +426,7 @@ static inline void emit_mod(unsigned int dst, unsigned int src, u32 *p = &ctx->target[ctx->idx]; uasm_i_divu(&p, dst, src); p = &ctx->target[ctx->idx + 1]; - uasm_i_mflo(&p, dst); + uasm_i_mfhi(&p, dst); } ctx->idx += 2; /* 2 insts */ } @@ -971,7 +971,7 @@ load_ind: break; case BPF_ALU | BPF_MOD | BPF_K: /* A %= k */ - if (k == 1 || optimize_div(&k)) { + if (k == 1) { ctx->flags |= SEEN_A; emit_jit_reg_move(r_A, r_zero, ctx); } else { diff --git a/arch/mips/oprofile/Makefile b/arch/mips/oprofile/Makefile index 9c0a6782c091..070afdb297df 100644 --- a/arch/mips/oprofile/Makefile +++ b/arch/mips/oprofile/Makefile @@ -14,3 +14,4 @@ oprofile-$(CONFIG_CPU_R10000) += op_model_mipsxx.o oprofile-$(CONFIG_CPU_SB1) += 
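The one-instruction bpf_jit change above (uasm_i_mflo -> uasm_i_mfhi in emit_mod) matters because the MIPS divu instruction leaves the quotient in LO and the remainder in HI, and BPF_MOD needs the remainder. A plain C illustration of the difference (not the emitted MIPS code):

#include <stdio.h>
#include <stdint.h>

/* what "divu; mfhi dst" yields versus the old, buggy "divu; mflo dst" */
int main(void)
{
	uint32_t A = 17, X = 5;

	uint32_t lo = A / X;	/* divu leaves the quotient in LO */
	uint32_t hi = A % X;	/* ... and the remainder in HI */

	printf("mflo would give %u, mfhi gives %u\n", lo, hi);	/* 3 vs 2 */
	return 0;
}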
op_model_mipsxx.o oprofile-$(CONFIG_CPU_XLR) += op_model_mipsxx.o oprofile-$(CONFIG_CPU_LOONGSON2) += op_model_loongson2.o +oprofile-$(CONFIG_CPU_LOONGSON3) += op_model_loongson3.o diff --git a/arch/mips/oprofile/backtrace.c b/arch/mips/oprofile/backtrace.c index 83a1dfd8f0e3..5e645c9a3162 100644 --- a/arch/mips/oprofile/backtrace.c +++ b/arch/mips/oprofile/backtrace.c @@ -65,7 +65,7 @@ static inline int is_end_of_function_marker(union mips_instruction *ip) * - handle cases where the stack is adjusted inside a function * (generally doesn't happen) * - find optimal value for max_instr_check - * - try to find a way to handle leaf functions + * - try to find a better way to handle leaf functions */ static inline int unwind_user_frame(struct stackframe *old_frame, @@ -104,7 +104,7 @@ static inline int unwind_user_frame(struct stackframe *old_frame, } if (!ra_offset || !stack_size) - return -1; + goto done; if (ra_offset) { new_frame.ra = old_frame->sp + ra_offset; @@ -121,6 +121,7 @@ static inline int unwind_user_frame(struct stackframe *old_frame, if (new_frame.sp > old_frame->sp) return -2; +done: new_frame.pc = old_frame->ra; *old_frame = new_frame; diff --git a/arch/mips/oprofile/common.c b/arch/mips/oprofile/common.c index e74732449478..a26cbe372e06 100644 --- a/arch/mips/oprofile/common.c +++ b/arch/mips/oprofile/common.c @@ -18,6 +18,7 @@ extern struct op_mips_model op_model_mipsxx_ops __weak; extern struct op_mips_model op_model_loongson2_ops __weak; +extern struct op_mips_model op_model_loongson3_ops __weak; static struct op_mips_model *model; @@ -104,8 +105,17 @@ int __init oprofile_arch_init(struct oprofile_operations *ops) case CPU_LOONGSON2: lmodel = &op_model_loongson2_ops; break; + case CPU_LOONGSON3: + lmodel = &op_model_loongson3_ops; + break; }; + /* + * Always set the backtrace. This allows unsupported CPU types to still + * use timer-based oprofile. + */ + ops->backtrace = op_mips_backtrace; + if (!lmodel) return -ENODEV; @@ -121,7 +131,6 @@ int __init oprofile_arch_init(struct oprofile_operations *ops) ops->start = op_mips_start; ops->stop = op_mips_stop; ops->cpu_type = lmodel->cpu_type; - ops->backtrace = op_mips_backtrace; printk(KERN_INFO "oprofile: using %s performance monitoring.\n", lmodel->cpu_type); diff --git a/arch/mips/oprofile/op_model_loongson3.c b/arch/mips/oprofile/op_model_loongson3.c new file mode 100644 index 000000000000..8bcf7fc40f0d --- /dev/null +++ b/arch/mips/oprofile/op_model_loongson3.c @@ -0,0 +1,220 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + */ +#include <linux/init.h> +#include <linux/cpu.h> +#include <linux/smp.h> +#include <linux/proc_fs.h> +#include <linux/oprofile.h> +#include <linux/spinlock.h> +#include <linux/interrupt.h> +#include <asm/uaccess.h> +#include <irq.h> +#include <loongson.h> +#include "op_impl.h" + +#define LOONGSON3_PERFCNT_OVERFLOW (1ULL << 63) + +#define LOONGSON3_PERFCTRL_EXL (1UL << 0) +#define LOONGSON3_PERFCTRL_KERNEL (1UL << 1) +#define LOONGSON3_PERFCTRL_SUPERVISOR (1UL << 2) +#define LOONGSON3_PERFCTRL_USER (1UL << 3) +#define LOONGSON3_PERFCTRL_ENABLE (1UL << 4) +#define LOONGSON3_PERFCTRL_W (1UL << 30) +#define LOONGSON3_PERFCTRL_M (1UL << 31) +#define LOONGSON3_PERFCTRL_EVENT(idx, event) \ + (((event) & (idx ? 
0x0f : 0x3f)) << 5) + +/* Loongson-3 PerfCount performance counter1 register */ +#define read_c0_perflo1() __read_64bit_c0_register($25, 0) +#define write_c0_perflo1(val) __write_64bit_c0_register($25, 0, val) +#define read_c0_perfhi1() __read_64bit_c0_register($25, 1) +#define write_c0_perfhi1(val) __write_64bit_c0_register($25, 1, val) + +/* Loongson-3 PerfCount performance counter2 register */ +#define read_c0_perflo2() __read_64bit_c0_register($25, 2) +#define write_c0_perflo2(val) __write_64bit_c0_register($25, 2, val) +#define read_c0_perfhi2() __read_64bit_c0_register($25, 3) +#define write_c0_perfhi2(val) __write_64bit_c0_register($25, 3, val) + +static int (*save_perf_irq)(void); + +static struct loongson3_register_config { + unsigned int control1; + unsigned int control2; + unsigned long long reset_counter1; + unsigned long long reset_counter2; + int ctr1_enable, ctr2_enable; +} reg; + +static void reset_counters(void *arg) +{ + write_c0_perfhi1(0); + write_c0_perfhi2(0); + write_c0_perflo1(0xc0000000); + write_c0_perflo2(0x40000000); +} + +/* Compute all of the registers in preparation for enabling profiling. */ +static void loongson3_reg_setup(struct op_counter_config *ctr) +{ + unsigned int control1 = 0; + unsigned int control2 = 0; + + reg.reset_counter1 = 0; + reg.reset_counter2 = 0; + /* Compute the performance counter control word. */ + /* For now count kernel and user mode */ + if (ctr[0].enabled) { + control1 |= LOONGSON3_PERFCTRL_EVENT(0, ctr[0].event) | + LOONGSON3_PERFCTRL_ENABLE; + if (ctr[0].kernel) + control1 |= LOONGSON3_PERFCTRL_KERNEL; + if (ctr[0].user) + control1 |= LOONGSON3_PERFCTRL_USER; + reg.reset_counter1 = 0x8000000000000000ULL - ctr[0].count; + } + + if (ctr[1].enabled) { + control2 |= LOONGSON3_PERFCTRL_EVENT(1, ctr[1].event) | + LOONGSON3_PERFCTRL_ENABLE; + if (ctr[1].kernel) + control2 |= LOONGSON3_PERFCTRL_KERNEL; + if (ctr[1].user) + control2 |= LOONGSON3_PERFCTRL_USER; + reg.reset_counter2 = 0x8000000000000000ULL - ctr[1].count; + } + + if (ctr[0].enabled) + control1 |= LOONGSON3_PERFCTRL_EXL; + if (ctr[1].enabled) + control2 |= LOONGSON3_PERFCTRL_EXL; + + reg.control1 = control1; + reg.control2 = control2; + reg.ctr1_enable = ctr[0].enabled; + reg.ctr2_enable = ctr[1].enabled; +} + +/* Program all of the registers in preparation for enabling profiling. 
*/ +static void loongson3_cpu_setup(void *args) +{ + uint64_t perfcount1, perfcount2; + + perfcount1 = reg.reset_counter1; + perfcount2 = reg.reset_counter2; + write_c0_perfhi1(perfcount1); + write_c0_perfhi2(perfcount2); +} + +static void loongson3_cpu_start(void *args) +{ + /* Start all counters on current CPU */ + reg.control1 |= (LOONGSON3_PERFCTRL_W|LOONGSON3_PERFCTRL_M); + reg.control2 |= (LOONGSON3_PERFCTRL_W|LOONGSON3_PERFCTRL_M); + + if (reg.ctr1_enable) + write_c0_perflo1(reg.control1); + if (reg.ctr2_enable) + write_c0_perflo2(reg.control2); +} + +static void loongson3_cpu_stop(void *args) +{ + /* Stop all counters on current CPU */ + write_c0_perflo1(0xc0000000); + write_c0_perflo2(0x40000000); + memset(®, 0, sizeof(reg)); +} + +static int loongson3_perfcount_handler(void) +{ + unsigned long flags; + uint64_t counter1, counter2; + uint32_t cause, handled = IRQ_NONE; + struct pt_regs *regs = get_irq_regs(); + + cause = read_c0_cause(); + if (!(cause & CAUSEF_PCI)) + return handled; + + counter1 = read_c0_perfhi1(); + counter2 = read_c0_perfhi2(); + + local_irq_save(flags); + + if (counter1 & LOONGSON3_PERFCNT_OVERFLOW) { + if (reg.ctr1_enable) + oprofile_add_sample(regs, 0); + counter1 = reg.reset_counter1; + } + if (counter2 & LOONGSON3_PERFCNT_OVERFLOW) { + if (reg.ctr2_enable) + oprofile_add_sample(regs, 1); + counter2 = reg.reset_counter2; + } + + local_irq_restore(flags); + + write_c0_perfhi1(counter1); + write_c0_perfhi2(counter2); + + if (!(cause & CAUSEF_TI)) + handled = IRQ_HANDLED; + + return handled; +} + +static int loongson3_cpu_callback(struct notifier_block *nfb, + unsigned long action, void *hcpu) +{ + switch (action) { + case CPU_STARTING: + case CPU_STARTING_FROZEN: + write_c0_perflo1(reg.control1); + write_c0_perflo2(reg.control2); + break; + case CPU_DYING: + case CPU_DYING_FROZEN: + write_c0_perflo1(0xc0000000); + write_c0_perflo2(0x40000000); + break; + } + + return NOTIFY_OK; +} + +static struct notifier_block loongson3_notifier_block = { + .notifier_call = loongson3_cpu_callback +}; + +static int __init loongson3_init(void) +{ + on_each_cpu(reset_counters, NULL, 1); + register_hotcpu_notifier(&loongson3_notifier_block); + save_perf_irq = perf_irq; + perf_irq = loongson3_perfcount_handler; + + return 0; +} + +static void loongson3_exit(void) +{ + on_each_cpu(reset_counters, NULL, 1); + unregister_hotcpu_notifier(&loongson3_notifier_block); + perf_irq = save_perf_irq; +} + +struct op_mips_model op_model_loongson3_ops = { + .reg_setup = loongson3_reg_setup, + .cpu_setup = loongson3_cpu_setup, + .init = loongson3_init, + .exit = loongson3_exit, + .cpu_start = loongson3_cpu_start, + .cpu_stop = loongson3_cpu_stop, + .cpu_type = "mips/loongson3", + .num_counters = 2 +}; diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c index 42821ae2d77e..01f721a85c5b 100644 --- a/arch/mips/oprofile/op_model_mipsxx.c +++ b/arch/mips/oprofile/op_model_mipsxx.c @@ -11,6 +11,7 @@ #include <linux/interrupt.h> #include <linux/smp.h> #include <asm/irq_regs.h> +#include <asm/time.h> #include "op_impl.h" @@ -35,6 +36,7 @@ #define M_PERFCTL_COUNT_ALL_THREADS (1UL << 13) static int (*save_perf_irq)(void); +static int perfcount_irq; /* * XLR has only one set of counters per core. 
Designate the @@ -431,8 +433,16 @@ static int __init mipsxx_init(void) save_perf_irq = perf_irq; perf_irq = mipsxx_perfcount_handler; - if ((cp0_perfcount_irq >= 0) && (cp0_compare_irq != cp0_perfcount_irq)) - return request_irq(cp0_perfcount_irq, mipsxx_perfcount_int, + if (get_c0_perfcount_int) + perfcount_irq = get_c0_perfcount_int(); + else if ((cp0_perfcount_irq >= 0) && + (cp0_compare_irq != cp0_perfcount_irq)) + perfcount_irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq; + else + perfcount_irq = -1; + + if (perfcount_irq >= 0) + return request_irq(perfcount_irq, mipsxx_perfcount_int, 0, "Perfcounter", save_perf_irq); return 0; @@ -442,8 +452,8 @@ static void mipsxx_exit(void) { int counters = op_model_mipsxx_ops.num_counters; - if ((cp0_perfcount_irq >= 0) && (cp0_compare_irq != cp0_perfcount_irq)) - free_irq(cp0_perfcount_irq, save_perf_irq); + if (perfcount_irq >= 0) + free_irq(perfcount_irq, save_perf_irq); counters = counters_per_cpu_to_total(counters); on_each_cpu(reset_counters, (void *)(long)counters, 1); diff --git a/arch/mips/pci/Makefile b/arch/mips/pci/Makefile index 6523d558ff5a..300591c6278d 100644 --- a/arch/mips/pci/Makefile +++ b/arch/mips/pci/Makefile @@ -19,6 +19,7 @@ obj-$(CONFIG_BCM47XX) += pci-bcm47xx.o obj-$(CONFIG_BCM63XX) += pci-bcm63xx.o fixup-bcm63xx.o \ ops-bcm63xx.o obj-$(CONFIG_MIPS_ALCHEMY) += pci-alchemy.o +obj-$(CONFIG_PCI_AR2315) += pci-ar2315.o obj-$(CONFIG_SOC_AR71XX) += pci-ar71xx.o obj-$(CONFIG_PCI_AR724X) += pci-ar724x.o obj-$(CONFIG_MIPS_PCI_VIRTIO) += pci-virtio-guest.o @@ -42,6 +43,7 @@ obj-$(CONFIG_SIBYTE_BCM1x80) += pci-bcm1480.o pci-bcm1480ht.o obj-$(CONFIG_SNI_RM) += fixup-sni.o ops-sni.o obj-$(CONFIG_LANTIQ) += fixup-lantiq.o obj-$(CONFIG_PCI_LANTIQ) += pci-lantiq.o ops-lantiq.o +obj-$(CONFIG_SOC_RT2880) += pci-rt2880.o obj-$(CONFIG_SOC_RT3883) += pci-rt3883.o obj-$(CONFIG_TANBAC_TB0219) += fixup-tb0219.o obj-$(CONFIG_TANBAC_TB0226) += fixup-tb0226.o diff --git a/arch/mips/pci/msi-octeon.c b/arch/mips/pci/msi-octeon.c index 63bbe07a1ccd..cffaaf4aae3c 100644 --- a/arch/mips/pci/msi-octeon.c +++ b/arch/mips/pci/msi-octeon.c @@ -178,7 +178,7 @@ msi_irq_allocated: pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control); irq_set_msi_desc(irq, desc); - write_msi_msg(irq, &msg); + pci_write_msi_msg(irq, &msg); return 0; } diff --git a/arch/mips/pci/msi-xlp.c b/arch/mips/pci/msi-xlp.c index f7ac3edda1b2..6a40f24c91b4 100644 --- a/arch/mips/pci/msi-xlp.c +++ b/arch/mips/pci/msi-xlp.c @@ -217,7 +217,7 @@ static void xlp_msix_mask_ack(struct irq_data *d) msixvec = nlm_irq_msixvec(d->irq); link = nlm_irq_msixlink(msixvec); - mask_msi_irq(d); + pci_msi_mask_irq(d); md = irq_data_get_irq_handler_data(d); /* Ack MSI on bridge */ @@ -239,10 +239,10 @@ static void xlp_msix_mask_ack(struct irq_data *d) static struct irq_chip xlp_msix_chip = { .name = "XLP-MSIX", - .irq_enable = unmask_msi_irq, - .irq_disable = mask_msi_irq, + .irq_enable = pci_msi_unmask_irq, + .irq_disable = pci_msi_mask_irq, .irq_mask_ack = xlp_msix_mask_ack, - .irq_unmask = unmask_msi_irq, + .irq_unmask = pci_msi_unmask_irq, }; void arch_teardown_msi_irq(unsigned int irq) @@ -345,7 +345,7 @@ static int xlp_setup_msi(uint64_t lnkbase, int node, int link, if (ret < 0) return ret; - write_msi_msg(xirq, &msg); + pci_write_msi_msg(xirq, &msg); return 0; } @@ -446,7 +446,7 @@ static int xlp_setup_msix(uint64_t lnkbase, int node, int link, if (ret < 0) return ret; - write_msi_msg(xirq, &msg); + pci_write_msi_msg(xirq, &msg); return 0; } diff --git a/arch/mips/pci/ops-bcm63xx.c 
b/arch/mips/pci/ops-bcm63xx.c index 13eea696bbe7..d02eb9d16b55 100644 --- a/arch/mips/pci/ops-bcm63xx.c +++ b/arch/mips/pci/ops-bcm63xx.c @@ -469,7 +469,7 @@ static int bcm63xx_pcie_can_access(struct pci_bus *bus, int devfn) { switch (bus->number) { case PCIE_BUS_BRIDGE: - return (PCI_SLOT(devfn) == 0); + return PCI_SLOT(devfn) == 0; case PCIE_BUS_DEVICE: if (PCI_SLOT(devfn) == 0) return bcm_pcie_readl(PCIE_DLSTATUS_REG) diff --git a/arch/mips/pci/ops-nile4.c b/arch/mips/pci/ops-nile4.c index a1a7c9f4096e..b9d1fd0ff7e2 100644 --- a/arch/mips/pci/ops-nile4.c +++ b/arch/mips/pci/ops-nile4.c @@ -13,8 +13,6 @@ volatile unsigned long *const vrc_pciregs = (void *) Vrc5074_BASE; -static DEFINE_SPINLOCK(nile4_pci_lock); - static int nile4_pcibios_config_access(unsigned char access_type, struct pci_bus *bus, unsigned int devfn, int where, u32 *val) { @@ -76,7 +74,6 @@ static int nile4_pcibios_config_access(unsigned char access_type, static int nile4_pcibios_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { - unsigned long flags; u32 data = 0; int err; @@ -85,11 +82,8 @@ static int nile4_pcibios_read(struct pci_bus *bus, unsigned int devfn, else if ((size == 4) && (where & 3)) return PCIBIOS_BAD_REGISTER_NUMBER; - spin_lock_irqsave(&nile4_pci_lock, flags); err = nile4_pcibios_config_access(PCI_ACCESS_READ, bus, devfn, where, - &data); - spin_unlock_irqrestore(&nile4_pci_lock, flags); - + &data); if (err) return err; @@ -106,7 +100,6 @@ static int nile4_pcibios_read(struct pci_bus *bus, unsigned int devfn, static int nile4_pcibios_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) { - unsigned long flags; u32 data = 0; int err; @@ -115,11 +108,8 @@ static int nile4_pcibios_write(struct pci_bus *bus, unsigned int devfn, else if ((size == 4) && (where & 3)) return PCIBIOS_BAD_REGISTER_NUMBER; - spin_lock_irqsave(&nile4_pci_lock, flags); err = nile4_pcibios_config_access(PCI_ACCESS_READ, bus, devfn, where, &data); - spin_unlock_irqrestore(&nile4_pci_lock, flags); - if (err) return err; diff --git a/arch/mips/pci/ops-pmcmsp.c b/arch/mips/pci/ops-pmcmsp.c index 50034f985be1..dd2d9f7e9412 100644 --- a/arch/mips/pci/ops-pmcmsp.c +++ b/arch/mips/pci/ops-pmcmsp.c @@ -193,8 +193,6 @@ static void pci_proc_init(void) } #endif /* CONFIG_PROC_FS && PCI_COUNTERS */ -static DEFINE_SPINLOCK(bpci_lock); - /***************************************************************************** * * STRUCT: pci_io_resource @@ -368,7 +366,6 @@ int msp_pcibios_config_access(unsigned char access_type, struct msp_pci_regs *preg = (void *)PCI_BASE_REG; unsigned char bus_num = bus->number; unsigned char dev_fn = (unsigned char)devfn; - unsigned long flags; unsigned long intr; unsigned long value; static char pciirqflag; @@ -401,10 +398,7 @@ int msp_pcibios_config_access(unsigned char access_type, } #if defined(CONFIG_PMC_MSP7120_GW) || defined(CONFIG_PMC_MSP7120_EVAL) - local_irq_save(flags); vpe_status = dvpe(); -#else - spin_lock_irqsave(&bpci_lock, flags); #endif /* @@ -457,9 +451,6 @@ int msp_pcibios_config_access(unsigned char access_type, #if defined(CONFIG_PMC_MSP7120_GW) || defined(CONFIG_PMC_MSP7120_EVAL) evpe(vpe_status); - local_irq_restore(flags); -#else - spin_unlock_irqrestore(&bpci_lock, flags); #endif return -1; @@ -467,9 +458,6 @@ int msp_pcibios_config_access(unsigned char access_type, #if defined(CONFIG_PMC_MSP7120_GW) || defined(CONFIG_PMC_MSP7120_EVAL) evpe(vpe_status); - local_irq_restore(flags); -#else - spin_unlock_irqrestore(&bpci_lock, flags); #endif return 
PCIBIOS_SUCCESSFUL; diff --git a/arch/mips/pci/pci-alchemy.c b/arch/mips/pci/pci-alchemy.c index c19600a03460..28952637a862 100644 --- a/arch/mips/pci/pci-alchemy.c +++ b/arch/mips/pci/pci-alchemy.c @@ -505,7 +505,6 @@ static struct platform_driver alchemy_pcictl_driver = { .probe = alchemy_pci_probe, .driver = { .name = "alchemy-pci", - .owner = THIS_MODULE, }, }; diff --git a/arch/mips/pci/pci-ar2315.c b/arch/mips/pci/pci-ar2315.c new file mode 100644 index 000000000000..bd2b3b60da83 --- /dev/null +++ b/arch/mips/pci/pci-ar2315.c @@ -0,0 +1,511 @@ +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see <http://www.gnu.org/licenses/>. + */ + +/** + * Both AR2315 and AR2316 chips have PCI interface unit, which supports DMA + * and interrupt. PCI interface supports MMIO access method, but does not + * seem to support I/O ports. + * + * Read/write operation in the region 0x80000000-0xBFFFFFFF causes + * a memory read/write command on the PCI bus. 30 LSBs of address on + * the bus are taken from memory read/write request and 2 MSBs are + * determined by PCI unit configuration. + * + * To work with the configuration space instead of memory is necessary set + * the CFG_SEL bit in the PCI_MISC_CONFIG register. + * + * Devices on the bus can perform DMA requests via chip BAR1. PCI host + * controller BARs are programmend as if an external device is programmed. + * Which means that during configuration, IDSEL pin of the chip should be + * asserted. + * + * We know (and support) only one board that uses the PCI interface - + * Fonera 2.0g (FON2202). It has a USB EHCI controller connected to the + * AR2315 PCI bus. IDSEL pin of USB controller is connected to AD[13] line + * and IDSEL pin of AR2315 is connected to AD[16] line. 
+ */ + +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/platform_device.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/mm.h> +#include <linux/delay.h> +#include <linux/bitops.h> +#include <linux/irq.h> +#include <linux/irqdomain.h> +#include <linux/io.h> +#include <asm/paccess.h> + +/* + * PCI Bus Interface Registers + */ +#define AR2315_PCI_1MS_REG 0x0008 + +#define AR2315_PCI_1MS_MASK 0x3FFFF /* # of AHB clk cycles in 1ms */ + +#define AR2315_PCI_MISC_CONFIG 0x000c + +#define AR2315_PCIMISC_TXD_EN 0x00000001 /* Enable TXD for fragments */ +#define AR2315_PCIMISC_CFG_SEL 0x00000002 /* Mem or Config cycles */ +#define AR2315_PCIMISC_GIG_MASK 0x0000000C /* bits 31-30 for pci req */ +#define AR2315_PCIMISC_RST_MODE 0x00000030 +#define AR2315_PCIRST_INPUT 0x00000000 /* 4:5=0 rst is input */ +#define AR2315_PCIRST_LOW 0x00000010 /* 4:5=1 rst to GND */ +#define AR2315_PCIRST_HIGH 0x00000020 /* 4:5=2 rst to VDD */ +#define AR2315_PCIGRANT_EN 0x00000000 /* 6:7=0 early grant en */ +#define AR2315_PCIGRANT_FRAME 0x00000040 /* 6:7=1 grant waits 4 frame */ +#define AR2315_PCIGRANT_IDLE 0x00000080 /* 6:7=2 grant waits 4 idle */ +#define AR2315_PCIGRANT_GAP 0x00000000 /* 6:7=2 grant waits 4 idle */ +#define AR2315_PCICACHE_DIS 0x00001000 /* PCI external access cache + * disable */ + +#define AR2315_PCI_OUT_TSTAMP 0x0010 + +#define AR2315_PCI_UNCACHE_CFG 0x0014 + +#define AR2315_PCI_IN_EN 0x0100 + +#define AR2315_PCI_IN_EN0 0x01 /* Enable chain 0 */ +#define AR2315_PCI_IN_EN1 0x02 /* Enable chain 1 */ +#define AR2315_PCI_IN_EN2 0x04 /* Enable chain 2 */ +#define AR2315_PCI_IN_EN3 0x08 /* Enable chain 3 */ + +#define AR2315_PCI_IN_DIS 0x0104 + +#define AR2315_PCI_IN_DIS0 0x01 /* Disable chain 0 */ +#define AR2315_PCI_IN_DIS1 0x02 /* Disable chain 1 */ +#define AR2315_PCI_IN_DIS2 0x04 /* Disable chain 2 */ +#define AR2315_PCI_IN_DIS3 0x08 /* Disable chain 3 */ + +#define AR2315_PCI_IN_PTR 0x0200 + +#define AR2315_PCI_OUT_EN 0x0400 + +#define AR2315_PCI_OUT_EN0 0x01 /* Enable chain 0 */ + +#define AR2315_PCI_OUT_DIS 0x0404 + +#define AR2315_PCI_OUT_DIS0 0x01 /* Disable chain 0 */ + +#define AR2315_PCI_OUT_PTR 0x0408 + +/* PCI interrupt status (write one to clear) */ +#define AR2315_PCI_ISR 0x0500 + +#define AR2315_PCI_INT_TX 0x00000001 /* Desc In Completed */ +#define AR2315_PCI_INT_TXOK 0x00000002 /* Desc In OK */ +#define AR2315_PCI_INT_TXERR 0x00000004 /* Desc In ERR */ +#define AR2315_PCI_INT_TXEOL 0x00000008 /* Desc In End-of-List */ +#define AR2315_PCI_INT_RX 0x00000010 /* Desc Out Completed */ +#define AR2315_PCI_INT_RXOK 0x00000020 /* Desc Out OK */ +#define AR2315_PCI_INT_RXERR 0x00000040 /* Desc Out ERR */ +#define AR2315_PCI_INT_RXEOL 0x00000080 /* Desc Out EOL */ +#define AR2315_PCI_INT_TXOOD 0x00000200 /* Desc In Out-of-Desc */ +#define AR2315_PCI_INT_DESCMASK 0x0000FFFF /* Desc Mask */ +#define AR2315_PCI_INT_EXT 0x02000000 /* Extern PCI INTA */ +#define AR2315_PCI_INT_ABORT 0x04000000 /* PCI bus abort event */ + +/* PCI interrupt mask */ +#define AR2315_PCI_IMR 0x0504 + +/* Global PCI interrupt enable */ +#define AR2315_PCI_IER 0x0508 + +#define AR2315_PCI_IER_DISABLE 0x00 /* disable pci interrupts */ +#define AR2315_PCI_IER_ENABLE 0x01 /* enable pci interrupts */ + +#define AR2315_PCI_HOST_IN_EN 0x0800 +#define AR2315_PCI_HOST_IN_DIS 0x0804 +#define AR2315_PCI_HOST_IN_PTR 0x0810 +#define AR2315_PCI_HOST_OUT_EN 0x0900 +#define AR2315_PCI_HOST_OUT_DIS 0x0904 +#define AR2315_PCI_HOST_OUT_PTR 0x0908 + +/* + * PCI interrupts, which share IP5 + * Keep 
ordered according to AR2315_PCI_INT_XXX bits + */ +#define AR2315_PCI_IRQ_EXT 25 +#define AR2315_PCI_IRQ_ABORT 26 +#define AR2315_PCI_IRQ_COUNT 27 + +/* Arbitrary size of memory region to access the configuration space */ +#define AR2315_PCI_CFG_SIZE 0x00100000 + +#define AR2315_PCI_HOST_SLOT 3 +#define AR2315_PCI_HOST_DEVID ((0xff18 << 16) | PCI_VENDOR_ID_ATHEROS) + +/* ??? access BAR */ +#define AR2315_PCI_HOST_MBAR0 0x10000000 +/* RAM access BAR */ +#define AR2315_PCI_HOST_MBAR1 AR2315_PCI_HOST_SDRAM_BASEADDR +/* ??? access BAR */ +#define AR2315_PCI_HOST_MBAR2 0x30000000 + +struct ar2315_pci_ctrl { + void __iomem *cfg_mem; + void __iomem *mmr_mem; + unsigned irq; + unsigned irq_ext; + struct irq_domain *domain; + struct pci_controller pci_ctrl; + struct resource mem_res; + struct resource io_res; +}; + +static inline struct ar2315_pci_ctrl *ar2315_pci_bus_to_apc(struct pci_bus *bus) +{ + struct pci_controller *hose = bus->sysdata; + + return container_of(hose, struct ar2315_pci_ctrl, pci_ctrl); +} + +static inline u32 ar2315_pci_reg_read(struct ar2315_pci_ctrl *apc, u32 reg) +{ + return __raw_readl(apc->mmr_mem + reg); +} + +static inline void ar2315_pci_reg_write(struct ar2315_pci_ctrl *apc, u32 reg, + u32 val) +{ + __raw_writel(val, apc->mmr_mem + reg); +} + +static inline void ar2315_pci_reg_mask(struct ar2315_pci_ctrl *apc, u32 reg, + u32 mask, u32 val) +{ + u32 ret = ar2315_pci_reg_read(apc, reg); + + ret &= ~mask; + ret |= val; + ar2315_pci_reg_write(apc, reg, ret); +} + +static int ar2315_pci_cfg_access(struct ar2315_pci_ctrl *apc, unsigned devfn, + int where, int size, u32 *ptr, bool write) +{ + int func = PCI_FUNC(devfn); + int dev = PCI_SLOT(devfn); + u32 addr = (1 << (13 + dev)) | (func << 8) | (where & ~3); + u32 mask = 0xffffffff >> 8 * (4 - size); + u32 sh = (where & 3) * 8; + u32 value, isr; + + /* Prevent access past the remapped area */ + if (addr >= AR2315_PCI_CFG_SIZE || dev > 18) + return PCIBIOS_DEVICE_NOT_FOUND; + + /* Clear pending errors */ + ar2315_pci_reg_write(apc, AR2315_PCI_ISR, AR2315_PCI_INT_ABORT); + /* Select Configuration access */ + ar2315_pci_reg_mask(apc, AR2315_PCI_MISC_CONFIG, 0, + AR2315_PCIMISC_CFG_SEL); + + mb(); /* PCI must see space change before we begin */ + + value = __raw_readl(apc->cfg_mem + addr); + + isr = ar2315_pci_reg_read(apc, AR2315_PCI_ISR); + + if (isr & AR2315_PCI_INT_ABORT) + goto exit_err; + + if (write) { + value = (value & ~(mask << sh)) | *ptr << sh; + __raw_writel(value, apc->cfg_mem + addr); + isr = ar2315_pci_reg_read(apc, AR2315_PCI_ISR); + if (isr & AR2315_PCI_INT_ABORT) + goto exit_err; + } else { + *ptr = (value >> sh) & mask; + } + + goto exit; + +exit_err: + ar2315_pci_reg_write(apc, AR2315_PCI_ISR, AR2315_PCI_INT_ABORT); + if (!write) + *ptr = 0xffffffff; + +exit: + /* Select Memory access */ + ar2315_pci_reg_mask(apc, AR2315_PCI_MISC_CONFIG, AR2315_PCIMISC_CFG_SEL, + 0); + + return isr & AR2315_PCI_INT_ABORT ? 
PCIBIOS_DEVICE_NOT_FOUND : + PCIBIOS_SUCCESSFUL; +} + +static inline int ar2315_pci_local_cfg_rd(struct ar2315_pci_ctrl *apc, + unsigned devfn, int where, u32 *val) +{ + return ar2315_pci_cfg_access(apc, devfn, where, sizeof(u32), val, + false); +} + +static inline int ar2315_pci_local_cfg_wr(struct ar2315_pci_ctrl *apc, + unsigned devfn, int where, u32 val) +{ + return ar2315_pci_cfg_access(apc, devfn, where, sizeof(u32), &val, + true); +} + +static int ar2315_pci_cfg_read(struct pci_bus *bus, unsigned devfn, int where, + int size, u32 *value) +{ + struct ar2315_pci_ctrl *apc = ar2315_pci_bus_to_apc(bus); + + if (PCI_SLOT(devfn) == AR2315_PCI_HOST_SLOT) + return PCIBIOS_DEVICE_NOT_FOUND; + + return ar2315_pci_cfg_access(apc, devfn, where, size, value, false); +} + +static int ar2315_pci_cfg_write(struct pci_bus *bus, unsigned devfn, int where, + int size, u32 value) +{ + struct ar2315_pci_ctrl *apc = ar2315_pci_bus_to_apc(bus); + + if (PCI_SLOT(devfn) == AR2315_PCI_HOST_SLOT) + return PCIBIOS_DEVICE_NOT_FOUND; + + return ar2315_pci_cfg_access(apc, devfn, where, size, &value, true); +} + +static struct pci_ops ar2315_pci_ops = { + .read = ar2315_pci_cfg_read, + .write = ar2315_pci_cfg_write, +}; + +static int ar2315_pci_host_setup(struct ar2315_pci_ctrl *apc) +{ + unsigned devfn = PCI_DEVFN(AR2315_PCI_HOST_SLOT, 0); + int res; + u32 id; + + res = ar2315_pci_local_cfg_rd(apc, devfn, PCI_VENDOR_ID, &id); + if (res != PCIBIOS_SUCCESSFUL || id != AR2315_PCI_HOST_DEVID) + return -ENODEV; + + /* Program MBARs */ + ar2315_pci_local_cfg_wr(apc, devfn, PCI_BASE_ADDRESS_0, + AR2315_PCI_HOST_MBAR0); + ar2315_pci_local_cfg_wr(apc, devfn, PCI_BASE_ADDRESS_1, + AR2315_PCI_HOST_MBAR1); + ar2315_pci_local_cfg_wr(apc, devfn, PCI_BASE_ADDRESS_2, + AR2315_PCI_HOST_MBAR2); + + /* Run */ + ar2315_pci_local_cfg_wr(apc, devfn, PCI_COMMAND, PCI_COMMAND_MEMORY | + PCI_COMMAND_MASTER | PCI_COMMAND_SPECIAL | + PCI_COMMAND_INVALIDATE | PCI_COMMAND_PARITY | + PCI_COMMAND_SERR | PCI_COMMAND_FAST_BACK); + + return 0; +} + +static void ar2315_pci_irq_handler(unsigned irq, struct irq_desc *desc) +{ + struct ar2315_pci_ctrl *apc = irq_get_handler_data(irq); + u32 pending = ar2315_pci_reg_read(apc, AR2315_PCI_ISR) & + ar2315_pci_reg_read(apc, AR2315_PCI_IMR); + unsigned pci_irq = 0; + + if (pending) + pci_irq = irq_find_mapping(apc->domain, __ffs(pending)); + + if (pci_irq) + generic_handle_irq(pci_irq); + else + spurious_interrupt(); +} + +static void ar2315_pci_irq_mask(struct irq_data *d) +{ + struct ar2315_pci_ctrl *apc = irq_data_get_irq_chip_data(d); + + ar2315_pci_reg_mask(apc, AR2315_PCI_IMR, BIT(d->hwirq), 0); +} + +static void ar2315_pci_irq_mask_ack(struct irq_data *d) +{ + struct ar2315_pci_ctrl *apc = irq_data_get_irq_chip_data(d); + u32 m = BIT(d->hwirq); + + ar2315_pci_reg_mask(apc, AR2315_PCI_IMR, m, 0); + ar2315_pci_reg_write(apc, AR2315_PCI_ISR, m); +} + +static void ar2315_pci_irq_unmask(struct irq_data *d) +{ + struct ar2315_pci_ctrl *apc = irq_data_get_irq_chip_data(d); + + ar2315_pci_reg_mask(apc, AR2315_PCI_IMR, 0, BIT(d->hwirq)); +} + +static struct irq_chip ar2315_pci_irq_chip = { + .name = "AR2315-PCI", + .irq_mask = ar2315_pci_irq_mask, + .irq_mask_ack = ar2315_pci_irq_mask_ack, + .irq_unmask = ar2315_pci_irq_unmask, +}; + +static int ar2315_pci_irq_map(struct irq_domain *d, unsigned irq, + irq_hw_number_t hw) +{ + irq_set_chip_and_handler(irq, &ar2315_pci_irq_chip, handle_level_irq); + irq_set_chip_data(irq, d->host_data); + return 0; +} + +static struct irq_domain_ops ar2315_pci_irq_domain_ops = 
{ + .map = ar2315_pci_irq_map, +}; + +static void ar2315_pci_irq_init(struct ar2315_pci_ctrl *apc) +{ + ar2315_pci_reg_mask(apc, AR2315_PCI_IER, AR2315_PCI_IER_ENABLE, 0); + ar2315_pci_reg_mask(apc, AR2315_PCI_IMR, (AR2315_PCI_INT_ABORT | + AR2315_PCI_INT_EXT), 0); + + apc->irq_ext = irq_create_mapping(apc->domain, AR2315_PCI_IRQ_EXT); + + irq_set_chained_handler(apc->irq, ar2315_pci_irq_handler); + irq_set_handler_data(apc->irq, apc); + + /* Clear any pending Abort or external Interrupts + * and enable interrupt processing */ + ar2315_pci_reg_write(apc, AR2315_PCI_ISR, AR2315_PCI_INT_ABORT | + AR2315_PCI_INT_EXT); + ar2315_pci_reg_mask(apc, AR2315_PCI_IER, 0, AR2315_PCI_IER_ENABLE); +} + +static int ar2315_pci_probe(struct platform_device *pdev) +{ + struct ar2315_pci_ctrl *apc; + struct device *dev = &pdev->dev; + struct resource *res; + int irq, err; + + apc = devm_kzalloc(dev, sizeof(*apc), GFP_KERNEL); + if (!apc) + return -ENOMEM; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return -EINVAL; + apc->irq = irq; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "ar2315-pci-ctrl"); + apc->mmr_mem = devm_ioremap_resource(dev, res); + if (IS_ERR(apc->mmr_mem)) + return PTR_ERR(apc->mmr_mem); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "ar2315-pci-ext"); + if (!res) + return -EINVAL; + + apc->mem_res.name = "AR2315 PCI mem space"; + apc->mem_res.parent = res; + apc->mem_res.start = res->start; + apc->mem_res.end = res->end; + apc->mem_res.flags = IORESOURCE_MEM; + + /* Remap PCI config space */ + apc->cfg_mem = devm_ioremap_nocache(dev, res->start, + AR2315_PCI_CFG_SIZE); + if (!apc->cfg_mem) { + dev_err(dev, "failed to remap PCI config space\n"); + return -ENOMEM; + } + + /* Reset the PCI bus by setting bits 5-4 in PCI_MCFG */ + ar2315_pci_reg_mask(apc, AR2315_PCI_MISC_CONFIG, + AR2315_PCIMISC_RST_MODE, + AR2315_PCIRST_LOW); + msleep(100); + + /* Bring the PCI out of reset */ + ar2315_pci_reg_mask(apc, AR2315_PCI_MISC_CONFIG, + AR2315_PCIMISC_RST_MODE, + AR2315_PCIRST_HIGH | AR2315_PCICACHE_DIS | 0x8); + + ar2315_pci_reg_write(apc, AR2315_PCI_UNCACHE_CFG, + 0x1E | /* 1GB uncached */ + (1 << 5) | /* Enable uncached */ + (0x2 << 30) /* Base: 0x80000000 */); + ar2315_pci_reg_read(apc, AR2315_PCI_UNCACHE_CFG); + + msleep(500); + + err = ar2315_pci_host_setup(apc); + if (err) + return err; + + apc->domain = irq_domain_add_linear(NULL, AR2315_PCI_IRQ_COUNT, + &ar2315_pci_irq_domain_ops, apc); + if (!apc->domain) { + dev_err(dev, "failed to add IRQ domain\n"); + return -ENOMEM; + } + + ar2315_pci_irq_init(apc); + + /* PCI controller does not support I/O ports */ + apc->io_res.name = "AR2315 IO space"; + apc->io_res.start = 0; + apc->io_res.end = 0; + apc->io_res.flags = IORESOURCE_IO, + + apc->pci_ctrl.pci_ops = &ar2315_pci_ops; + apc->pci_ctrl.mem_resource = &apc->mem_res, + apc->pci_ctrl.io_resource = &apc->io_res, + + register_pci_controller(&apc->pci_ctrl); + + dev_info(dev, "register PCI controller\n"); + + return 0; +} + +static struct platform_driver ar2315_pci_driver = { + .probe = ar2315_pci_probe, + .driver = { + .name = "ar2315-pci", + .owner = THIS_MODULE, + }, +}; + +static int __init ar2315_pci_init(void) +{ + return platform_driver_register(&ar2315_pci_driver); +} +arch_initcall(ar2315_pci_init); + +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + struct ar2315_pci_ctrl *apc = ar2315_pci_bus_to_apc(dev->bus); + + return slot ? 
0 : apc->irq_ext; +} + +int pcibios_plat_dev_init(struct pci_dev *dev) +{ + return 0; +} diff --git a/arch/mips/pci/pci-ar71xx.c b/arch/mips/pci/pci-ar71xx.c index d471a26dd5f8..9e62ad31d4b5 100644 --- a/arch/mips/pci/pci-ar71xx.c +++ b/arch/mips/pci/pci-ar71xx.c @@ -50,7 +50,6 @@ struct ar71xx_pci_controller { void __iomem *cfg_base; - spinlock_t lock; int irq; int irq_base; struct pci_controller pci_ctrl; @@ -182,7 +181,6 @@ static int ar71xx_pci_read_config(struct pci_bus *bus, unsigned int devfn, { struct ar71xx_pci_controller *apc = pci_bus_to_ar71xx_controller(bus); void __iomem *base = apc->cfg_base; - unsigned long flags; u32 data; int err; int ret; @@ -190,8 +188,6 @@ static int ar71xx_pci_read_config(struct pci_bus *bus, unsigned int devfn, ret = PCIBIOS_SUCCESSFUL; data = ~0; - spin_lock_irqsave(&apc->lock, flags); - err = ar71xx_pci_set_cfgaddr(bus, devfn, where, size, AR71XX_PCI_CFG_CMD_READ); if (err) @@ -199,8 +195,6 @@ static int ar71xx_pci_read_config(struct pci_bus *bus, unsigned int devfn, else data = __raw_readl(base + AR71XX_PCI_REG_CFG_RDDATA); - spin_unlock_irqrestore(&apc->lock, flags); - *value = (data >> (8 * (where & 3))) & ar71xx_pci_read_mask[size & 7]; return ret; @@ -211,15 +205,12 @@ static int ar71xx_pci_write_config(struct pci_bus *bus, unsigned int devfn, { struct ar71xx_pci_controller *apc = pci_bus_to_ar71xx_controller(bus); void __iomem *base = apc->cfg_base; - unsigned long flags; int err; int ret; value = value << (8 * (where & 3)); ret = PCIBIOS_SUCCESSFUL; - spin_lock_irqsave(&apc->lock, flags); - err = ar71xx_pci_set_cfgaddr(bus, devfn, where, size, AR71XX_PCI_CFG_CMD_WRITE); if (err) @@ -227,8 +218,6 @@ static int ar71xx_pci_write_config(struct pci_bus *bus, unsigned int devfn, else __raw_writel(value, base + AR71XX_PCI_REG_CFG_WRDATA); - spin_unlock_irqrestore(&apc->lock, flags); - return ret; } @@ -360,8 +349,6 @@ static int ar71xx_pci_probe(struct platform_device *pdev) if (!apc) return -ENOMEM; - spin_lock_init(&apc->lock); - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg_base"); apc->cfg_base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(apc->cfg_base)) @@ -416,7 +403,6 @@ static struct platform_driver ar71xx_pci_driver = { .probe = ar71xx_pci_probe, .driver = { .name = "ar71xx-pci", - .owner = THIS_MODULE, }, }; diff --git a/arch/mips/pci/pci-ar724x.c b/arch/mips/pci/pci-ar724x.c index 785b2659b519..a1b7d2a1b0d5 100644 --- a/arch/mips/pci/pci-ar724x.c +++ b/arch/mips/pci/pci-ar724x.c @@ -9,7 +9,6 @@ * by the Free Software Foundation. 
*/ -#include <linux/spinlock.h> #include <linux/irq.h> #include <linux/pci.h> #include <linux/module.h> @@ -48,8 +47,6 @@ struct ar724x_pci_controller { bool bar0_is_cached; u32 bar0_value; - spinlock_t lock; - struct pci_controller pci_controller; struct resource io_res; struct resource mem_res; @@ -75,7 +72,6 @@ pci_bus_to_ar724x_controller(struct pci_bus *bus) static int ar724x_pci_local_write(struct ar724x_pci_controller *apc, int where, int size, u32 value) { - unsigned long flags; void __iomem *base; u32 data; int s; @@ -86,8 +82,6 @@ static int ar724x_pci_local_write(struct ar724x_pci_controller *apc, return PCIBIOS_DEVICE_NOT_FOUND; base = apc->crp_base; - - spin_lock_irqsave(&apc->lock, flags); data = __raw_readl(base + (where & ~3)); switch (size) { @@ -105,14 +99,12 @@ static int ar724x_pci_local_write(struct ar724x_pci_controller *apc, data = value; break; default: - spin_unlock_irqrestore(&apc->lock, flags); return PCIBIOS_BAD_REGISTER_NUMBER; } __raw_writel(data, base + (where & ~3)); /* flush write */ __raw_readl(base + (where & ~3)); - spin_unlock_irqrestore(&apc->lock, flags); return PCIBIOS_SUCCESSFUL; } @@ -121,7 +113,6 @@ static int ar724x_pci_read(struct pci_bus *bus, unsigned int devfn, int where, int size, uint32_t *value) { struct ar724x_pci_controller *apc; - unsigned long flags; void __iomem *base; u32 data; @@ -133,8 +124,6 @@ static int ar724x_pci_read(struct pci_bus *bus, unsigned int devfn, int where, return PCIBIOS_DEVICE_NOT_FOUND; base = apc->devcfg_base; - - spin_lock_irqsave(&apc->lock, flags); data = __raw_readl(base + (where & ~3)); switch (size) { @@ -153,13 +142,9 @@ static int ar724x_pci_read(struct pci_bus *bus, unsigned int devfn, int where, case 4: break; default: - spin_unlock_irqrestore(&apc->lock, flags); - return PCIBIOS_BAD_REGISTER_NUMBER; } - spin_unlock_irqrestore(&apc->lock, flags); - if (where == PCI_BASE_ADDRESS_0 && size == 4 && apc->bar0_is_cached) { /* use the cached value */ @@ -175,7 +160,6 @@ static int ar724x_pci_write(struct pci_bus *bus, unsigned int devfn, int where, int size, uint32_t value) { struct ar724x_pci_controller *apc; - unsigned long flags; void __iomem *base; u32 data; int s; @@ -209,8 +193,6 @@ static int ar724x_pci_write(struct pci_bus *bus, unsigned int devfn, int where, } base = apc->devcfg_base; - - spin_lock_irqsave(&apc->lock, flags); data = __raw_readl(base + (where & ~3)); switch (size) { @@ -228,15 +210,12 @@ static int ar724x_pci_write(struct pci_bus *bus, unsigned int devfn, int where, data = value; break; default: - spin_unlock_irqrestore(&apc->lock, flags); - return PCIBIOS_BAD_REGISTER_NUMBER; } __raw_writel(data, base + (where & ~3)); /* flush write */ __raw_readl(base + (where & ~3)); - spin_unlock_irqrestore(&apc->lock, flags); return PCIBIOS_SUCCESSFUL; } @@ -380,8 +359,6 @@ static int ar724x_pci_probe(struct platform_device *pdev) if (apc->irq < 0) return -EINVAL; - spin_lock_init(&apc->lock); - res = platform_get_resource_byname(pdev, IORESOURCE_IO, "io_base"); if (!res) return -EINVAL; @@ -423,7 +400,6 @@ static struct platform_driver ar724x_pci_driver = { .probe = ar724x_pci_probe, .driver = { .name = "ar724x-pci", - .owner = THIS_MODULE, }, }; diff --git a/arch/mips/pci/pci-lantiq.c b/arch/mips/pci/pci-lantiq.c index d3ed15b2b2d1..8b117e638306 100644 --- a/arch/mips/pci/pci-lantiq.c +++ b/arch/mips/pci/pci-lantiq.c @@ -242,7 +242,6 @@ static struct platform_driver ltq_pci_driver = { .probe = ltq_pci_probe, .driver = { .name = "pci-xway", - .owner = THIS_MODULE, .of_match_table = 
ltq_pci_match, }, }; diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c index 59cccd95688b..d07e04121cc6 100644 --- a/arch/mips/pci/pci-octeon.c +++ b/arch/mips/pci/pci-octeon.c @@ -708,7 +708,7 @@ static int __init octeon_pci_setup(void) if (IS_ERR(platform_device_register_simple("octeon_pci_edac", -1, NULL, 0))) - pr_err("Registation of co_pci_edac failed!\n"); + pr_err("Registration of co_pci_edac failed!\n"); octeon_pci_dma_init(); diff --git a/arch/mips/pci/pci-rt2880.c b/arch/mips/pci/pci-rt2880.c new file mode 100644 index 000000000000..a4574947e698 --- /dev/null +++ b/arch/mips/pci/pci-rt2880.c @@ -0,0 +1,285 @@ +/* + * Ralink RT288x SoC PCI register definitions + * + * Copyright (C) 2009 John Crispin <blogic@openwrt.org> + * Copyright (C) 2009 Gabor Juhos <juhosg@openwrt.org> + * + * Parts of this file are based on Ralink's 2.6.21 BSP + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + */ + +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/io.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/of_platform.h> +#include <linux/of_irq.h> +#include <linux/of_pci.h> + +#include <asm/mach-ralink/rt288x.h> + +#define RT2880_PCI_BASE 0x00440000 +#define RT288X_CPU_IRQ_PCI 4 + +#define RT2880_PCI_MEM_BASE 0x20000000 +#define RT2880_PCI_MEM_SIZE 0x10000000 +#define RT2880_PCI_IO_BASE 0x00460000 +#define RT2880_PCI_IO_SIZE 0x00010000 + +#define RT2880_PCI_REG_PCICFG_ADDR 0x00 +#define RT2880_PCI_REG_PCIMSK_ADDR 0x0c +#define RT2880_PCI_REG_BAR0SETUP_ADDR 0x10 +#define RT2880_PCI_REG_IMBASEBAR0_ADDR 0x18 +#define RT2880_PCI_REG_CONFIG_ADDR 0x20 +#define RT2880_PCI_REG_CONFIG_DATA 0x24 +#define RT2880_PCI_REG_MEMBASE 0x28 +#define RT2880_PCI_REG_IOBASE 0x2c +#define RT2880_PCI_REG_ID 0x30 +#define RT2880_PCI_REG_CLASS 0x34 +#define RT2880_PCI_REG_SUBID 0x38 +#define RT2880_PCI_REG_ARBCTL 0x80 + +static void __iomem *rt2880_pci_base; +static DEFINE_SPINLOCK(rt2880_pci_lock); + +static u32 rt2880_pci_reg_read(u32 reg) +{ + return readl(rt2880_pci_base + reg); +} + +static void rt2880_pci_reg_write(u32 val, u32 reg) +{ + writel(val, rt2880_pci_base + reg); +} + +static inline u32 rt2880_pci_get_cfgaddr(unsigned int bus, unsigned int slot, + unsigned int func, unsigned int where) +{ + return ((bus << 16) | (slot << 11) | (func << 8) | (where & 0xfc) | + 0x80000000); +} + +static int rt2880_pci_config_read(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + unsigned long flags; + u32 address; + u32 data; + + address = rt2880_pci_get_cfgaddr(bus->number, PCI_SLOT(devfn), + PCI_FUNC(devfn), where); + + spin_lock_irqsave(&rt2880_pci_lock, flags); + rt2880_pci_reg_write(address, RT2880_PCI_REG_CONFIG_ADDR); + data = rt2880_pci_reg_read(RT2880_PCI_REG_CONFIG_DATA); + spin_unlock_irqrestore(&rt2880_pci_lock, flags); + + switch (size) { + case 1: + *val = (data >> ((where & 3) << 3)) & 0xff; + break; + case 2: + *val = (data >> ((where & 3) << 3)) & 0xffff; + break; + case 4: + *val = data; + break; + } + + return PCIBIOS_SUCCESSFUL; +} + +static int rt2880_pci_config_write(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val) +{ + unsigned long flags; + u32 address; + u32 data; + + address = rt2880_pci_get_cfgaddr(bus->number, PCI_SLOT(devfn), + PCI_FUNC(devfn), where); + + spin_lock_irqsave(&rt2880_pci_lock, flags); + rt2880_pci_reg_write(address, 
RT2880_PCI_REG_CONFIG_ADDR); + data = rt2880_pci_reg_read(RT2880_PCI_REG_CONFIG_DATA); + + switch (size) { + case 1: + data = (data & ~(0xff << ((where & 3) << 3))) | + (val << ((where & 3) << 3)); + break; + case 2: + data = (data & ~(0xffff << ((where & 3) << 3))) | + (val << ((where & 3) << 3)); + break; + case 4: + data = val; + break; + } + + rt2880_pci_reg_write(data, RT2880_PCI_REG_CONFIG_DATA); + spin_unlock_irqrestore(&rt2880_pci_lock, flags); + + return PCIBIOS_SUCCESSFUL; +} + +static struct pci_ops rt2880_pci_ops = { + .read = rt2880_pci_config_read, + .write = rt2880_pci_config_write, +}; + +static struct resource rt2880_pci_mem_resource = { + .name = "PCI MEM space", + .start = RT2880_PCI_MEM_BASE, + .end = RT2880_PCI_MEM_BASE + RT2880_PCI_MEM_SIZE - 1, + .flags = IORESOURCE_MEM, +}; + +static struct resource rt2880_pci_io_resource = { + .name = "PCI IO space", + .start = RT2880_PCI_IO_BASE, + .end = RT2880_PCI_IO_BASE + RT2880_PCI_IO_SIZE - 1, + .flags = IORESOURCE_IO, +}; + +static struct pci_controller rt2880_pci_controller = { + .pci_ops = &rt2880_pci_ops, + .mem_resource = &rt2880_pci_mem_resource, + .io_resource = &rt2880_pci_io_resource, +}; + +static inline u32 rt2880_pci_read_u32(unsigned long reg) +{ + unsigned long flags; + u32 address; + u32 ret; + + address = rt2880_pci_get_cfgaddr(0, 0, 0, reg); + + spin_lock_irqsave(&rt2880_pci_lock, flags); + rt2880_pci_reg_write(address, RT2880_PCI_REG_CONFIG_ADDR); + ret = rt2880_pci_reg_read(RT2880_PCI_REG_CONFIG_DATA); + spin_unlock_irqrestore(&rt2880_pci_lock, flags); + + return ret; +} + +static inline void rt2880_pci_write_u32(unsigned long reg, u32 val) +{ + unsigned long flags; + u32 address; + + address = rt2880_pci_get_cfgaddr(0, 0, 0, reg); + + spin_lock_irqsave(&rt2880_pci_lock, flags); + rt2880_pci_reg_write(address, RT2880_PCI_REG_CONFIG_ADDR); + rt2880_pci_reg_write(val, RT2880_PCI_REG_CONFIG_DATA); + spin_unlock_irqrestore(&rt2880_pci_lock, flags); +} + +int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + u16 cmd; + int irq = -1; + + if (dev->bus->number != 0) + return irq; + + switch (PCI_SLOT(dev->devfn)) { + case 0x00: + rt2880_pci_write_u32(PCI_BASE_ADDRESS_0, 0x08000000); + (void) rt2880_pci_read_u32(PCI_BASE_ADDRESS_0); + break; + case 0x11: + irq = RT288X_CPU_IRQ_PCI; + break; + default: + pr_err("%s:%s[%d] trying to alloc unknown pci irq\n", + __FILE__, __func__, __LINE__); + BUG(); + break; + } + + pci_write_config_byte((struct pci_dev *) dev, + PCI_CACHE_LINE_SIZE, 0x14); + pci_write_config_byte((struct pci_dev *) dev, PCI_LATENCY_TIMER, 0xFF); + pci_read_config_word((struct pci_dev *) dev, PCI_COMMAND, &cmd); + cmd |= PCI_COMMAND_MASTER | PCI_COMMAND_IO | PCI_COMMAND_MEMORY | + PCI_COMMAND_INVALIDATE | PCI_COMMAND_FAST_BACK | + PCI_COMMAND_SERR | PCI_COMMAND_WAIT | PCI_COMMAND_PARITY; + pci_write_config_word((struct pci_dev *) dev, PCI_COMMAND, cmd); + pci_write_config_byte((struct pci_dev *) dev, PCI_INTERRUPT_LINE, + dev->irq); + return irq; +} + +static int rt288x_pci_probe(struct platform_device *pdev) +{ + void __iomem *io_map_base; + int i; + + rt2880_pci_base = ioremap_nocache(RT2880_PCI_BASE, PAGE_SIZE); + + io_map_base = ioremap(RT2880_PCI_IO_BASE, RT2880_PCI_IO_SIZE); + rt2880_pci_controller.io_map_base = (unsigned long) io_map_base; + set_io_port_base((unsigned long) io_map_base); + + ioport_resource.start = RT2880_PCI_IO_BASE; + ioport_resource.end = RT2880_PCI_IO_BASE + RT2880_PCI_IO_SIZE - 1; + + rt2880_pci_reg_write(0, RT2880_PCI_REG_PCICFG_ADDR); + for (i = 0; 
i < 0xfffff; i++) + ; + + rt2880_pci_reg_write(0x79, RT2880_PCI_REG_ARBCTL); + rt2880_pci_reg_write(0x07FF0001, RT2880_PCI_REG_BAR0SETUP_ADDR); + rt2880_pci_reg_write(RT2880_PCI_MEM_BASE, RT2880_PCI_REG_MEMBASE); + rt2880_pci_reg_write(RT2880_PCI_IO_BASE, RT2880_PCI_REG_IOBASE); + rt2880_pci_reg_write(0x08000000, RT2880_PCI_REG_IMBASEBAR0_ADDR); + rt2880_pci_reg_write(0x08021814, RT2880_PCI_REG_ID); + rt2880_pci_reg_write(0x00800001, RT2880_PCI_REG_CLASS); + rt2880_pci_reg_write(0x28801814, RT2880_PCI_REG_SUBID); + rt2880_pci_reg_write(0x000c0000, RT2880_PCI_REG_PCIMSK_ADDR); + + rt2880_pci_write_u32(PCI_BASE_ADDRESS_0, 0x08000000); + (void) rt2880_pci_read_u32(PCI_BASE_ADDRESS_0); + + register_pci_controller(&rt2880_pci_controller); + return 0; +} + +int pcibios_plat_dev_init(struct pci_dev *dev) +{ + return 0; +} + +static const struct of_device_id rt288x_pci_match[] = { + { .compatible = "ralink,rt288x-pci" }, + {}, +}; +MODULE_DEVICE_TABLE(of, rt288x_pci_match); + +static struct platform_driver rt288x_pci_driver = { + .probe = rt288x_pci_probe, + .driver = { + .name = "rt288x-pci", + .owner = THIS_MODULE, + .of_match_table = rt288x_pci_match, + }, +}; + +int __init pcibios_init(void) +{ + int ret = platform_driver_register(&rt288x_pci_driver); + + if (ret) + pr_info("rt288x-pci: Error registering platform driver!"); + + return ret; +} + +arch_initcall(pcibios_init); diff --git a/arch/mips/pci/pci-rt3883.c b/arch/mips/pci/pci-rt3883.c index 72919aeef42b..ec9be8ca4ada 100644 --- a/arch/mips/pci/pci-rt3883.c +++ b/arch/mips/pci/pci-rt3883.c @@ -61,7 +61,6 @@ struct rt3883_pci_controller { void __iomem *base; - spinlock_t lock; struct device_node *intc_of_node; struct irq_domain *irq_domain; @@ -111,10 +110,8 @@ static u32 rt3883_pci_read_cfg32(struct rt3883_pci_controller *rpc, address = rt3883_pci_get_cfgaddr(bus, slot, func, reg); - spin_lock_irqsave(&rpc->lock, flags); rt3883_pci_w32(rpc, address, RT3883_PCI_REG_CFGADDR); ret = rt3883_pci_r32(rpc, RT3883_PCI_REG_CFGDATA); - spin_unlock_irqrestore(&rpc->lock, flags); return ret; } @@ -128,10 +125,8 @@ static void rt3883_pci_write_cfg32(struct rt3883_pci_controller *rpc, address = rt3883_pci_get_cfgaddr(bus, slot, func, reg); - spin_lock_irqsave(&rpc->lock, flags); rt3883_pci_w32(rpc, address, RT3883_PCI_REG_CFGADDR); rt3883_pci_w32(rpc, val, RT3883_PCI_REG_CFGDATA); - spin_unlock_irqrestore(&rpc->lock, flags); } static void rt3883_pci_irq_handler(unsigned int irq, struct irq_desc *desc) @@ -252,10 +247,8 @@ static int rt3883_pci_config_read(struct pci_bus *bus, unsigned int devfn, address = rt3883_pci_get_cfgaddr(bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn), where); - spin_lock_irqsave(&rpc->lock, flags); rt3883_pci_w32(rpc, address, RT3883_PCI_REG_CFGADDR); data = rt3883_pci_r32(rpc, RT3883_PCI_REG_CFGDATA); - spin_unlock_irqrestore(&rpc->lock, flags); switch (size) { case 1: @@ -288,7 +281,6 @@ static int rt3883_pci_config_write(struct pci_bus *bus, unsigned int devfn, address = rt3883_pci_get_cfgaddr(bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn), where); - spin_lock_irqsave(&rpc->lock, flags); rt3883_pci_w32(rpc, address, RT3883_PCI_REG_CFGADDR); data = rt3883_pci_r32(rpc, RT3883_PCI_REG_CFGDATA); @@ -307,7 +299,6 @@ static int rt3883_pci_config_write(struct pci_bus *bus, unsigned int devfn, } rt3883_pci_w32(rpc, data, RT3883_PCI_REG_CFGDATA); - spin_unlock_irqrestore(&rpc->lock, flags); return PCIBIOS_SUCCESSFUL; } @@ -598,7 +589,6 @@ static struct platform_driver rt3883_pci_driver = { .probe = rt3883_pci_probe, .driver = { .name = 
"rt3883-pci", - .owner = THIS_MODULE, .of_match_table = of_match_ptr(rt3883_pci_ids), }, }; diff --git a/arch/mips/pci/pci-tx4939.c b/arch/mips/pci/pci-tx4939.c index c10fbf2a19dc..cd8ed09c4f53 100644 --- a/arch/mips/pci/pci-tx4939.c +++ b/arch/mips/pci/pci-tx4939.c @@ -103,5 +103,5 @@ void __init tx4939_setup_pcierr_irq(void) tx4927_pcierr_interrupt, 0, "PCI error", (void *)TX4939_PCIC_REG)) - pr_warning("Failed to request irq for PCIERR\n"); + pr_warn("Failed to request irq for PCIERR\n"); } diff --git a/arch/mips/pci/pci-xlr.c b/arch/mips/pci/pci-xlr.c index 0dde80332d3a..26d2dabef281 100644 --- a/arch/mips/pci/pci-xlr.c +++ b/arch/mips/pci/pci-xlr.c @@ -260,7 +260,7 @@ int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc) if (ret < 0) return ret; - write_msi_msg(irq, &msg); + pci_write_msi_msg(irq, &msg); return 0; } #endif diff --git a/arch/mips/pmcs-msp71xx/msp_prom.c b/arch/mips/pmcs-msp71xx/msp_prom.c index 1c9897531660..ef620a4c82a5 100644 --- a/arch/mips/pmcs-msp71xx/msp_prom.c +++ b/arch/mips/pmcs-msp71xx/msp_prom.c @@ -295,7 +295,7 @@ char *prom_getenv(char *env_name) while (*var) { if (strncmp(env_name, *var, i) == 0) { - return (*var + strlen(env_name) + 1); + return *var + strlen(env_name) + 1; } var++; } diff --git a/arch/mips/ralink/Kconfig b/arch/mips/ralink/Kconfig index 77e8a9620e18..b1c52ca580f9 100644 --- a/arch/mips/ralink/Kconfig +++ b/arch/mips/ralink/Kconfig @@ -16,6 +16,7 @@ choice config SOC_RT288X bool "RT288x" select MIPS_L1_CACHE_SHIFT_4 + select HW_HAS_PCI config SOC_RT305X bool "RT305x" @@ -26,7 +27,7 @@ choice select HW_HAS_PCI config SOC_MT7620 - bool "MT7620" + bool "MT7620/8" endchoice diff --git a/arch/mips/ralink/Makefile b/arch/mips/ralink/Makefile index 2c09c8aa0ae2..a6c9d0061326 100644 --- a/arch/mips/ralink/Makefile +++ b/arch/mips/ralink/Makefile @@ -10,9 +10,13 @@ obj-y := prom.o of.o reset.o clk.o irq.o timer.o obj-$(CONFIG_CLKEVT_RT3352) += cevt-rt3352.o +obj-$(CONFIG_RALINK_ILL_ACC) += ill_acc.o + obj-$(CONFIG_SOC_RT288X) += rt288x.o obj-$(CONFIG_SOC_RT305X) += rt305x.o obj-$(CONFIG_SOC_RT3883) += rt3883.o obj-$(CONFIG_SOC_MT7620) += mt7620.o obj-$(CONFIG_EARLY_PRINTK) += early_printk.o + +obj-$(CONFIG_DEBUG_FS) += bootrom.o diff --git a/arch/mips/ralink/bootrom.c b/arch/mips/ralink/bootrom.c new file mode 100644 index 000000000000..5403468394fb --- /dev/null +++ b/arch/mips/ralink/bootrom.c @@ -0,0 +1,48 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
+ * + * Copyright (C) 2013 John Crispin <blogic@openwrt.org> + */ + +#include <linux/debugfs.h> +#include <linux/seq_file.h> + +#define BOOTROM_OFFSET 0x10118000 +#define BOOTROM_SIZE 0x8000 + +static void __iomem *membase = (void __iomem *) KSEG1ADDR(BOOTROM_OFFSET); + +static int bootrom_show(struct seq_file *s, void *unused) +{ + seq_write(s, membase, BOOTROM_SIZE); + + return 0; +} + +static int bootrom_open(struct inode *inode, struct file *file) +{ + return single_open(file, bootrom_show, NULL); +} + +static const struct file_operations bootrom_file_ops = { + .open = bootrom_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int bootrom_setup(void) +{ + if (!debugfs_create_file("bootrom", 0444, + NULL, NULL, &bootrom_file_ops)) { + pr_err("Failed to create bootrom debugfs file\n"); + + return -EINVAL; + } + + return 0; +} + +postcore_initcall(bootrom_setup); diff --git a/arch/mips/ralink/clk.c b/arch/mips/ralink/clk.c index 5d0983d47161..feb5a9bf98b4 100644 --- a/arch/mips/ralink/clk.c +++ b/arch/mips/ralink/clk.c @@ -56,6 +56,12 @@ unsigned long clk_get_rate(struct clk *clk) } EXPORT_SYMBOL_GPL(clk_get_rate); +int clk_set_rate(struct clk *clk, unsigned long rate) +{ + return -1; +} +EXPORT_SYMBOL_GPL(clk_set_rate); + void __init plat_time_init(void) { struct clk *clk; diff --git a/arch/mips/ralink/common.h b/arch/mips/ralink/common.h index 42dfd6100a2d..8e7d8e618fb9 100644 --- a/arch/mips/ralink/common.h +++ b/arch/mips/ralink/common.h @@ -11,25 +11,6 @@ #define RAMIPS_SYS_TYPE_LEN 32 -struct ralink_pinmux_grp { - const char *name; - u32 mask; - int gpio_first; - int gpio_last; -}; - -struct ralink_pinmux { - struct ralink_pinmux_grp *mode; - struct ralink_pinmux_grp *uart; - int uart_shift; - u32 uart_mask; - void (*wdt_reset)(void); - struct ralink_pinmux_grp *pci; - int pci_shift; - u32 pci_mask; -}; -extern struct ralink_pinmux rt_gpio_pinmux; - struct ralink_soc_info { unsigned char sys_type[RAMIPS_SYS_TYPE_LEN]; unsigned char *compatible; diff --git a/arch/mips/ralink/early_printk.c b/arch/mips/ralink/early_printk.c index b46d0419d09b..255d695ec8c6 100644 --- a/arch/mips/ralink/early_printk.c +++ b/arch/mips/ralink/early_printk.c @@ -12,21 +12,24 @@ #include <asm/addrspace.h> #ifdef CONFIG_SOC_RT288X -#define EARLY_UART_BASE 0x300c00 +#define EARLY_UART_BASE 0x300c00 +#define CHIPID_BASE 0x300004 +#elif defined(CONFIG_SOC_MT7621) +#define EARLY_UART_BASE 0x1E000c00 +#define CHIPID_BASE 0x1E000004 #else -#define EARLY_UART_BASE 0x10000c00 +#define EARLY_UART_BASE 0x10000c00 +#define CHIPID_BASE 0x10000004 #endif -#define UART_REG_RX 0x00 -#define UART_REG_TX 0x04 -#define UART_REG_IER 0x08 -#define UART_REG_IIR 0x0c -#define UART_REG_FCR 0x10 -#define UART_REG_LCR 0x14 -#define UART_REG_MCR 0x18 -#define UART_REG_LSR 0x1c +#define MT7628_CHIP_NAME1 0x20203832 + +#define UART_REG_TX 0x04 +#define UART_REG_LSR 0x14 +#define UART_REG_LSR_RT2880 0x1c static __iomem void *uart_membase = (__iomem void *) KSEG1ADDR(EARLY_UART_BASE); +static __iomem void *chipid_membase = (__iomem void *) KSEG1ADDR(CHIPID_BASE); static inline void uart_w32(u32 val, unsigned reg) { @@ -38,11 +41,23 @@ static inline u32 uart_r32(unsigned reg) return __raw_readl(uart_membase + reg); } +static inline int soc_is_mt7628(void) +{ + return IS_ENABLED(CONFIG_SOC_MT7620) && + (__raw_readl(chipid_membase) == MT7628_CHIP_NAME1); +} + void prom_putchar(unsigned char ch) { - while ((uart_r32(UART_REG_LSR) & UART_LSR_THRE) == 0) - ; - uart_w32(ch, UART_REG_TX); - while 
((uart_r32(UART_REG_LSR) & UART_LSR_THRE) == 0) - ; + if (IS_ENABLED(CONFIG_SOC_MT7621) || soc_is_mt7628()) { + uart_w32(ch, UART_TX); + while ((uart_r32(UART_REG_LSR) & UART_LSR_THRE) == 0) + ; + } else { + while ((uart_r32(UART_REG_LSR_RT2880) & UART_LSR_THRE) == 0) + ; + uart_w32(ch, UART_REG_TX); + while ((uart_r32(UART_REG_LSR_RT2880) & UART_LSR_THRE) == 0) + ; + } } diff --git a/arch/mips/ralink/ill_acc.c b/arch/mips/ralink/ill_acc.c new file mode 100644 index 000000000000..e20b02e3ae28 --- /dev/null +++ b/arch/mips/ralink/ill_acc.c @@ -0,0 +1,87 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * Copyright (C) 2013 John Crispin <blogic@openwrt.org> + */ + +#include <linux/interrupt.h> +#include <linux/of_platform.h> +#include <linux/of_irq.h> + +#include <asm/mach-ralink/ralink_regs.h> + +#define REG_ILL_ACC_ADDR 0x10 +#define REG_ILL_ACC_TYPE 0x14 + +#define ILL_INT_STATUS BIT(31) +#define ILL_ACC_WRITE BIT(30) +#define ILL_ACC_LEN_M 0xff +#define ILL_ACC_OFF_M 0xf +#define ILL_ACC_OFF_S 16 +#define ILL_ACC_ID_M 0x7 +#define ILL_ACC_ID_S 8 + +#define DRV_NAME "ill_acc" + +static const char * const ill_acc_ids[] = { + "cpu", "dma", "ppe", "pdma rx", "pdma tx", "pci/e", "wmac", "usb", +}; + +static irqreturn_t ill_acc_irq_handler(int irq, void *_priv) +{ + struct device *dev = (struct device *) _priv; + u32 addr = rt_memc_r32(REG_ILL_ACC_ADDR); + u32 type = rt_memc_r32(REG_ILL_ACC_TYPE); + + dev_err(dev, "illegal %s access from %s - addr:0x%08x offset:%d len:%d\n", + (type & ILL_ACC_WRITE) ? ("write") : ("read"), + ill_acc_ids[(type >> ILL_ACC_ID_S) & ILL_ACC_ID_M], + addr, (type >> ILL_ACC_OFF_S) & ILL_ACC_OFF_M, + type & ILL_ACC_LEN_M); + + rt_memc_w32(REG_ILL_ACC_TYPE, REG_ILL_ACC_TYPE); + + return IRQ_HANDLED; +} + +static int __init ill_acc_of_setup(void) +{ + struct platform_device *pdev; + struct device_node *np; + int irq; + + /* somehow this driver breaks on RT5350 */ + if (of_machine_is_compatible("ralink,rt5350-soc")) + return -EINVAL; + + np = of_find_compatible_node(NULL, NULL, "ralink,rt3050-memc"); + if (!np) + return -EINVAL; + + pdev = of_find_device_by_node(np); + if (!pdev) { + pr_err("%s: failed to lookup pdev\n", np->name); + return -EINVAL; + } + + irq = irq_of_parse_and_map(np, 0); + if (!irq) { + dev_err(&pdev->dev, "failed to get irq\n"); + return -EINVAL; + } + + if (request_irq(irq, ill_acc_irq_handler, 0, "ill_acc", &pdev->dev)) { + dev_err(&pdev->dev, "failed to request irq\n"); + return -EINVAL; + } + + rt_memc_w32(ILL_INT_STATUS, REG_ILL_ACC_TYPE); + + dev_info(&pdev->dev, "irq registered\n"); + + return 0; +} + +arch_initcall(ill_acc_of_setup); diff --git a/arch/mips/ralink/irq.c b/arch/mips/ralink/irq.c index 781b3d14a489..7cf91b92e9d1 100644 --- a/arch/mips/ralink/irq.c +++ b/arch/mips/ralink/irq.c @@ -20,14 +20,6 @@ #include "common.h" -/* INTC register offsets */ -#define INTC_REG_STATUS0 0x00 -#define INTC_REG_STATUS1 0x04 -#define INTC_REG_TYPE 0x20 -#define INTC_REG_RAW_STATUS 0x30 -#define INTC_REG_ENABLE 0x34 -#define INTC_REG_DISABLE 0x38 - #define INTC_INT_GLOBAL BIT(31) #define RALINK_CPU_IRQ_INTC (MIPS_CPU_IRQ_BASE + 2) @@ -44,16 +36,36 @@ #define RALINK_INTC_IRQ_PERFC (RALINK_INTC_IRQ_BASE + 9) +enum rt_intc_regs_enum { + INTC_REG_STATUS0 = 0, + INTC_REG_STATUS1, + INTC_REG_TYPE, + INTC_REG_RAW_STATUS, + INTC_REG_ENABLE, + INTC_REG_DISABLE, +}; + +static u32 rt_intc_regs[] = { + 
[INTC_REG_STATUS0] = 0x00, + [INTC_REG_STATUS1] = 0x04, + [INTC_REG_TYPE] = 0x20, + [INTC_REG_RAW_STATUS] = 0x30, + [INTC_REG_ENABLE] = 0x34, + [INTC_REG_DISABLE] = 0x38, +}; + static void __iomem *rt_intc_membase; +static int rt_perfcount_irq; + static inline void rt_intc_w32(u32 val, unsigned reg) { - __raw_writel(val, rt_intc_membase + reg); + __raw_writel(val, rt_intc_membase + rt_intc_regs[reg]); } static inline u32 rt_intc_r32(unsigned reg) { - return __raw_readl(rt_intc_membase + reg); + return __raw_readl(rt_intc_membase + rt_intc_regs[reg]); } static void ralink_intc_irq_unmask(struct irq_data *d) @@ -73,6 +85,11 @@ static struct irq_chip ralink_intc_irq_chip = { .irq_mask_ack = ralink_intc_irq_mask, }; +int get_c0_perfcount_int(void) +{ + return rt_perfcount_irq; +} + unsigned int get_c0_compare_int(void) { return CP0_LEGACY_COMPARE_IRQ; @@ -134,6 +151,10 @@ static int __init intc_of_init(struct device_node *node, struct irq_domain *domain; int irq; + if (!of_property_read_u32_array(node, "ralink,intc-registers", + rt_intc_regs, 6)) + pr_info("intc: using register map from devicetree\n"); + irq = irq_of_parse_and_map(node, 0); if (!irq) panic("Failed to get INTC IRQ"); @@ -167,13 +188,13 @@ static int __init intc_of_init(struct device_node *node, irq_set_handler_data(irq, domain); /* tell the kernel which irq is used for performance monitoring */ - cp0_perfcount_irq = irq_create_mapping(domain, 9); + rt_perfcount_irq = irq_create_mapping(domain, 9); return 0; } static struct of_device_id __initdata of_irq_ids[] = { - { .compatible = "mti,cpu-interrupt-controller", .data = mips_cpu_intc_init }, + { .compatible = "mti,cpu-interrupt-controller", .data = mips_cpu_irq_of_init }, { .compatible = "ralink,rt2880-intc", .data = intc_of_init }, {}, }; diff --git a/arch/mips/ralink/mt7620.c b/arch/mips/ralink/mt7620.c index a3ad56c2372d..2ea5ff6dc22e 100644 --- a/arch/mips/ralink/mt7620.c +++ b/arch/mips/ralink/mt7620.c @@ -17,124 +17,214 @@ #include <asm/mipsregs.h> #include <asm/mach-ralink/ralink_regs.h> #include <asm/mach-ralink/mt7620.h> +#include <asm/mach-ralink/pinmux.h> #include "common.h" +/* analog */ +#define PMU0_CFG 0x88 +#define PMU_SW_SET BIT(28) +#define A_DCDC_EN BIT(24) +#define A_SSC_PERI BIT(19) +#define A_SSC_GEN BIT(18) +#define A_SSC_M 0x3 +#define A_SSC_S 16 +#define A_DLY_M 0x7 +#define A_DLY_S 8 +#define A_VTUNE_M 0xff + +/* digital */ +#define PMU1_CFG 0x8C +#define DIG_SW_SEL BIT(25) + +/* is this a MT7620 or a MT7628 */ +enum mt762x_soc_type mt762x_soc; + /* does the board have sdram or ddram */ static int dram_type; -static struct ralink_pinmux_grp mode_mux[] = { - { - .name = "i2c", - .mask = MT7620_GPIO_MODE_I2C, - .gpio_first = 1, - .gpio_last = 2, - }, { - .name = "spi", - .mask = MT7620_GPIO_MODE_SPI, - .gpio_first = 3, - .gpio_last = 6, - }, { - .name = "uartlite", - .mask = MT7620_GPIO_MODE_UART1, - .gpio_first = 15, - .gpio_last = 16, - }, { - .name = "wdt", - .mask = MT7620_GPIO_MODE_WDT, - .gpio_first = 17, - .gpio_last = 17, - }, { - .name = "mdio", - .mask = MT7620_GPIO_MODE_MDIO, - .gpio_first = 22, - .gpio_last = 23, - }, { - .name = "rgmii1", - .mask = MT7620_GPIO_MODE_RGMII1, - .gpio_first = 24, - .gpio_last = 35, - }, { - .name = "spi refclk", - .mask = MT7620_GPIO_MODE_SPI_REF_CLK, - .gpio_first = 37, - .gpio_last = 39, - }, { - .name = "jtag", - .mask = MT7620_GPIO_MODE_JTAG, - .gpio_first = 40, - .gpio_last = 44, - }, { - /* shared lines with jtag */ - .name = "ephy", - .mask = MT7620_GPIO_MODE_EPHY, - .gpio_first = 40, - .gpio_last = 44, - 
}, { - .name = "nand", - .mask = MT7620_GPIO_MODE_JTAG, - .gpio_first = 45, - .gpio_last = 59, - }, { - .name = "rgmii2", - .mask = MT7620_GPIO_MODE_RGMII2, - .gpio_first = 60, - .gpio_last = 71, - }, { - .name = "wled", - .mask = MT7620_GPIO_MODE_WLED, - .gpio_first = 72, - .gpio_last = 72, - }, {0} +static struct rt2880_pmx_func i2c_grp[] = { FUNC("i2c", 0, 1, 2) }; +static struct rt2880_pmx_func spi_grp[] = { FUNC("spi", 0, 3, 4) }; +static struct rt2880_pmx_func uartlite_grp[] = { FUNC("uartlite", 0, 15, 2) }; +static struct rt2880_pmx_func mdio_grp[] = { FUNC("mdio", 0, 22, 2) }; +static struct rt2880_pmx_func rgmii1_grp[] = { FUNC("rgmii1", 0, 24, 12) }; +static struct rt2880_pmx_func refclk_grp[] = { FUNC("spi refclk", 0, 37, 3) }; +static struct rt2880_pmx_func ephy_grp[] = { FUNC("ephy", 0, 40, 5) }; +static struct rt2880_pmx_func rgmii2_grp[] = { FUNC("rgmii2", 0, 60, 12) }; +static struct rt2880_pmx_func wled_grp[] = { FUNC("wled", 0, 72, 1) }; +static struct rt2880_pmx_func pa_grp[] = { FUNC("pa", 0, 18, 4) }; +static struct rt2880_pmx_func uartf_grp[] = { + FUNC("uartf", MT7620_GPIO_MODE_UARTF, 7, 8), + FUNC("pcm uartf", MT7620_GPIO_MODE_PCM_UARTF, 7, 8), + FUNC("pcm i2s", MT7620_GPIO_MODE_PCM_I2S, 7, 8), + FUNC("i2s uartf", MT7620_GPIO_MODE_I2S_UARTF, 7, 8), + FUNC("pcm gpio", MT7620_GPIO_MODE_PCM_GPIO, 11, 4), + FUNC("gpio uartf", MT7620_GPIO_MODE_GPIO_UARTF, 7, 4), + FUNC("gpio i2s", MT7620_GPIO_MODE_GPIO_I2S, 7, 4), +}; +static struct rt2880_pmx_func wdt_grp[] = { + FUNC("wdt rst", 0, 17, 1), + FUNC("wdt refclk", 0, 17, 1), + }; +static struct rt2880_pmx_func pcie_rst_grp[] = { + FUNC("pcie rst", MT7620_GPIO_MODE_PCIE_RST, 36, 1), + FUNC("pcie refclk", MT7620_GPIO_MODE_PCIE_REF, 36, 1) +}; +static struct rt2880_pmx_func nd_sd_grp[] = { + FUNC("nand", MT7620_GPIO_MODE_NAND, 45, 15), + FUNC("sd", MT7620_GPIO_MODE_SD, 45, 15) +}; + +static struct rt2880_pmx_group mt7620a_pinmux_data[] = { + GRP("i2c", i2c_grp, 1, MT7620_GPIO_MODE_I2C), + GRP("uartf", uartf_grp, MT7620_GPIO_MODE_UART0_MASK, + MT7620_GPIO_MODE_UART0_SHIFT), + GRP("spi", spi_grp, 1, MT7620_GPIO_MODE_SPI), + GRP("uartlite", uartlite_grp, 1, MT7620_GPIO_MODE_UART1), + GRP_G("wdt", wdt_grp, MT7620_GPIO_MODE_WDT_MASK, + MT7620_GPIO_MODE_WDT_GPIO, MT7620_GPIO_MODE_WDT_SHIFT), + GRP("mdio", mdio_grp, 1, MT7620_GPIO_MODE_MDIO), + GRP("rgmii1", rgmii1_grp, 1, MT7620_GPIO_MODE_RGMII1), + GRP("spi refclk", refclk_grp, 1, MT7620_GPIO_MODE_SPI_REF_CLK), + GRP_G("pcie", pcie_rst_grp, MT7620_GPIO_MODE_PCIE_MASK, + MT7620_GPIO_MODE_PCIE_GPIO, MT7620_GPIO_MODE_PCIE_SHIFT), + GRP_G("nd_sd", nd_sd_grp, MT7620_GPIO_MODE_ND_SD_MASK, + MT7620_GPIO_MODE_ND_SD_GPIO, MT7620_GPIO_MODE_ND_SD_SHIFT), + GRP("rgmii2", rgmii2_grp, 1, MT7620_GPIO_MODE_RGMII2), + GRP("wled", wled_grp, 1, MT7620_GPIO_MODE_WLED), + GRP("ephy", ephy_grp, 1, MT7620_GPIO_MODE_EPHY), + GRP("pa", pa_grp, 1, MT7620_GPIO_MODE_PA), + { 0 } +}; + +static struct rt2880_pmx_func pwm1_grp_mt7628[] = { + FUNC("sdcx", 3, 19, 1), + FUNC("utif", 2, 19, 1), + FUNC("gpio", 1, 19, 1), + FUNC("pwm", 0, 19, 1), +}; + +static struct rt2880_pmx_func pwm0_grp_mt7628[] = { + FUNC("sdcx", 3, 18, 1), + FUNC("utif", 2, 18, 1), + FUNC("gpio", 1, 18, 1), + FUNC("pwm", 0, 18, 1), +}; + +static struct rt2880_pmx_func uart2_grp_mt7628[] = { + FUNC("sdcx", 3, 20, 2), + FUNC("pwm", 2, 20, 2), + FUNC("gpio", 1, 20, 2), + FUNC("uart", 0, 20, 2), +}; + +static struct rt2880_pmx_func uart1_grp_mt7628[] = { + FUNC("sdcx", 3, 45, 2), + FUNC("pwm", 2, 45, 2), + FUNC("gpio", 1, 45, 2), + FUNC("uart", 0, 
45, 2), +}; + +static struct rt2880_pmx_func i2c_grp_mt7628[] = { + FUNC("-", 3, 4, 2), + FUNC("debug", 2, 4, 2), + FUNC("gpio", 1, 4, 2), + FUNC("i2c", 0, 4, 2), +}; + +static struct rt2880_pmx_func refclk_grp_mt7628[] = { FUNC("reclk", 0, 36, 1) }; +static struct rt2880_pmx_func perst_grp_mt7628[] = { FUNC("perst", 0, 37, 1) }; +static struct rt2880_pmx_func wdt_grp_mt7628[] = { FUNC("wdt", 0, 15, 38) }; +static struct rt2880_pmx_func spi_grp_mt7628[] = { FUNC("spi", 0, 7, 4) }; + +static struct rt2880_pmx_func sd_mode_grp_mt7628[] = { + FUNC("jtag", 3, 22, 8), + FUNC("utif", 2, 22, 8), + FUNC("gpio", 1, 22, 8), + FUNC("sdcx", 0, 22, 8), +}; + +static struct rt2880_pmx_func uart0_grp_mt7628[] = { + FUNC("-", 3, 12, 2), + FUNC("-", 2, 12, 2), + FUNC("gpio", 1, 12, 2), + FUNC("uart", 0, 12, 2), +}; + +static struct rt2880_pmx_func i2s_grp_mt7628[] = { + FUNC("antenna", 3, 0, 4), + FUNC("pcm", 2, 0, 4), + FUNC("gpio", 1, 0, 4), + FUNC("i2s", 0, 0, 4), +}; + +static struct rt2880_pmx_func spi_cs1_grp_mt7628[] = { + FUNC("-", 3, 6, 1), + FUNC("refclk", 2, 6, 1), + FUNC("gpio", 1, 6, 1), + FUNC("spi", 0, 6, 1), +}; + +static struct rt2880_pmx_func spis_grp_mt7628[] = { + FUNC("pwm", 3, 14, 4), + FUNC("util", 2, 14, 4), + FUNC("gpio", 1, 14, 4), + FUNC("spis", 0, 14, 4), }; -static struct ralink_pinmux_grp uart_mux[] = { - { - .name = "uartf", - .mask = MT7620_GPIO_MODE_UARTF, - .gpio_first = 7, - .gpio_last = 14, - }, { - .name = "pcm uartf", - .mask = MT7620_GPIO_MODE_PCM_UARTF, - .gpio_first = 7, - .gpio_last = 14, - }, { - .name = "pcm i2s", - .mask = MT7620_GPIO_MODE_PCM_I2S, - .gpio_first = 7, - .gpio_last = 14, - }, { - .name = "i2s uartf", - .mask = MT7620_GPIO_MODE_I2S_UARTF, - .gpio_first = 7, - .gpio_last = 14, - }, { - .name = "pcm gpio", - .mask = MT7620_GPIO_MODE_PCM_GPIO, - .gpio_first = 11, - .gpio_last = 14, - }, { - .name = "gpio uartf", - .mask = MT7620_GPIO_MODE_GPIO_UARTF, - .gpio_first = 7, - .gpio_last = 10, - }, { - .name = "gpio i2s", - .mask = MT7620_GPIO_MODE_GPIO_I2S, - .gpio_first = 7, - .gpio_last = 10, - }, { - .name = "gpio", - .mask = MT7620_GPIO_MODE_GPIO, - }, {0} +static struct rt2880_pmx_func gpio_grp_mt7628[] = { + FUNC("pcie", 3, 11, 1), + FUNC("refclk", 2, 11, 1), + FUNC("gpio", 1, 11, 1), + FUNC("gpio", 0, 11, 1), }; -struct ralink_pinmux rt_gpio_pinmux = { - .mode = mode_mux, - .uart = uart_mux, - .uart_shift = MT7620_GPIO_MODE_UART0_SHIFT, - .uart_mask = MT7620_GPIO_MODE_UART0_MASK, +#define MT7628_GPIO_MODE_MASK 0x3 + +#define MT7628_GPIO_MODE_PWM1 30 +#define MT7628_GPIO_MODE_PWM0 28 +#define MT7628_GPIO_MODE_UART2 26 +#define MT7628_GPIO_MODE_UART1 24 +#define MT7628_GPIO_MODE_I2C 20 +#define MT7628_GPIO_MODE_REFCLK 18 +#define MT7628_GPIO_MODE_PERST 16 +#define MT7628_GPIO_MODE_WDT 14 +#define MT7628_GPIO_MODE_SPI 12 +#define MT7628_GPIO_MODE_SDMODE 10 +#define MT7628_GPIO_MODE_UART0 8 +#define MT7628_GPIO_MODE_I2S 6 +#define MT7628_GPIO_MODE_CS1 4 +#define MT7628_GPIO_MODE_SPIS 2 +#define MT7628_GPIO_MODE_GPIO 0 + +static struct rt2880_pmx_group mt7628an_pinmux_data[] = { + GRP_G("pmw1", pwm1_grp_mt7628, MT7628_GPIO_MODE_MASK, + 1, MT7628_GPIO_MODE_PWM1), + GRP_G("pmw1", pwm0_grp_mt7628, MT7628_GPIO_MODE_MASK, + 1, MT7628_GPIO_MODE_PWM0), + GRP_G("uart2", uart2_grp_mt7628, MT7628_GPIO_MODE_MASK, + 1, MT7628_GPIO_MODE_UART2), + GRP_G("uart1", uart1_grp_mt7628, MT7628_GPIO_MODE_MASK, + 1, MT7628_GPIO_MODE_UART1), + GRP_G("i2c", i2c_grp_mt7628, MT7628_GPIO_MODE_MASK, + 1, MT7628_GPIO_MODE_I2C), + GRP("refclk", refclk_grp_mt7628, 1, 
MT7628_GPIO_MODE_REFCLK), + GRP("perst", perst_grp_mt7628, 1, MT7628_GPIO_MODE_PERST), + GRP("wdt", wdt_grp_mt7628, 1, MT7628_GPIO_MODE_WDT), + GRP("spi", spi_grp_mt7628, 1, MT7628_GPIO_MODE_SPI), + GRP_G("sdmode", sd_mode_grp_mt7628, MT7628_GPIO_MODE_MASK, + 1, MT7628_GPIO_MODE_SDMODE), + GRP_G("uart0", uart0_grp_mt7628, MT7628_GPIO_MODE_MASK, + 1, MT7628_GPIO_MODE_UART0), + GRP_G("i2s", i2s_grp_mt7628, MT7628_GPIO_MODE_MASK, + 1, MT7628_GPIO_MODE_I2S), + GRP_G("spi cs1", spi_cs1_grp_mt7628, MT7628_GPIO_MODE_MASK, + 1, MT7628_GPIO_MODE_CS1), + GRP_G("spis", spis_grp_mt7628, MT7628_GPIO_MODE_MASK, + 1, MT7628_GPIO_MODE_SPIS), + GRP_G("gpio", gpio_grp_mt7628, MT7628_GPIO_MODE_MASK, + 1, MT7628_GPIO_MODE_GPIO), + { 0 } }; static __init u32 @@ -287,29 +377,42 @@ void __init ralink_clk_init(void) xtal_rate = mt7620_get_xtal_rate(); - cpu_pll_rate = mt7620_get_cpu_pll_rate(xtal_rate); - pll_rate = mt7620_get_pll_rate(xtal_rate, cpu_pll_rate); - - cpu_rate = mt7620_get_cpu_rate(pll_rate); - dram_rate = mt7620_get_dram_rate(pll_rate); - sys_rate = mt7620_get_sys_rate(cpu_rate); - periph_rate = mt7620_get_periph_rate(xtal_rate); - #define RFMT(label) label ":%lu.%03luMHz " #define RINT(x) ((x) / 1000000) #define RFRAC(x) (((x) / 1000) % 1000) - pr_debug(RFMT("XTAL") RFMT("CPU_PLL") RFMT("PLL"), - RINT(xtal_rate), RFRAC(xtal_rate), - RINT(cpu_pll_rate), RFRAC(cpu_pll_rate), - RINT(pll_rate), RFRAC(pll_rate)); + if (mt762x_soc == MT762X_SOC_MT7628AN) { + if (xtal_rate == MHZ(40)) + cpu_rate = MHZ(580); + else + cpu_rate = MHZ(575); + dram_rate = sys_rate = cpu_rate / 3; + periph_rate = MHZ(40); + + ralink_clk_add("10000d00.uartlite", periph_rate); + ralink_clk_add("10000e00.uartlite", periph_rate); + } else { + cpu_pll_rate = mt7620_get_cpu_pll_rate(xtal_rate); + pll_rate = mt7620_get_pll_rate(xtal_rate, cpu_pll_rate); + + cpu_rate = mt7620_get_cpu_rate(pll_rate); + dram_rate = mt7620_get_dram_rate(pll_rate); + sys_rate = mt7620_get_sys_rate(cpu_rate); + periph_rate = mt7620_get_periph_rate(xtal_rate); + + pr_debug(RFMT("XTAL") RFMT("CPU_PLL") RFMT("PLL"), + RINT(xtal_rate), RFRAC(xtal_rate), + RINT(cpu_pll_rate), RFRAC(cpu_pll_rate), + RINT(pll_rate), RFRAC(pll_rate)); + + ralink_clk_add("10000500.uart", periph_rate); + } pr_debug(RFMT("CPU") RFMT("DRAM") RFMT("SYS") RFMT("PERIPH"), RINT(cpu_rate), RFRAC(cpu_rate), RINT(dram_rate), RFRAC(dram_rate), RINT(sys_rate), RFRAC(sys_rate), RINT(periph_rate), RFRAC(periph_rate)); - #undef RFRAC #undef RINT #undef RFMT @@ -317,9 +420,9 @@ void __init ralink_clk_init(void) ralink_clk_add("cpu", cpu_rate); ralink_clk_add("10000100.timer", periph_rate); ralink_clk_add("10000120.watchdog", periph_rate); - ralink_clk_add("10000500.uart", periph_rate); ralink_clk_add("10000b00.spi", sys_rate); ralink_clk_add("10000c00.uartlite", periph_rate); + ralink_clk_add("10180000.wmac", xtal_rate); } void __init ralink_of_remap(void) @@ -331,6 +434,52 @@ void __init ralink_of_remap(void) panic("Failed to remap core resources"); } +static __init void +mt7620_dram_init(struct ralink_soc_info *soc_info) +{ + switch (dram_type) { + case SYSCFG0_DRAM_TYPE_SDRAM: + pr_info("Board has SDRAM\n"); + soc_info->mem_size_min = MT7620_SDRAM_SIZE_MIN; + soc_info->mem_size_max = MT7620_SDRAM_SIZE_MAX; + break; + + case SYSCFG0_DRAM_TYPE_DDR1: + pr_info("Board has DDR1\n"); + soc_info->mem_size_min = MT7620_DDR1_SIZE_MIN; + soc_info->mem_size_max = MT7620_DDR1_SIZE_MAX; + break; + + case SYSCFG0_DRAM_TYPE_DDR2: + pr_info("Board has DDR2\n"); + soc_info->mem_size_min = MT7620_DDR2_SIZE_MIN; 
+ soc_info->mem_size_max = MT7620_DDR2_SIZE_MAX; + break; + default: + BUG(); + } +} + +static __init void +mt7628_dram_init(struct ralink_soc_info *soc_info) +{ + switch (dram_type) { + case SYSCFG0_DRAM_TYPE_DDR1_MT7628: + pr_info("Board has DDR1\n"); + soc_info->mem_size_min = MT7620_DDR1_SIZE_MIN; + soc_info->mem_size_max = MT7620_DDR1_SIZE_MAX; + break; + + case SYSCFG0_DRAM_TYPE_DDR2_MT7628: + pr_info("Board has DDR2\n"); + soc_info->mem_size_min = MT7620_DDR2_SIZE_MIN; + soc_info->mem_size_max = MT7620_DDR2_SIZE_MAX; + break; + default: + BUG(); + } +} + void prom_soc_init(struct ralink_soc_info *soc_info) { void __iomem *sysc = (void __iomem *) KSEG1ADDR(MT7620_SYSC_BASE); @@ -339,22 +488,36 @@ void prom_soc_init(struct ralink_soc_info *soc_info) u32 n1; u32 rev; u32 cfg0; + u32 pmu0; + u32 pmu1; + u32 bga; n0 = __raw_readl(sysc + SYSC_REG_CHIP_NAME0); n1 = __raw_readl(sysc + SYSC_REG_CHIP_NAME1); - - if (n0 == MT7620N_CHIP_NAME0 && n1 == MT7620N_CHIP_NAME1) { - name = "MT7620N"; - soc_info->compatible = "ralink,mt7620n-soc"; - } else if (n0 == MT7620A_CHIP_NAME0 && n1 == MT7620A_CHIP_NAME1) { - name = "MT7620A"; - soc_info->compatible = "ralink,mt7620a-soc"; + rev = __raw_readl(sysc + SYSC_REG_CHIP_REV); + bga = (rev >> CHIP_REV_PKG_SHIFT) & CHIP_REV_PKG_MASK; + + if (n0 == MT7620_CHIP_NAME0 && n1 == MT7620_CHIP_NAME1) { + if (bga) { + mt762x_soc = MT762X_SOC_MT7620A; + name = "MT7620A"; + soc_info->compatible = "ralink,mt7620a-soc"; + } else { + mt762x_soc = MT762X_SOC_MT7620N; + name = "MT7620N"; + soc_info->compatible = "ralink,mt7620n-soc"; +#ifdef CONFIG_PCI + panic("mt7620n is only supported for non pci kernels"); +#endif + } + } else if (n0 == MT7620_CHIP_NAME0 && n1 == MT7628_CHIP_NAME1) { + mt762x_soc = MT762X_SOC_MT7628AN; + name = "MT7628AN"; + soc_info->compatible = "ralink,mt7628an-soc"; } else { - panic("mt7620: unknown SoC, n0:%08x n1:%08x", n0, n1); + panic("mt762x: unknown SoC, n0:%08x n1:%08x\n", n0, n1); } - rev = __raw_readl(sysc + SYSC_REG_CHIP_REV); - snprintf(soc_info->sys_type, RAMIPS_SYS_TYPE_LEN, "Ralink %s ver:%u eco:%u", name, @@ -364,26 +527,22 @@ void prom_soc_init(struct ralink_soc_info *soc_info) cfg0 = __raw_readl(sysc + SYSC_REG_SYSTEM_CONFIG0); dram_type = (cfg0 >> SYSCFG0_DRAM_TYPE_SHIFT) & SYSCFG0_DRAM_TYPE_MASK; - switch (dram_type) { - case SYSCFG0_DRAM_TYPE_SDRAM: - pr_info("Board has SDRAM\n"); - soc_info->mem_size_min = MT7620_SDRAM_SIZE_MIN; - soc_info->mem_size_max = MT7620_SDRAM_SIZE_MAX; - break; - - case SYSCFG0_DRAM_TYPE_DDR1: - pr_info("Board has DDR1\n"); - soc_info->mem_size_min = MT7620_DDR1_SIZE_MIN; - soc_info->mem_size_max = MT7620_DDR1_SIZE_MAX; - break; - - case SYSCFG0_DRAM_TYPE_DDR2: - pr_info("Board has DDR2\n"); - soc_info->mem_size_min = MT7620_DDR2_SIZE_MIN; - soc_info->mem_size_max = MT7620_DDR2_SIZE_MAX; - break; - default: - BUG(); - } soc_info->mem_base = MT7620_DRAM_BASE; + if (mt762x_soc == MT762X_SOC_MT7628AN) + mt7628_dram_init(soc_info); + else + mt7620_dram_init(soc_info); + + pmu0 = __raw_readl(sysc + PMU0_CFG); + pmu1 = __raw_readl(sysc + PMU1_CFG); + + pr_info("Analog PMU set to %s control\n", + (pmu0 & PMU_SW_SET) ? ("sw") : ("hw")); + pr_info("Digital PMU set to %s control\n", + (pmu1 & DIG_SW_SEL) ? 
("sw") : ("hw")); + + if (mt762x_soc == MT762X_SOC_MT7628AN) + rt2880_pinmux_data = mt7628an_pinmux_data; + else + rt2880_pinmux_data = mt7620a_pinmux_data; } diff --git a/arch/mips/ralink/of.c b/arch/mips/ralink/of.c index 7c4598cb6de8..0d30dcd63246 100644 --- a/arch/mips/ralink/of.c +++ b/arch/mips/ralink/of.c @@ -53,6 +53,17 @@ void __init device_tree_init(void) unflatten_and_copy_device_tree(); } +static int memory_dtb; + +static int __init early_init_dt_find_memory(unsigned long node, + const char *uname, int depth, void *data) +{ + if (depth == 1 && !strcmp(uname, "memory@0")) + memory_dtb = 1; + + return 0; +} + void __init plat_mem_setup(void) { set_io_port_base(KSEG1); @@ -63,7 +74,12 @@ void __init plat_mem_setup(void) */ __dt_setup_arch(__dtb_start); - if (soc_info.mem_size) + strlcpy(arcs_cmdline, boot_command_line, COMMAND_LINE_SIZE); + + of_scan_flat_dt(early_init_dt_find_memory, NULL); + if (memory_dtb) + of_scan_flat_dt(early_init_dt_scan_memory, NULL); + else if (soc_info.mem_size) add_memory_region(soc_info.mem_base, soc_info.mem_size * SZ_1M, BOOT_MEM_RAM); else @@ -74,19 +90,9 @@ void __init plat_mem_setup(void) static int __init plat_of_setup(void) { - static struct of_device_id of_ids[3]; - int len = sizeof(of_ids[0].compatible); - - if (!of_have_populated_dt()) - panic("device tree not present"); - - strlcpy(of_ids[0].compatible, soc_info.compatible, len); - strlcpy(of_ids[1].compatible, "palmbus", len); - - if (of_platform_populate(NULL, of_ids, NULL, NULL)) - panic("failed to populate DT"); + __dt_register_buses(soc_info.compatible, "palmbus"); - /* make sure ithat the reset controller is setup early */ + /* make sure that the reset controller is setup early */ ralink_rst_init(); return 0; diff --git a/arch/mips/ralink/prom.c b/arch/mips/ralink/prom.c index 9c64f029d047..09419f67da39 100644 --- a/arch/mips/ralink/prom.c +++ b/arch/mips/ralink/prom.c @@ -18,6 +18,7 @@ #include "common.h" struct ralink_soc_info soc_info; +struct rt2880_pmx_group *rt2880_pinmux_data = NULL; const char *get_system_type(void) { diff --git a/arch/mips/ralink/rt288x.c b/arch/mips/ralink/rt288x.c index f87de1ab2198..738cec865f41 100644 --- a/arch/mips/ralink/rt288x.c +++ b/arch/mips/ralink/rt288x.c @@ -17,46 +17,27 @@ #include <asm/mipsregs.h> #include <asm/mach-ralink/ralink_regs.h> #include <asm/mach-ralink/rt288x.h> +#include <asm/mach-ralink/pinmux.h> #include "common.h" -static struct ralink_pinmux_grp mode_mux[] = { - { - .name = "i2c", - .mask = RT2880_GPIO_MODE_I2C, - .gpio_first = 1, - .gpio_last = 2, - }, { - .name = "spi", - .mask = RT2880_GPIO_MODE_SPI, - .gpio_first = 3, - .gpio_last = 6, - }, { - .name = "uartlite", - .mask = RT2880_GPIO_MODE_UART0, - .gpio_first = 7, - .gpio_last = 14, - }, { - .name = "jtag", - .mask = RT2880_GPIO_MODE_JTAG, - .gpio_first = 17, - .gpio_last = 21, - }, { - .name = "mdio", - .mask = RT2880_GPIO_MODE_MDIO, - .gpio_first = 22, - .gpio_last = 23, - }, { - .name = "sdram", - .mask = RT2880_GPIO_MODE_SDRAM, - .gpio_first = 24, - .gpio_last = 39, - }, { - .name = "pci", - .mask = RT2880_GPIO_MODE_PCI, - .gpio_first = 40, - .gpio_last = 71, - }, {0} +static struct rt2880_pmx_func i2c_func[] = { FUNC("i2c", 0, 1, 2) }; +static struct rt2880_pmx_func spi_func[] = { FUNC("spi", 0, 3, 4) }; +static struct rt2880_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 7, 8) }; +static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) }; +static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) }; +static struct rt2880_pmx_func 
sdram_func[] = { FUNC("sdram", 0, 24, 16) }; +static struct rt2880_pmx_func pci_func[] = { FUNC("pci", 0, 40, 32) }; + +static struct rt2880_pmx_group rt2880_pinmux_data_act[] = { + GRP("i2c", i2c_func, 1, RT2880_GPIO_MODE_I2C), + GRP("spi", spi_func, 1, RT2880_GPIO_MODE_SPI), + GRP("uartlite", uartlite_func, 1, RT2880_GPIO_MODE_UART0), + GRP("jtag", jtag_func, 1, RT2880_GPIO_MODE_JTAG), + GRP("mdio", mdio_func, 1, RT2880_GPIO_MODE_MDIO), + GRP("sdram", sdram_func, 1, RT2880_GPIO_MODE_SDRAM), + GRP("pci", pci_func, 1, RT2880_GPIO_MODE_PCI), + { 0 } }; static void rt288x_wdt_reset(void) @@ -69,14 +50,9 @@ static void rt288x_wdt_reset(void) rt_sysc_w32(t, SYSC_REG_CLKCFG); } -struct ralink_pinmux rt_gpio_pinmux = { - .mode = mode_mux, - .wdt_reset = rt288x_wdt_reset, -}; - void __init ralink_clk_init(void) { - unsigned long cpu_rate; + unsigned long cpu_rate, wmac_rate = 40000000; u32 t = rt_sysc_r32(SYSC_REG_SYSTEM_CONFIG); t = ((t >> SYSTEM_CONFIG_CPUCLK_SHIFT) & SYSTEM_CONFIG_CPUCLK_MASK); @@ -101,6 +77,7 @@ void __init ralink_clk_init(void) ralink_clk_add("300500.uart", cpu_rate / 2); ralink_clk_add("300c00.uartlite", cpu_rate / 2); ralink_clk_add("400000.ethernet", cpu_rate / 2); + ralink_clk_add("480000.wmac", wmac_rate); } void __init ralink_of_remap(void) @@ -140,4 +117,6 @@ void prom_soc_init(struct ralink_soc_info *soc_info) soc_info->mem_base = RT2880_SDRAM_BASE; soc_info->mem_size_min = RT2880_MEM_SIZE_MIN; soc_info->mem_size_max = RT2880_MEM_SIZE_MAX; + + rt2880_pinmux_data = rt2880_pinmux_data_act; } diff --git a/arch/mips/ralink/rt305x.c b/arch/mips/ralink/rt305x.c index bb82a82da9e7..c40776ab67db 100644 --- a/arch/mips/ralink/rt305x.c +++ b/arch/mips/ralink/rt305x.c @@ -17,90 +17,78 @@ #include <asm/mipsregs.h> #include <asm/mach-ralink/ralink_regs.h> #include <asm/mach-ralink/rt305x.h> +#include <asm/mach-ralink/pinmux.h> #include "common.h" enum rt305x_soc_type rt305x_soc; -static struct ralink_pinmux_grp mode_mux[] = { - { - .name = "i2c", - .mask = RT305X_GPIO_MODE_I2C, - .gpio_first = RT305X_GPIO_I2C_SD, - .gpio_last = RT305X_GPIO_I2C_SCLK, - }, { - .name = "spi", - .mask = RT305X_GPIO_MODE_SPI, - .gpio_first = RT305X_GPIO_SPI_EN, - .gpio_last = RT305X_GPIO_SPI_CLK, - }, { - .name = "uartlite", - .mask = RT305X_GPIO_MODE_UART1, - .gpio_first = RT305X_GPIO_UART1_TXD, - .gpio_last = RT305X_GPIO_UART1_RXD, - }, { - .name = "jtag", - .mask = RT305X_GPIO_MODE_JTAG, - .gpio_first = RT305X_GPIO_JTAG_TDO, - .gpio_last = RT305X_GPIO_JTAG_TDI, - }, { - .name = "mdio", - .mask = RT305X_GPIO_MODE_MDIO, - .gpio_first = RT305X_GPIO_MDIO_MDC, - .gpio_last = RT305X_GPIO_MDIO_MDIO, - }, { - .name = "sdram", - .mask = RT305X_GPIO_MODE_SDRAM, - .gpio_first = RT305X_GPIO_SDRAM_MD16, - .gpio_last = RT305X_GPIO_SDRAM_MD31, - }, { - .name = "rgmii", - .mask = RT305X_GPIO_MODE_RGMII, - .gpio_first = RT305X_GPIO_GE0_TXD0, - .gpio_last = RT305X_GPIO_GE0_RXCLK, - }, {0} +static struct rt2880_pmx_func i2c_func[] = { FUNC("i2c", 0, 1, 2) }; +static struct rt2880_pmx_func spi_func[] = { FUNC("spi", 0, 3, 4) }; +static struct rt2880_pmx_func uartf_func[] = { + FUNC("uartf", RT305X_GPIO_MODE_UARTF, 7, 8), + FUNC("pcm uartf", RT305X_GPIO_MODE_PCM_UARTF, 7, 8), + FUNC("pcm i2s", RT305X_GPIO_MODE_PCM_I2S, 7, 8), + FUNC("i2s uartf", RT305X_GPIO_MODE_I2S_UARTF, 7, 8), + FUNC("pcm gpio", RT305X_GPIO_MODE_PCM_GPIO, 11, 4), + FUNC("gpio uartf", RT305X_GPIO_MODE_GPIO_UARTF, 7, 4), + FUNC("gpio i2s", RT305X_GPIO_MODE_GPIO_I2S, 7, 4), +}; +static struct rt2880_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 15, 
2) }; +static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) }; +static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) }; +static struct rt2880_pmx_func rt5350_led_func[] = { FUNC("led", 0, 22, 5) }; +static struct rt2880_pmx_func rt5350_cs1_func[] = { + FUNC("spi_cs1", 0, 27, 1), + FUNC("wdg_cs1", 1, 27, 1), +}; +static struct rt2880_pmx_func sdram_func[] = { FUNC("sdram", 0, 24, 16) }; +static struct rt2880_pmx_func rt3352_rgmii_func[] = { + FUNC("rgmii", 0, 24, 12) +}; +static struct rt2880_pmx_func rgmii_func[] = { FUNC("rgmii", 0, 40, 12) }; +static struct rt2880_pmx_func rt3352_lna_func[] = { FUNC("lna", 0, 36, 2) }; +static struct rt2880_pmx_func rt3352_pa_func[] = { FUNC("pa", 0, 38, 2) }; +static struct rt2880_pmx_func rt3352_led_func[] = { FUNC("led", 0, 40, 5) }; + +static struct rt2880_pmx_group rt3050_pinmux_data[] = { + GRP("i2c", i2c_func, 1, RT305X_GPIO_MODE_I2C), + GRP("spi", spi_func, 1, RT305X_GPIO_MODE_SPI), + GRP("uartf", uartf_func, RT305X_GPIO_MODE_UART0_MASK, + RT305X_GPIO_MODE_UART0_SHIFT), + GRP("uartlite", uartlite_func, 1, RT305X_GPIO_MODE_UART1), + GRP("jtag", jtag_func, 1, RT305X_GPIO_MODE_JTAG), + GRP("mdio", mdio_func, 1, RT305X_GPIO_MODE_MDIO), + GRP("rgmii", rgmii_func, 1, RT305X_GPIO_MODE_RGMII), + GRP("sdram", sdram_func, 1, RT305X_GPIO_MODE_SDRAM), + { 0 } }; -static struct ralink_pinmux_grp uart_mux[] = { - { - .name = "uartf", - .mask = RT305X_GPIO_MODE_UARTF, - .gpio_first = RT305X_GPIO_7, - .gpio_last = RT305X_GPIO_14, - }, { - .name = "pcm uartf", - .mask = RT305X_GPIO_MODE_PCM_UARTF, - .gpio_first = RT305X_GPIO_7, - .gpio_last = RT305X_GPIO_14, - }, { - .name = "pcm i2s", - .mask = RT305X_GPIO_MODE_PCM_I2S, - .gpio_first = RT305X_GPIO_7, - .gpio_last = RT305X_GPIO_14, - }, { - .name = "i2s uartf", - .mask = RT305X_GPIO_MODE_I2S_UARTF, - .gpio_first = RT305X_GPIO_7, - .gpio_last = RT305X_GPIO_14, - }, { - .name = "pcm gpio", - .mask = RT305X_GPIO_MODE_PCM_GPIO, - .gpio_first = RT305X_GPIO_10, - .gpio_last = RT305X_GPIO_14, - }, { - .name = "gpio uartf", - .mask = RT305X_GPIO_MODE_GPIO_UARTF, - .gpio_first = RT305X_GPIO_7, - .gpio_last = RT305X_GPIO_10, - }, { - .name = "gpio i2s", - .mask = RT305X_GPIO_MODE_GPIO_I2S, - .gpio_first = RT305X_GPIO_7, - .gpio_last = RT305X_GPIO_10, - }, { - .name = "gpio", - .mask = RT305X_GPIO_MODE_GPIO, - }, {0} +static struct rt2880_pmx_group rt3352_pinmux_data[] = { + GRP("i2c", i2c_func, 1, RT305X_GPIO_MODE_I2C), + GRP("spi", spi_func, 1, RT305X_GPIO_MODE_SPI), + GRP("uartf", uartf_func, RT305X_GPIO_MODE_UART0_MASK, + RT305X_GPIO_MODE_UART0_SHIFT), + GRP("uartlite", uartlite_func, 1, RT305X_GPIO_MODE_UART1), + GRP("jtag", jtag_func, 1, RT305X_GPIO_MODE_JTAG), + GRP("mdio", mdio_func, 1, RT305X_GPIO_MODE_MDIO), + GRP("rgmii", rt3352_rgmii_func, 1, RT305X_GPIO_MODE_RGMII), + GRP("lna", rt3352_lna_func, 1, RT3352_GPIO_MODE_LNA), + GRP("pa", rt3352_pa_func, 1, RT3352_GPIO_MODE_PA), + GRP("led", rt3352_led_func, 1, RT5350_GPIO_MODE_PHY_LED), + { 0 } +}; + +static struct rt2880_pmx_group rt5350_pinmux_data[] = { + GRP("i2c", i2c_func, 1, RT305X_GPIO_MODE_I2C), + GRP("spi", spi_func, 1, RT305X_GPIO_MODE_SPI), + GRP("uartf", uartf_func, RT305X_GPIO_MODE_UART0_MASK, + RT305X_GPIO_MODE_UART0_SHIFT), + GRP("uartlite", uartlite_func, 1, RT305X_GPIO_MODE_UART1), + GRP("jtag", jtag_func, 1, RT305X_GPIO_MODE_JTAG), + GRP("led", rt5350_led_func, 1, RT5350_GPIO_MODE_PHY_LED), + GRP("spi_cs1", rt5350_cs1_func, 2, RT5350_GPIO_MODE_SPI_CS1), + { 0 } }; static void rt305x_wdt_reset(void) @@ -114,14 +102,6 
@@ static void rt305x_wdt_reset(void) rt_sysc_w32(t, SYSC_REG_SYSTEM_CONFIG); } -struct ralink_pinmux rt_gpio_pinmux = { - .mode = mode_mux, - .uart = uart_mux, - .uart_shift = RT305X_GPIO_MODE_UART0_SHIFT, - .uart_mask = RT305X_GPIO_MODE_UART0_MASK, - .wdt_reset = rt305x_wdt_reset, -}; - static unsigned long rt5350_get_mem_size(void) { void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT305X_SYSC_BASE); @@ -290,11 +270,14 @@ void prom_soc_init(struct ralink_soc_info *soc_info) soc_info->mem_base = RT305X_SDRAM_BASE; if (soc_is_rt5350()) { soc_info->mem_size = rt5350_get_mem_size(); + rt2880_pinmux_data = rt5350_pinmux_data; } else if (soc_is_rt305x() || soc_is_rt3350()) { soc_info->mem_size_min = RT305X_MEM_SIZE_MIN; soc_info->mem_size_max = RT305X_MEM_SIZE_MAX; + rt2880_pinmux_data = rt3050_pinmux_data; } else if (soc_is_rt3352()) { soc_info->mem_size_min = RT3352_MEM_SIZE_MIN; soc_info->mem_size_max = RT3352_MEM_SIZE_MAX; + rt2880_pinmux_data = rt3352_pinmux_data; } } diff --git a/arch/mips/ralink/rt3883.c b/arch/mips/ralink/rt3883.c index b474ac284b83..86a535c770d8 100644 --- a/arch/mips/ralink/rt3883.c +++ b/arch/mips/ralink/rt3883.c @@ -17,132 +17,50 @@ #include <asm/mipsregs.h> #include <asm/mach-ralink/ralink_regs.h> #include <asm/mach-ralink/rt3883.h> +#include <asm/mach-ralink/pinmux.h> #include "common.h" -static struct ralink_pinmux_grp mode_mux[] = { - { - .name = "i2c", - .mask = RT3883_GPIO_MODE_I2C, - .gpio_first = RT3883_GPIO_I2C_SD, - .gpio_last = RT3883_GPIO_I2C_SCLK, - }, { - .name = "spi", - .mask = RT3883_GPIO_MODE_SPI, - .gpio_first = RT3883_GPIO_SPI_CS0, - .gpio_last = RT3883_GPIO_SPI_MISO, - }, { - .name = "uartlite", - .mask = RT3883_GPIO_MODE_UART1, - .gpio_first = RT3883_GPIO_UART1_TXD, - .gpio_last = RT3883_GPIO_UART1_RXD, - }, { - .name = "jtag", - .mask = RT3883_GPIO_MODE_JTAG, - .gpio_first = RT3883_GPIO_JTAG_TDO, - .gpio_last = RT3883_GPIO_JTAG_TCLK, - }, { - .name = "mdio", - .mask = RT3883_GPIO_MODE_MDIO, - .gpio_first = RT3883_GPIO_MDIO_MDC, - .gpio_last = RT3883_GPIO_MDIO_MDIO, - }, { - .name = "ge1", - .mask = RT3883_GPIO_MODE_GE1, - .gpio_first = RT3883_GPIO_GE1_TXD0, - .gpio_last = RT3883_GPIO_GE1_RXCLK, - }, { - .name = "ge2", - .mask = RT3883_GPIO_MODE_GE2, - .gpio_first = RT3883_GPIO_GE2_TXD0, - .gpio_last = RT3883_GPIO_GE2_RXCLK, - }, { - .name = "pci", - .mask = RT3883_GPIO_MODE_PCI, - .gpio_first = RT3883_GPIO_PCI_AD0, - .gpio_last = RT3883_GPIO_PCI_AD31, - }, { - .name = "lna a", - .mask = RT3883_GPIO_MODE_LNA_A, - .gpio_first = RT3883_GPIO_LNA_PE_A0, - .gpio_last = RT3883_GPIO_LNA_PE_A2, - }, { - .name = "lna g", - .mask = RT3883_GPIO_MODE_LNA_G, - .gpio_first = RT3883_GPIO_LNA_PE_G0, - .gpio_last = RT3883_GPIO_LNA_PE_G2, - }, {0} +static struct rt2880_pmx_func i2c_func[] = { FUNC("i2c", 0, 1, 2) }; +static struct rt2880_pmx_func spi_func[] = { FUNC("spi", 0, 3, 4) }; +static struct rt2880_pmx_func uartf_func[] = { + FUNC("uartf", RT3883_GPIO_MODE_UARTF, 7, 8), + FUNC("pcm uartf", RT3883_GPIO_MODE_PCM_UARTF, 7, 8), + FUNC("pcm i2s", RT3883_GPIO_MODE_PCM_I2S, 7, 8), + FUNC("i2s uartf", RT3883_GPIO_MODE_I2S_UARTF, 7, 8), + FUNC("pcm gpio", RT3883_GPIO_MODE_PCM_GPIO, 11, 4), + FUNC("gpio uartf", RT3883_GPIO_MODE_GPIO_UARTF, 7, 4), + FUNC("gpio i2s", RT3883_GPIO_MODE_GPIO_I2S, 7, 4), }; - -static struct ralink_pinmux_grp uart_mux[] = { - { - .name = "uartf", - .mask = RT3883_GPIO_MODE_UARTF, - .gpio_first = RT3883_GPIO_7, - .gpio_last = RT3883_GPIO_14, - }, { - .name = "pcm uartf", - .mask = RT3883_GPIO_MODE_PCM_UARTF, - .gpio_first = 
RT3883_GPIO_7, - .gpio_last = RT3883_GPIO_14, - }, { - .name = "pcm i2s", - .mask = RT3883_GPIO_MODE_PCM_I2S, - .gpio_first = RT3883_GPIO_7, - .gpio_last = RT3883_GPIO_14, - }, { - .name = "i2s uartf", - .mask = RT3883_GPIO_MODE_I2S_UARTF, - .gpio_first = RT3883_GPIO_7, - .gpio_last = RT3883_GPIO_14, - }, { - .name = "pcm gpio", - .mask = RT3883_GPIO_MODE_PCM_GPIO, - .gpio_first = RT3883_GPIO_11, - .gpio_last = RT3883_GPIO_14, - }, { - .name = "gpio uartf", - .mask = RT3883_GPIO_MODE_GPIO_UARTF, - .gpio_first = RT3883_GPIO_7, - .gpio_last = RT3883_GPIO_10, - }, { - .name = "gpio i2s", - .mask = RT3883_GPIO_MODE_GPIO_I2S, - .gpio_first = RT3883_GPIO_7, - .gpio_last = RT3883_GPIO_10, - }, { - .name = "gpio", - .mask = RT3883_GPIO_MODE_GPIO, - }, {0} +static struct rt2880_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 15, 2) }; +static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) }; +static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) }; +static struct rt2880_pmx_func lna_a_func[] = { FUNC("lna a", 0, 32, 3) }; +static struct rt2880_pmx_func lna_g_func[] = { FUNC("lna a", 0, 35, 3) }; +static struct rt2880_pmx_func pci_func[] = { + FUNC("pci-dev", 0, 40, 32), + FUNC("pci-host2", 1, 40, 32), + FUNC("pci-host1", 2, 40, 32), + FUNC("pci-fnc", 3, 40, 32) }; - -static struct ralink_pinmux_grp pci_mux[] = { - { - .name = "pci-dev", - .mask = 0, - .gpio_first = RT3883_GPIO_PCI_AD0, - .gpio_last = RT3883_GPIO_PCI_AD31, - }, { - .name = "pci-host2", - .mask = 1, - .gpio_first = RT3883_GPIO_PCI_AD0, - .gpio_last = RT3883_GPIO_PCI_AD31, - }, { - .name = "pci-host1", - .mask = 2, - .gpio_first = RT3883_GPIO_PCI_AD0, - .gpio_last = RT3883_GPIO_PCI_AD31, - }, { - .name = "pci-fnc", - .mask = 3, - .gpio_first = RT3883_GPIO_PCI_AD0, - .gpio_last = RT3883_GPIO_PCI_AD31, - }, { - .name = "pci-gpio", - .mask = 7, - .gpio_first = RT3883_GPIO_PCI_AD0, - .gpio_last = RT3883_GPIO_PCI_AD31, - }, {0} +static struct rt2880_pmx_func ge1_func[] = { FUNC("ge1", 0, 72, 12) }; +static struct rt2880_pmx_func ge2_func[] = { FUNC("ge1", 0, 84, 12) }; + +static struct rt2880_pmx_group rt3883_pinmux_data[] = { + GRP("i2c", i2c_func, 1, RT3883_GPIO_MODE_I2C), + GRP("spi", spi_func, 1, RT3883_GPIO_MODE_SPI), + GRP("uartf", uartf_func, RT3883_GPIO_MODE_UART0_MASK, + RT3883_GPIO_MODE_UART0_SHIFT), + GRP("uartlite", uartlite_func, 1, RT3883_GPIO_MODE_UART1), + GRP("jtag", jtag_func, 1, RT3883_GPIO_MODE_JTAG), + GRP("mdio", mdio_func, 1, RT3883_GPIO_MODE_MDIO), + GRP("lna a", lna_a_func, 1, RT3883_GPIO_MODE_LNA_A), + GRP("lna g", lna_g_func, 1, RT3883_GPIO_MODE_LNA_G), + GRP("pci", pci_func, RT3883_GPIO_MODE_PCI_MASK, + RT3883_GPIO_MODE_PCI_SHIFT), + GRP("ge1", ge1_func, 1, RT3883_GPIO_MODE_GE1), + GRP("ge2", ge2_func, 1, RT3883_GPIO_MODE_GE2), + { 0 } }; static void rt3883_wdt_reset(void) @@ -155,17 +73,6 @@ static void rt3883_wdt_reset(void) rt_sysc_w32(t, RT3883_SYSC_REG_SYSCFG1); } -struct ralink_pinmux rt_gpio_pinmux = { - .mode = mode_mux, - .uart = uart_mux, - .uart_shift = RT3883_GPIO_MODE_UART0_SHIFT, - .uart_mask = RT3883_GPIO_MODE_UART0_MASK, - .wdt_reset = rt3883_wdt_reset, - .pci = pci_mux, - .pci_shift = RT3883_GPIO_MODE_PCI_SHIFT, - .pci_mask = RT3883_GPIO_MODE_PCI_MASK, -}; - void __init ralink_clk_init(void) { unsigned long cpu_rate, sys_rate; @@ -204,6 +111,7 @@ void __init ralink_clk_init(void) ralink_clk_add("10000b00.spi", sys_rate); ralink_clk_add("10000c00.uartlite", 40000000); ralink_clk_add("10100000.ethernet", sys_rate); + ralink_clk_add("10180000.wmac", 40000000); } void 
__init ralink_of_remap(void) @@ -243,4 +151,6 @@ void prom_soc_init(struct ralink_soc_info *soc_info) soc_info->mem_base = RT3883_SDRAM_BASE; soc_info->mem_size_min = RT3883_MEM_SIZE_MIN; soc_info->mem_size_max = RT3883_MEM_SIZE_MAX; + + rt2880_pinmux_data = rt3883_pinmux_data; } diff --git a/arch/mips/ralink/timer.c b/arch/mips/ralink/timer.c index 5bb29b3790ff..82c72a15bf75 100644 --- a/arch/mips/ralink/timer.c +++ b/arch/mips/ralink/timer.c @@ -173,7 +173,6 @@ static struct platform_driver rt_timer_driver = { .remove = rt_timer_remove, .driver = { .name = "rt-timer", - .owner = THIS_MODULE, .of_match_table = rt_timer_match }, }; diff --git a/arch/mips/rb532/gpio.c b/arch/mips/rb532/gpio.c index a18007613c30..5aa3df853082 100644 --- a/arch/mips/rb532/gpio.c +++ b/arch/mips/rb532/gpio.c @@ -79,7 +79,7 @@ static inline void rb532_set_bit(unsigned bitval, */ static inline int rb532_get_bit(unsigned offset, void __iomem *ioaddr) { - return (readl(ioaddr) & (1 << offset)); + return readl(ioaddr) & (1 << offset); } /* diff --git a/arch/mips/rb532/prom.c b/arch/mips/rb532/prom.c index a757ded437cd..657210e767c2 100644 --- a/arch/mips/rb532/prom.c +++ b/arch/mips/rb532/prom.c @@ -122,8 +122,8 @@ void __init prom_setup_cmdline(void) void __init prom_init(void) { struct ddr_ram __iomem *ddr; - phys_t memsize; - phys_t ddrbase; + phys_addr_t memsize; + phys_addr_t ddrbase; ddr = ioremap_nocache(ddr_reg[0].start, ddr_reg[0].end - ddr_reg[0].start); @@ -133,8 +133,8 @@ void __init prom_init(void) return; } - ddrbase = (phys_t)&ddr->ddrbase; - memsize = (phys_t)&ddr->ddrmask; + ddrbase = (phys_addr_t)&ddr->ddrbase; + memsize = (phys_addr_t)&ddr->ddrmask; memsize = 0 - memsize; prom_setup_cmdline(); diff --git a/arch/mips/sgi-ip22/ip22-mc.c b/arch/mips/sgi-ip22/ip22-mc.c index 7cec0a4e527d..6b009c45abed 100644 --- a/arch/mips/sgi-ip22/ip22-mc.c +++ b/arch/mips/sgi-ip22/ip22-mc.c @@ -24,14 +24,12 @@ EXPORT_SYMBOL(sgimc); static inline unsigned long get_bank_addr(unsigned int memconfig) { - return ((memconfig & SGIMC_MCONFIG_BASEADDR) << - ((sgimc->systemid & SGIMC_SYSID_MASKREV) >= 5 ? 24 : 22)); + return (memconfig & SGIMC_MCONFIG_BASEADDR) << ((sgimc->systemid & SGIMC_SYSID_MASKREV) >= 5 ? 24 : 22); } static inline unsigned long get_bank_size(unsigned int memconfig) { - return ((memconfig & SGIMC_MCONFIG_RMASK) + 0x0100) << - ((sgimc->systemid & SGIMC_SYSID_MASKREV) >= 5 ? 16 : 14); + return ((memconfig & SGIMC_MCONFIG_RMASK) + 0x0100) << ((sgimc->systemid & SGIMC_SYSID_MASKREV) >= 5 ? 16 : 14); } static inline unsigned int get_bank_config(int bank) diff --git a/arch/mips/sgi-ip22/ip28-berr.c b/arch/mips/sgi-ip22/ip28-berr.c index 3f47346608d7..712cc0f6a58d 100644 --- a/arch/mips/sgi-ip22/ip28-berr.c +++ b/arch/mips/sgi-ip22/ip28-berr.c @@ -338,7 +338,7 @@ static int check_microtlb(u32 hi, u32 lo, unsigned long vaddr) PHYS_TO_XKSEG_UNCACHED(pte); a = (a & 0x3f) << 6; /* PFN */ a += vaddr & ((1 << pgsz) - 1); - return (cpu_err_addr == a); + return cpu_err_addr == a; } } } @@ -351,7 +351,7 @@ static int check_vdma_memaddr(void) u32 a = sgimc->maddronly; if (!(sgimc->dma_ctrl & 0x100)) /* Xlate-bit clear ? 
*/ - return (cpu_err_addr == a); + return cpu_err_addr == a; if (check_microtlb(sgimc->dtlb_hi0, sgimc->dtlb_lo0, a) || check_microtlb(sgimc->dtlb_hi1, sgimc->dtlb_lo1, a) || @@ -367,7 +367,7 @@ static int check_vdma_gioaddr(void) if (gio_err_stat & GIO_ERRMASK) { u32 a = sgimc->gio_dma_trans; a = (sgimc->gmaddronly & ~a) | (sgimc->gio_dma_sbits & a); - return (gio_err_addr == a); + return gio_err_addr == a; } return 0; } diff --git a/arch/mips/sgi-ip27/ip27-klnuma.c b/arch/mips/sgi-ip27/ip27-klnuma.c index 7a53b1e28a93..ecbb62f339c5 100644 --- a/arch/mips/sgi-ip27/ip27-klnuma.c +++ b/arch/mips/sgi-ip27/ip27-klnuma.c @@ -125,8 +125,7 @@ unsigned long node_getfirstfree(cnodeid_t cnode) #endif offset = PAGE_ALIGN((unsigned long)(&_end)) - loadbase; if ((cnode == 0) || (cpu_isset(cnode, ktext_repmask))) - return (TO_NODE(nasid, offset) >> PAGE_SHIFT); + return TO_NODE(nasid, offset) >> PAGE_SHIFT; else - return (KDM_TO_PHYS(PAGE_ALIGN(SYMMON_STK_ADDR(nasid, 0))) >> - PAGE_SHIFT); + return KDM_TO_PHYS(PAGE_ALIGN(SYMMON_STK_ADDR(nasid, 0))) >> PAGE_SHIFT; } diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c index a304bcc37e4f..0b68469e063f 100644 --- a/arch/mips/sgi-ip27/ip27-memory.c +++ b/arch/mips/sgi-ip27/ip27-memory.c @@ -42,8 +42,7 @@ static int fine_mode; static int is_fine_dirmode(void) { - return (((LOCAL_HUB_L(NI_STATUS_REV_ID) & NSRI_REGIONSIZE_MASK) - >> NSRI_REGIONSIZE_SHFT) & REGIONSIZE_FINE); + return ((LOCAL_HUB_L(NI_STATUS_REV_ID) & NSRI_REGIONSIZE_MASK) >> NSRI_REGIONSIZE_SHFT) & REGIONSIZE_FINE; } static hubreg_t get_region(cnodeid_t cnode) @@ -288,7 +287,7 @@ static unsigned long __init slot_psize_compute(cnodeid_t node, int slot) if (size <= 128) { if (slot % 4 == 0) { size <<= 20; /* size in bytes */ - return(size >> PAGE_SHIFT); + return size >> PAGE_SHIFT; } else return 0; } else { diff --git a/arch/mips/sibyte/common/cfe.c b/arch/mips/sibyte/common/cfe.c index 588e1806a1a3..c1a11a11db7f 100644 --- a/arch/mips/sibyte/common/cfe.c +++ b/arch/mips/sibyte/common/cfe.c @@ -38,7 +38,7 @@ #define MAX_RAM_SIZE (~0ULL) #else #ifdef CONFIG_HIGHMEM -#ifdef CONFIG_64BIT_PHYS_ADDR +#ifdef CONFIG_PHYS_ADDR_T_64BIT #define MAX_RAM_SIZE (~0ULL) #else #define MAX_RAM_SIZE (0xffffffffULL) @@ -49,8 +49,8 @@ #endif #define SIBYTE_MAX_MEM_REGIONS 8 -phys_t board_mem_region_addrs[SIBYTE_MAX_MEM_REGIONS]; -phys_t board_mem_region_sizes[SIBYTE_MAX_MEM_REGIONS]; +phys_addr_t board_mem_region_addrs[SIBYTE_MAX_MEM_REGIONS]; +phys_addr_t board_mem_region_sizes[SIBYTE_MAX_MEM_REGIONS]; unsigned int board_mem_region_count; int cfe_cons_handle; @@ -96,7 +96,7 @@ static void __noreturn cfe_linux_halt(void) static __init void prom_meminit(void) { - u64 addr, size, type; /* regardless of 64BIT_PHYS_ADDR */ + u64 addr, size, type; /* regardless of PHYS_ADDR_T_64BIT */ int mem_flags = 0; unsigned int idx; int rd_flag; diff --git a/arch/mips/sibyte/swarm/platform.c b/arch/mips/sibyte/swarm/platform.c index 9480c14ec66a..1cecdcf85cf1 100644 --- a/arch/mips/sibyte/swarm/platform.c +++ b/arch/mips/sibyte/swarm/platform.c @@ -50,7 +50,7 @@ static struct platform_device swarm_pata_device = { static int __init swarm_pata_init(void) { u8 __iomem *base; - phys_t offset, size; + phys_addr_t offset, size; struct resource *r; if (!SIBYTE_HAVE_IDE) diff --git a/arch/mips/sibyte/swarm/rtc_m41t81.c b/arch/mips/sibyte/swarm/rtc_m41t81.c index b732600b47f5..e62466445f08 100644 --- a/arch/mips/sibyte/swarm/rtc_m41t81.c +++ b/arch/mips/sibyte/swarm/rtc_m41t81.c @@ -109,7 +109,7 @@ static int 
m41t81_read(uint8_t addr) return -1; } - return (__raw_readq(SMB_CSR(R_SMB_DATA)) & 0xff); + return __raw_readq(SMB_CSR(R_SMB_DATA)) & 0xff; } static int m41t81_write(uint8_t addr, int b) @@ -229,5 +229,5 @@ int m41t81_probe(void) tmp = m41t81_read(M41T81REG_SC); m41t81_write(M41T81REG_SC, tmp & 0x7f); - return (m41t81_read(M41T81REG_SC) != -1); + return m41t81_read(M41T81REG_SC) != -1; } diff --git a/arch/mips/sibyte/swarm/rtc_xicor1241.c b/arch/mips/sibyte/swarm/rtc_xicor1241.c index 178a824b28d4..50a82c495427 100644 --- a/arch/mips/sibyte/swarm/rtc_xicor1241.c +++ b/arch/mips/sibyte/swarm/rtc_xicor1241.c @@ -84,7 +84,7 @@ static int xicor_read(uint8_t addr) return -1; } - return (__raw_readq(SMB_CSR(R_SMB_DATA)) & 0xff); + return __raw_readq(SMB_CSR(R_SMB_DATA)) & 0xff; } static int xicor_write(uint8_t addr, int b) @@ -206,5 +206,5 @@ unsigned long xicor_get_time(void) int xicor_probe(void) { - return (xicor_read(X1241REG_SC) != -1); + return xicor_read(X1241REG_SC) != -1; } diff --git a/arch/mips/sibyte/swarm/setup.c b/arch/mips/sibyte/swarm/setup.c index 3462c831d0ea..494fb0a475ac 100644 --- a/arch/mips/sibyte/swarm/setup.c +++ b/arch/mips/sibyte/swarm/setup.c @@ -76,7 +76,7 @@ int swarm_be_handler(struct pt_regs *regs, int is_fixup) printk("DBE physical address: %010Lx\n", __read_64bit_c0_register($26, 1)); } - return (is_fixup ? MIPS_BE_FIXUP : MIPS_BE_FATAL); + return is_fixup ? MIPS_BE_FIXUP : MIPS_BE_FATAL; } enum swarm_rtc_type { diff --git a/arch/mips/txx9/generic/setup_tx4927.c b/arch/mips/txx9/generic/setup_tx4927.c index e714d6ce9a82..a4664cb6c1e1 100644 --- a/arch/mips/txx9/generic/setup_tx4927.c +++ b/arch/mips/txx9/generic/setup_tx4927.c @@ -29,8 +29,8 @@ static void __init tx4927_wdr_init(void) { /* report watchdog reset status */ if (____raw_readq(&tx4927_ccfgptr->ccfg) & TX4927_CCFG_WDRST) - pr_warning("Watchdog reset detected at 0x%lx\n", - read_c0_errorepc()); + pr_warn("Watchdog reset detected at 0x%lx\n", + read_c0_errorepc()); /* clear WatchDogReset (W1C) */ tx4927_ccfg_set(TX4927_CCFG_WDRST); /* do reset on watchdog */ diff --git a/arch/mips/txx9/generic/setup_tx4938.c b/arch/mips/txx9/generic/setup_tx4938.c index 0a3bf2dfaba1..58cdb2aba5e1 100644 --- a/arch/mips/txx9/generic/setup_tx4938.c +++ b/arch/mips/txx9/generic/setup_tx4938.c @@ -31,8 +31,8 @@ static void __init tx4938_wdr_init(void) { /* report watchdog reset status */ if (____raw_readq(&tx4938_ccfgptr->ccfg) & TX4938_CCFG_WDRST) - pr_warning("Watchdog reset detected at 0x%lx\n", - read_c0_errorepc()); + pr_warn("Watchdog reset detected at 0x%lx\n", + read_c0_errorepc()); /* clear WatchDogReset (W1C) */ tx4938_ccfg_set(TX4938_CCFG_WDRST); /* do reset on watchdog */ diff --git a/arch/mips/txx9/generic/setup_tx4939.c b/arch/mips/txx9/generic/setup_tx4939.c index b7eccbd17bf7..e3733cde50d6 100644 --- a/arch/mips/txx9/generic/setup_tx4939.c +++ b/arch/mips/txx9/generic/setup_tx4939.c @@ -35,8 +35,8 @@ static void __init tx4939_wdr_init(void) { /* report watchdog reset status */ if (____raw_readq(&tx4939_ccfgptr->ccfg) & TX4939_CCFG_WDRST) - pr_warning("Watchdog reset detected at 0x%lx\n", - read_c0_errorepc()); + pr_warn("Watchdog reset detected at 0x%lx\n", + read_c0_errorepc()); /* clear WatchDogReset (W1C) */ tx4939_ccfg_set(TX4939_CCFG_WDRST); /* do reset on watchdog */ diff --git a/arch/mips/txx9/rbtx4939/setup.c b/arch/mips/txx9/rbtx4939/setup.c index 2da5f25f98bc..37030409745c 100644 --- a/arch/mips/txx9/rbtx4939/setup.c +++ b/arch/mips/txx9/rbtx4939/setup.c @@ -245,7 +245,6 @@ static int __init 
rbtx4939_led_probe(struct platform_device *pdev) static struct platform_driver rbtx4939_led_driver = { .driver = { .name = "rbtx4939-led", - .owner = THIS_MODULE, }, }; diff --git a/arch/mn10300/include/asm/Kbuild b/arch/mn10300/include/asm/Kbuild index 54a062cb9f2c..f892d9de47d9 100644 --- a/arch/mn10300/include/asm/Kbuild +++ b/arch/mn10300/include/asm/Kbuild @@ -3,7 +3,6 @@ generic-y += barrier.h generic-y += clkdev.h generic-y += cputime.h generic-y += exec.h -generic-y += hash.h generic-y += irq_work.h generic-y += mcs_spinlock.h generic-y += preempt.h diff --git a/arch/mn10300/include/uapi/asm/socket.h b/arch/mn10300/include/uapi/asm/socket.h index 6aa3ce1854aa..cab7d6d50051 100644 --- a/arch/mn10300/include/uapi/asm/socket.h +++ b/arch/mn10300/include/uapi/asm/socket.h @@ -80,4 +80,9 @@ #define SO_BPF_EXTENSIONS 48 +#define SO_INCOMING_CPU 49 + +#define SO_ATTACH_BPF 50 +#define SO_DETACH_BPF SO_DETACH_FILTER + #endif /* _ASM_SOCKET_H */ diff --git a/arch/nios2/include/asm/Kbuild b/arch/nios2/include/asm/Kbuild index bb160be0dc28..01c75f36e8b3 100644 --- a/arch/nios2/include/asm/Kbuild +++ b/arch/nios2/include/asm/Kbuild @@ -19,7 +19,6 @@ generic-y += fcntl.h generic-y += ftrace.h generic-y += futex.h generic-y += hardirq.h -generic-y += hash.h generic-y += hw_irq.h generic-y += ioctl.h generic-y += ioctls.h diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild index 89b61d7dc790..91f1f360a7c4 100644 --- a/arch/openrisc/include/asm/Kbuild +++ b/arch/openrisc/include/asm/Kbuild @@ -25,7 +25,6 @@ generic-y += fcntl.h generic-y += ftrace.h generic-y += futex.h generic-y += hardirq.h -generic-y += hash.h generic-y += hw_irq.h generic-y += ioctl.h generic-y += ioctls.h diff --git a/arch/parisc/hpux/fs.c b/arch/parisc/hpux/fs.c index 2bedafea3d94..97a7bf8df348 100644 --- a/arch/parisc/hpux/fs.c +++ b/arch/parisc/hpux/fs.c @@ -56,11 +56,12 @@ struct getdents_callback { #define NAME_OFFSET(de) ((int) ((de)->d_name - (char __user *) (de))) -static int filldir(void * __buf, const char * name, int namlen, loff_t offset, - u64 ino, unsigned d_type) +static int filldir(struct dir_context *ctx, const char *name, int namlen, + loff_t offset, u64 ino, unsigned d_type) { struct hpux_dirent __user * dirent; - struct getdents_callback * buf = (struct getdents_callback *) __buf; + struct getdents_callback *buf = + container_of(ctx, struct getdents_callback, ctx); ino_t d_ino; int reclen = ALIGN(NAME_OFFSET(dirent) + namlen + 1, sizeof(long)); diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild index ffb024b8423f..8686237a3c3c 100644 --- a/arch/parisc/include/asm/Kbuild +++ b/arch/parisc/include/asm/Kbuild @@ -7,7 +7,6 @@ generic-y += device.h generic-y += div64.h generic-y += emergency-restart.h generic-y += exec.h -generic-y += hash.h generic-y += hw_irq.h generic-y += irq_regs.h generic-y += irq_work.h diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h index fe35ceacf0e7..a5cd40cd8ee1 100644 --- a/arch/parisc/include/uapi/asm/socket.h +++ b/arch/parisc/include/uapi/asm/socket.h @@ -79,4 +79,9 @@ #define SO_BPF_EXTENSIONS 0x4029 +#define SO_INCOMING_CPU 0x402A + +#define SO_ATTACH_BPF 0x402B +#define SO_DETACH_BPF SO_DETACH_FILTER + #endif /* _UAPI_ASM_SOCKET_H */ diff --git a/arch/parisc/lib/fixup.S b/arch/parisc/lib/fixup.S index f8c45cc2947d..536ef66bb94b 100644 --- a/arch/parisc/lib/fixup.S +++ b/arch/parisc/lib/fixup.S @@ -38,14 +38,14 @@ LDREGX \t2(\t1),\t2 addil LT%exception_data,%r27 LDREG 
RT%exception_data(%r1),\t1 - /* t1 = &__get_cpu_var(exception_data) */ + /* t1 = this_cpu_ptr(&exception_data) */ add,l \t1,\t2,\t1 /* t1 = t1->fault_ip */ LDREG EXCDATA_IP(\t1), \t1 .endm #else .macro get_fault_ip t1 t2 - /* t1 = &__get_cpu_var(exception_data) */ + /* t1 = this_cpu_ptr(&exception_data) */ addil LT%exception_data,%r27 LDREG RT%exception_data(%r1),\t2 /* t1 = t2->fault_ip */ diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 88eace4e28c3..a2a168e2dfe7 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -88,6 +88,7 @@ config PPC select ARCH_MIGHT_HAVE_PC_PARPORT select ARCH_MIGHT_HAVE_PC_SERIO select BINFMT_ELF + select ARCH_BINFMT_ELF_RANDOMIZE_PIE select OF select OF_EARLY_FLATTREE select OF_RESERVED_MEM @@ -128,6 +129,7 @@ config PPC select HAVE_BPF_JIT if PPC64 select HAVE_ARCH_JUMP_LABEL select ARCH_HAVE_NMI_SAFE_CMPXCHG + select ARCH_HAS_GCOV_PROFILE_ALL select GENERIC_SMP_IDLE_THREAD select GENERIC_CMOS_UPDATE select GENERIC_TIME_VSYSCALL_OLD @@ -148,6 +150,8 @@ config PPC select HAVE_ARCH_AUDITSYSCALL select ARCH_SUPPORTS_ATOMIC_RMW select DCACHE_WORD_ACCESS if PPC64 && CPU_LITTLE_ENDIAN + select NO_BOOTMEM + select HAVE_GENERIC_RCU_GUP config GENERIC_CSUM def_bool CPU_LITTLE_ENDIAN @@ -549,7 +553,7 @@ config PPC_4K_PAGES bool "4k page size" config PPC_16K_PAGES - bool "16k page size" if 44x + bool "16k page size" if 44x || PPC_8xx config PPC_64K_PAGES bool "64k page size" if 44x || PPC_STD_MMU_64 || PPC_BOOK3E_64 diff --git a/arch/powerpc/boot/dts/b4860emu.dts b/arch/powerpc/boot/dts/b4860emu.dts index 85646b4f96e1..2aa5cd318ce8 100644 --- a/arch/powerpc/boot/dts/b4860emu.dts +++ b/arch/powerpc/boot/dts/b4860emu.dts @@ -193,9 +193,9 @@ fsl,liodn-bits = <12>; }; - clockgen: global-utilities@e1000 { +/include/ "fsl/qoriq-clockgen2.dtsi" + global-utilities@e1000 { compatible = "fsl,b4-clockgen", "fsl,qoriq-clockgen-2.0"; - reg = <0xe1000 0x1000>; }; /include/ "fsl/qoriq-dma-0.dtsi" diff --git a/arch/powerpc/boot/dts/b4qds.dtsi b/arch/powerpc/boot/dts/b4qds.dtsi index 8b47edcfabf0..e5bde0b85135 100644 --- a/arch/powerpc/boot/dts/b4qds.dtsi +++ b/arch/powerpc/boot/dts/b4qds.dtsi @@ -152,6 +152,29 @@ reg = <0x68>; }; }; + + i2c@2 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0x2>; + + ina220@40 { + compatible = "ti,ina220"; + reg = <0x40>; + shunt-resistor = <1000>; + }; + }; + + i2c@3 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0x3>; + + adt7461@4c { + compatible = "adi,adt7461"; + reg = <0x4c>; + }; + }; }; }; diff --git a/arch/powerpc/boot/dts/bsc9131rdb.dtsi b/arch/powerpc/boot/dts/bsc9131rdb.dtsi index 9e6c01339ccc..45efcbadb23c 100644 --- a/arch/powerpc/boot/dts/bsc9131rdb.dtsi +++ b/arch/powerpc/boot/dts/bsc9131rdb.dtsi @@ -40,31 +40,6 @@ compatible = "fsl,ifc-nand"; reg = <0x0 0x0 0x4000>; - partition@0 { - /* This location must not be altered */ - /* 3MB for u-boot Bootloader Image */ - reg = <0x0 0x00300000>; - label = "NAND U-Boot Image"; - read-only; - }; - - partition@300000 { - /* 1MB for DTB Image */ - reg = <0x00300000 0x00100000>; - label = "NAND DTB Image"; - }; - - partition@400000 { - /* 8MB for Linux Kernel Image */ - reg = <0x00400000 0x00800000>; - label = "NAND Linux Kernel Image"; - }; - - partition@c00000 { - /* Rest space for Root file System Image */ - reg = <0x00c00000 0x07400000>; - label = "NAND RFS Image"; - }; }; }; @@ -82,31 +57,6 @@ reg = <0>; spi-max-frequency = <50000000>; - /* 512KB for u-boot Bootloader Image */ - partition@0 { - reg = <0x0 0x00080000>; - label = "SPI Flash U-Boot Image"; 
- read-only; - }; - - /* 512KB for DTB Image */ - partition@80000 { - reg = <0x00080000 0x00080000>; - label = "SPI Flash DTB Image"; - }; - - /* 4MB for Linux Kernel Image */ - partition@100000 { - reg = <0x00100000 0x00400000>; - label = "SPI Flash Kernel Image"; - }; - - /*11MB for RFS Image */ - partition@500000 { - reg = <0x00500000 0x00B00000>; - label = "SPI Flash RFS Image"; - }; - }; }; diff --git a/arch/powerpc/boot/dts/fsl/b4420si-post.dtsi b/arch/powerpc/boot/dts/fsl/b4420si-post.dtsi index d67894459ac8..86161ae6c966 100644 --- a/arch/powerpc/boot/dts/fsl/b4420si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/b4420si-post.dtsi @@ -80,33 +80,9 @@ compatible = "fsl,b4420-device-config", "fsl,qoriq-device-config-2.0"; }; - clockgen: global-utilities@e1000 { +/include/ "qoriq-clockgen2.dtsi" + global-utilities@e1000 { compatible = "fsl,b4420-clockgen", "fsl,qoriq-clockgen-2.0"; - ranges = <0x0 0xe1000 0x1000>; - #address-cells = <1>; - #size-cells = <1>; - - sysclk: sysclk { - #clock-cells = <0>; - compatible = "fsl,qoriq-sysclk-2.0"; - clock-output-names = "sysclk"; - }; - - pll0: pll0@800 { - #clock-cells = <1>; - reg = <0x800 0x4>; - compatible = "fsl,qoriq-core-pll-2.0"; - clocks = <&sysclk>; - clock-output-names = "pll0", "pll0-div2", "pll0-div4"; - }; - - pll1: pll1@820 { - #clock-cells = <1>; - reg = <0x820 0x4>; - compatible = "fsl,qoriq-core-pll-2.0"; - clocks = <&sysclk>; - clock-output-names = "pll1", "pll1-div2", "pll1-div4"; - }; mux0: mux0@0 { #clock-cells = <0>; diff --git a/arch/powerpc/boot/dts/fsl/b4860si-post.dtsi b/arch/powerpc/boot/dts/fsl/b4860si-post.dtsi index 582381dba1d7..65100b9636b7 100644 --- a/arch/powerpc/boot/dts/fsl/b4860si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/b4860si-post.dtsi @@ -124,33 +124,9 @@ compatible = "fsl,b4860-device-config", "fsl,qoriq-device-config-2.0"; }; - clockgen: global-utilities@e1000 { +/include/ "qoriq-clockgen2.dtsi" + global-utilities@e1000 { compatible = "fsl,b4860-clockgen", "fsl,qoriq-clockgen-2.0"; - ranges = <0x0 0xe1000 0x1000>; - #address-cells = <1>; - #size-cells = <1>; - - sysclk: sysclk { - #clock-cells = <0>; - compatible = "fsl,qoriq-sysclk-2.0"; - clock-output-names = "sysclk"; - }; - - pll0: pll0@800 { - #clock-cells = <1>; - reg = <0x800 0x4>; - compatible = "fsl,qoriq-core-pll-2.0"; - clocks = <&sysclk>; - clock-output-names = "pll0", "pll0-div2", "pll0-div4"; - }; - - pll1: pll1@820 { - #clock-cells = <1>; - reg = <0x820 0x4>; - compatible = "fsl,qoriq-core-pll-2.0"; - clocks = <&sysclk>; - clock-output-names = "pll1", "pll1-div2", "pll1-div4"; - }; mux0: mux0@0 { #clock-cells = <0>; diff --git a/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi b/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi index 69ce1026c948..efd74db4f9b0 100644 --- a/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi @@ -305,53 +305,9 @@ #sleep-cells = <2>; }; - clockgen: global-utilities@e1000 { +/include/ "qoriq-clockgen1.dtsi" + global-utilities@e1000 { compatible = "fsl,p2041-clockgen", "fsl,qoriq-clockgen-1.0"; - ranges = <0x0 0xe1000 0x1000>; - reg = <0xe1000 0x1000>; - clock-frequency = <0>; - #address-cells = <1>; - #size-cells = <1>; - - sysclk: sysclk { - #clock-cells = <0>; - compatible = "fsl,qoriq-sysclk-1.0"; - clock-output-names = "sysclk"; - }; - - pll0: pll0@800 { - #clock-cells = <1>; - reg = <0x800 0x4>; - compatible = "fsl,qoriq-core-pll-1.0"; - clocks = <&sysclk>; - clock-output-names = "pll0", "pll0-div2"; - }; - - pll1: pll1@820 { - #clock-cells = <1>; - reg = <0x820 0x4>; - 
compatible = "fsl,qoriq-core-pll-1.0"; - clocks = <&sysclk>; - clock-output-names = "pll1", "pll1-div2"; - }; - - mux0: mux0@0 { - #clock-cells = <0>; - reg = <0x0 0x4>; - compatible = "fsl,qoriq-core-mux-1.0"; - clocks = <&pll0 0>, <&pll0 1>, <&pll1 0>, <&pll1 1>; - clock-names = "pll0", "pll0-div2", "pll1", "pll1-div2"; - clock-output-names = "cmux0"; - }; - - mux1: mux1@20 { - #clock-cells = <0>; - reg = <0x20 0x4>; - compatible = "fsl,qoriq-core-mux-1.0"; - clocks = <&pll0 0>, <&pll0 1>, <&pll1 0>, <&pll1 1>; - clock-names = "pll0", "pll0-div2", "pll1", "pll1-div2"; - clock-output-names = "cmux1"; - }; mux2: mux2@40 { #clock-cells = <0>; diff --git a/arch/powerpc/boot/dts/fsl/p3041si-post.dtsi b/arch/powerpc/boot/dts/fsl/p3041si-post.dtsi index cd63cb1b1042..d7425ef1ae41 100644 --- a/arch/powerpc/boot/dts/fsl/p3041si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/p3041si-post.dtsi @@ -332,53 +332,9 @@ #sleep-cells = <2>; }; - clockgen: global-utilities@e1000 { +/include/ "qoriq-clockgen1.dtsi" + global-utilities@e1000 { compatible = "fsl,p3041-clockgen", "fsl,qoriq-clockgen-1.0"; - ranges = <0x0 0xe1000 0x1000>; - reg = <0xe1000 0x1000>; - clock-frequency = <0>; - #address-cells = <1>; - #size-cells = <1>; - - sysclk: sysclk { - #clock-cells = <0>; - compatible = "fsl,qoriq-sysclk-1.0"; - clock-output-names = "sysclk"; - }; - - pll0: pll0@800 { - #clock-cells = <1>; - reg = <0x800 0x4>; - compatible = "fsl,qoriq-core-pll-1.0"; - clocks = <&sysclk>; - clock-output-names = "pll0", "pll0-div2"; - }; - - pll1: pll1@820 { - #clock-cells = <1>; - reg = <0x820 0x4>; - compatible = "fsl,qoriq-core-pll-1.0"; - clocks = <&sysclk>; - clock-output-names = "pll1", "pll1-div2"; - }; - - mux0: mux0@0 { - #clock-cells = <0>; - reg = <0x0 0x4>; - compatible = "fsl,qoriq-core-mux-1.0"; - clocks = <&pll0 0>, <&pll0 1>, <&pll1 0>, <&pll1 1>; - clock-names = "pll0", "pll0-div2", "pll1", "pll1-div2"; - clock-output-names = "cmux0"; - }; - - mux1: mux1@20 { - #clock-cells = <0>; - reg = <0x20 0x4>; - compatible = "fsl,qoriq-core-mux-1.0"; - clocks = <&pll0 0>, <&pll0 1>, <&pll1 0>, <&pll1 1>; - clock-names = "pll0", "pll0-div2", "pll1", "pll1-div2"; - clock-output-names = "cmux1"; - }; mux2: mux2@40 { #clock-cells = <0>; diff --git a/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi b/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi index 12947ccddf25..7005a4a4cef0 100644 --- a/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi @@ -352,35 +352,9 @@ #sleep-cells = <2>; }; - clockgen: global-utilities@e1000 { +/include/ "qoriq-clockgen1.dtsi" + global-utilities@e1000 { compatible = "fsl,p4080-clockgen", "fsl,qoriq-clockgen-1.0"; - ranges = <0x0 0xe1000 0x1000>; - reg = <0xe1000 0x1000>; - clock-frequency = <0>; - #address-cells = <1>; - #size-cells = <1>; - - sysclk: sysclk { - #clock-cells = <0>; - compatible = "fsl,qoriq-sysclk-1.0"; - clock-output-names = "sysclk"; - }; - - pll0: pll0@800 { - #clock-cells = <1>; - reg = <0x800 0x4>; - compatible = "fsl,qoriq-core-pll-1.0"; - clocks = <&sysclk>; - clock-output-names = "pll0", "pll0-div2"; - }; - - pll1: pll1@820 { - #clock-cells = <1>; - reg = <0x820 0x4>; - compatible = "fsl,qoriq-core-pll-1.0"; - clocks = <&sysclk>; - clock-output-names = "pll1", "pll1-div2"; - }; pll2: pll2@840 { #clock-cells = <1>; @@ -398,24 +372,6 @@ clock-output-names = "pll3", "pll3-div2"; }; - mux0: mux0@0 { - #clock-cells = <0>; - reg = <0x0 0x4>; - compatible = "fsl,qoriq-core-mux-1.0"; - clocks = <&pll0 0>, <&pll0 1>, <&pll1 0>, <&pll1 1>; - clock-names 
= "pll0", "pll0-div2", "pll1", "pll1-div2"; - clock-output-names = "cmux0"; - }; - - mux1: mux1@20 { - #clock-cells = <0>; - reg = <0x20 0x4>; - compatible = "fsl,qoriq-core-mux-1.0"; - clocks = <&pll0 0>, <&pll0 1>, <&pll1 0>, <&pll1 1>; - clock-names = "pll0", "pll0-div2", "pll1", "pll1-div2"; - clock-output-names = "cmux1"; - }; - mux2: mux2@40 { #clock-cells = <0>; reg = <0x40 0x4>; diff --git a/arch/powerpc/boot/dts/fsl/p5020si-post.dtsi b/arch/powerpc/boot/dts/fsl/p5020si-post.dtsi index 4c4a2b0436b2..55834211bd28 100644 --- a/arch/powerpc/boot/dts/fsl/p5020si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/p5020si-post.dtsi @@ -337,53 +337,9 @@ #sleep-cells = <2>; }; - clockgen: global-utilities@e1000 { +/include/ "qoriq-clockgen1.dtsi" + global-utilities@e1000 { compatible = "fsl,p5020-clockgen", "fsl,qoriq-clockgen-1.0"; - ranges = <0x0 0xe1000 0x1000>; - reg = <0xe1000 0x1000>; - clock-frequency = <0>; - #address-cells = <1>; - #size-cells = <1>; - - sysclk: sysclk { - #clock-cells = <0>; - compatible = "fsl,qoriq-sysclk-1.0"; - clock-output-names = "sysclk"; - }; - - pll0: pll0@800 { - #clock-cells = <1>; - reg = <0x800 0x4>; - compatible = "fsl,qoriq-core-pll-1.0"; - clocks = <&sysclk>; - clock-output-names = "pll0", "pll0-div2"; - }; - - pll1: pll1@820 { - #clock-cells = <1>; - reg = <0x820 0x4>; - compatible = "fsl,qoriq-core-pll-1.0"; - clocks = <&sysclk>; - clock-output-names = "pll1", "pll1-div2"; - }; - - mux0: mux0@0 { - #clock-cells = <0>; - reg = <0x0 0x4>; - compatible = "fsl,qoriq-core-mux-1.0"; - clocks = <&pll0 0>, <&pll0 1>, <&pll1 0>, <&pll1 1>; - clock-names = "pll0", "pll0-div2", "pll1", "pll1-div2"; - clock-output-names = "cmux0"; - }; - - mux1: mux1@20 { - #clock-cells = <0>; - reg = <0x20 0x4>; - compatible = "fsl,qoriq-core-mux-1.0"; - clocks = <&pll0 0>, <&pll0 1>, <&pll1 0>, <&pll1 1>; - clock-names = "pll0", "pll0-div2", "pll1", "pll1-div2"; - clock-output-names = "cmux1"; - }; }; rcpm: global-utilities@e2000 { diff --git a/arch/powerpc/boot/dts/fsl/p5040si-post.dtsi b/arch/powerpc/boot/dts/fsl/p5040si-post.dtsi index 67296fdd9698..6e4cd6ce363c 100644 --- a/arch/powerpc/boot/dts/fsl/p5040si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/p5040si-post.dtsi @@ -297,53 +297,9 @@ #sleep-cells = <2>; }; - clockgen: global-utilities@e1000 { +/include/ "qoriq-clockgen1.dtsi" + global-utilities@e1000 { compatible = "fsl,p5040-clockgen", "fsl,qoriq-clockgen-1.0"; - ranges = <0x0 0xe1000 0x1000>; - reg = <0xe1000 0x1000>; - clock-frequency = <0>; - #address-cells = <1>; - #size-cells = <1>; - - sysclk: sysclk { - #clock-cells = <0>; - compatible = "fsl,qoriq-sysclk-1.0"; - clock-output-names = "sysclk"; - }; - - pll0: pll0@800 { - #clock-cells = <1>; - reg = <0x800 0x4>; - compatible = "fsl,qoriq-core-pll-1.0"; - clocks = <&sysclk>; - clock-output-names = "pll0", "pll0-div2"; - }; - - pll1: pll1@820 { - #clock-cells = <1>; - reg = <0x820 0x4>; - compatible = "fsl,qoriq-core-pll-1.0"; - clocks = <&sysclk>; - clock-output-names = "pll1", "pll1-div2"; - }; - - mux0: mux0@0 { - #clock-cells = <0>; - reg = <0x0 0x4>; - compatible = "fsl,qoriq-core-mux-1.0"; - clocks = <&pll0 0>, <&pll0 1>, <&pll1 0>, <&pll1 1>; - clock-names = "pll0", "pll0-div2", "pll1", "pll1-div2"; - clock-output-names = "cmux0"; - }; - - mux1: mux1@20 { - #clock-cells = <0>; - reg = <0x20 0x4>; - compatible = "fsl,qoriq-core-mux-1.0"; - clocks = <&pll0 0>, <&pll0 1>, <&pll1 0>, <&pll1 1>; - clock-names = "pll0", "pll0-div2", "pll1", "pll1-div2"; - clock-output-names = "cmux1"; - }; mux2: mux2@40 { #clock-cells 
= <0>; diff --git a/arch/powerpc/boot/dts/fsl/qoriq-clockgen1.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-clockgen1.dtsi new file mode 100644 index 000000000000..4ece1edbff63 --- /dev/null +++ b/arch/powerpc/boot/dts/fsl/qoriq-clockgen1.dtsi @@ -0,0 +1,85 @@ +/* + * QorIQ clock control device tree stub [ controller @ offset 0xe1000 ] + * + * Copyright 2014 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +global-utilities@e1000 { + compatible = "fsl,qoriq-clockgen-1.0"; + ranges = <0x0 0xe1000 0x1000>; + reg = <0xe1000 0x1000>; + clock-frequency = <0>; + #address-cells = <1>; + #size-cells = <1>; + + sysclk: sysclk { + #clock-cells = <0>; + compatible = "fsl,qoriq-sysclk-1.0", "fixed-clock"; + clock-output-names = "sysclk"; + }; + pll0: pll0@800 { + #clock-cells = <1>; + reg = <0x800 0x4>; + compatible = "fsl,qoriq-core-pll-1.0"; + clocks = <&sysclk>; + clock-output-names = "pll0", "pll0-div2"; + }; + pll1: pll1@820 { + #clock-cells = <1>; + reg = <0x820 0x4>; + compatible = "fsl,qoriq-core-pll-1.0"; + clocks = <&sysclk>; + clock-output-names = "pll1", "pll1-div2"; + }; + mux0: mux0@0 { + #clock-cells = <0>; + reg = <0x0 0x4>; + compatible = "fsl,qoriq-core-mux-1.0"; + clocks = <&pll0 0>, <&pll0 1>, <&pll1 0>, <&pll1 1>; + clock-names = "pll0", "pll0-div2", "pll1", "pll1-div2"; + clock-output-names = "cmux0"; + }; + mux1: mux1@20 { + #clock-cells = <0>; + reg = <0x20 0x4>; + compatible = "fsl,qoriq-core-mux-1.0"; + clocks = <&pll0 0>, <&pll0 1>, <&pll1 0>, <&pll1 1>; + clock-names = "pll0", "pll0-div2", "pll1", "pll1-div2"; + clock-output-names = "cmux1"; + }; + platform_pll: platform-pll@c00 { + #clock-cells = <1>; + reg = <0xc00 0x4>; + compatible = "fsl,qoriq-platform-pll-1.0"; + clocks = <&sysclk>; + clock-output-names = "platform-pll", "platform-pll-div2"; + }; +}; diff --git a/arch/powerpc/boot/dts/fsl/qoriq-clockgen2.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-clockgen2.dtsi new file mode 100644 index 000000000000..48e0b6e4ce33 --- /dev/null +++ b/arch/powerpc/boot/dts/fsl/qoriq-clockgen2.dtsi @@ -0,0 +1,68 @@ +/* + * QorIQ clock control device tree stub [ controller @ offset 0xe1000 ] + * + * Copyright 2014 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +global-utilities@e1000 { + compatible = "fsl,qoriq-clockgen-2.0"; + ranges = <0x0 0xe1000 0x1000>; + reg = <0xe1000 0x1000>; + #address-cells = <1>; + #size-cells = <1>; + + sysclk: sysclk { + #clock-cells = <0>; + compatible = "fsl,qoriq-sysclk-2.0", "fixed-clock"; + clock-output-names = "sysclk"; + }; + pll0: pll0@800 { + #clock-cells = <1>; + reg = <0x800 0x4>; + compatible = "fsl,qoriq-core-pll-2.0"; + clocks = <&sysclk>; + clock-output-names = "pll0", "pll0-div2", "pll0-div4"; + }; + pll1: pll1@820 { + #clock-cells = <1>; + reg = <0x820 0x4>; + compatible = "fsl,qoriq-core-pll-2.0"; + clocks = <&sysclk>; + clock-output-names = "pll1", "pll1-div2", "pll1-div4"; + }; + platform_pll: platform-pll@c00 { + #clock-cells = <1>; + reg = <0xc00 0x4>; + compatible = "fsl,qoriq-platform-pll-2.0"; + clocks = <&sysclk>; + clock-output-names = "platform-pll", "platform-pll-div2"; + }; +}; diff --git a/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi b/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi index 12e597eea3c8..15ae462e758f 100644 --- a/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi @@ -281,35 +281,9 @@ fsl,liodn-bits = <12>; }; - clockgen: global-utilities@e1000 { +/include/ "qoriq-clockgen2.dtsi" + global-utilities@e1000 { compatible = "fsl,t1040-clockgen", "fsl,qoriq-clockgen-2.0"; - ranges = <0x0 0xe1000 0x1000>; - reg = <0xe1000 0x1000>; - #address-cells = <1>; - #size-cells = <1>; - - sysclk: sysclk { - #clock-cells = <0>; - compatible = "fsl,qoriq-sysclk-2.0"; - clock-output-names = "sysclk", "fixed-clock"; - }; - - - pll0: pll0@800 { - #clock-cells = <1>; - reg = <0x800 4>; - compatible = "fsl,qoriq-core-pll-2.0"; - clocks = <&sysclk>; - clock-output-names = "pll0", "pll0-div2", "pll0-div4"; - }; - - pll1: pll1@820 { - #clock-cells = <1>; - reg = <0x820 4>; - compatible = "fsl,qoriq-core-pll-2.0"; - clocks = <&sysclk>; - clock-output-names = "pll1", "pll1-div2", "pll1-div4"; - }; mux0: mux0@0 { #clock-cells = <0>; diff --git a/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi b/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi index aecee9690a88..1ce91e3485a9 100644 --- a/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi @@ -305,34 +305,9 @@ fsl,liodn-bits = <12>; }; - clockgen: global-utilities@e1000 { +/include/ "qoriq-clockgen2.dtsi" + global-utilities@e1000 { compatible = "fsl,t2080-clockgen", "fsl,qoriq-clockgen-2.0"; - ranges = <0x0 0xe1000 0x1000>; - reg = <0xe1000 0x1000>; - #address-cells = <1>; - #size-cells = <1>; - - sysclk: sysclk { - #clock-cells = <0>; - compatible = "fsl,qoriq-sysclk-2.0"; - clock-output-names = "sysclk", "fixed-clock"; - }; - - pll0: pll0@800 { - #clock-cells = <1>; - reg = <0x800 4>; - compatible = "fsl,qoriq-core-pll-2.0"; - clocks = <&sysclk>; - clock-output-names = "pll0", "pll0-div2", "pll0-div4"; - }; - - pll1: pll1@820 { - #clock-cells = <1>; - reg = <0x820 4>; - compatible = "fsl,qoriq-core-pll-2.0"; - clocks = <&sysclk>; - clock-output-names = "pll1", "pll1-div2", "pll1-div4"; - }; mux0: mux0@0 { #clock-cells = <0>; diff --git a/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi b/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi index 7e2fc7cdce48..0e96fcabe812 100644 --- a/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi @@ -368,34 +368,9 @@ fsl,liodn-bits = <12>; }; - clockgen: global-utilities@e1000 { +/include/ "qoriq-clockgen2.dtsi" + global-utilities@e1000 { compatible = "fsl,t4240-clockgen", "fsl,qoriq-clockgen-2.0"; - ranges 
= <0x0 0xe1000 0x1000>; - reg = <0xe1000 0x1000>; - #address-cells = <1>; - #size-cells = <1>; - - sysclk: sysclk { - #clock-cells = <0>; - compatible = "fsl,qoriq-sysclk-2.0"; - clock-output-names = "sysclk"; - }; - - pll0: pll0@800 { - #clock-cells = <1>; - reg = <0x800 0x4>; - compatible = "fsl,qoriq-core-pll-2.0"; - clocks = <&sysclk>; - clock-output-names = "pll0", "pll0-div2", "pll0-div4"; - }; - - pll1: pll1@820 { - #clock-cells = <1>; - reg = <0x820 0x4>; - compatible = "fsl,qoriq-core-pll-2.0"; - clocks = <&sysclk>; - clock-output-names = "pll1", "pll1-div2", "pll1-div4"; - }; pll2: pll2@840 { #clock-cells = <1>; diff --git a/arch/powerpc/boot/dts/p3041ds.dts b/arch/powerpc/boot/dts/p3041ds.dts index 2fed3bc0b990..394ea9c943c9 100644 --- a/arch/powerpc/boot/dts/p3041ds.dts +++ b/arch/powerpc/boot/dts/p3041ds.dts @@ -98,6 +98,26 @@ reg = <0x68>; interrupts = <0x1 0x1 0 0>; }; + ina220@40 { + compatible = "ti,ina220"; + reg = <0x40>; + shunt-resistor = <1000>; + }; + ina220@41 { + compatible = "ti,ina220"; + reg = <0x41>; + shunt-resistor = <1000>; + }; + ina220@44 { + compatible = "ti,ina220"; + reg = <0x44>; + shunt-resistor = <1000>; + }; + ina220@45 { + compatible = "ti,ina220"; + reg = <0x45>; + shunt-resistor = <1000>; + }; adt7461@4c { compatible = "adi,adt7461"; reg = <0x4c>; diff --git a/arch/powerpc/boot/dts/p5020ds.dts b/arch/powerpc/boot/dts/p5020ds.dts index 2869fea717dd..b7f3057cd894 100644 --- a/arch/powerpc/boot/dts/p5020ds.dts +++ b/arch/powerpc/boot/dts/p5020ds.dts @@ -98,6 +98,26 @@ reg = <0x68>; interrupts = <0x1 0x1 0 0>; }; + ina220@40 { + compatible = "ti,ina220"; + reg = <0x40>; + shunt-resistor = <1000>; + }; + ina220@41 { + compatible = "ti,ina220"; + reg = <0x41>; + shunt-resistor = <1000>; + }; + ina220@44 { + compatible = "ti,ina220"; + reg = <0x44>; + shunt-resistor = <1000>; + }; + ina220@45 { + compatible = "ti,ina220"; + reg = <0x45>; + shunt-resistor = <1000>; + }; adt7461@4c { compatible = "adi,adt7461"; reg = <0x4c>; diff --git a/arch/powerpc/boot/dts/p5040ds.dts b/arch/powerpc/boot/dts/p5040ds.dts index 860b5ccf76c0..7e04bf487c04 100644 --- a/arch/powerpc/boot/dts/p5040ds.dts +++ b/arch/powerpc/boot/dts/p5040ds.dts @@ -95,6 +95,26 @@ reg = <0x68>; interrupts = <0x1 0x1 0 0>; }; + ina220@40 { + compatible = "ti,ina220"; + reg = <0x40>; + shunt-resistor = <1000>; + }; + ina220@41 { + compatible = "ti,ina220"; + reg = <0x41>; + shunt-resistor = <1000>; + }; + ina220@44 { + compatible = "ti,ina220"; + reg = <0x44>; + shunt-resistor = <1000>; + }; + ina220@45 { + compatible = "ti,ina220"; + reg = <0x45>; + shunt-resistor = <1000>; + }; adt7461@4c { compatible = "adi,adt7461"; reg = <0x4c>; diff --git a/arch/powerpc/boot/dts/t104xrdb.dtsi b/arch/powerpc/boot/dts/t104xrdb.dtsi index 1cf0f3c5f7e5..187add885cae 100644 --- a/arch/powerpc/boot/dts/t104xrdb.dtsi +++ b/arch/powerpc/boot/dts/t104xrdb.dtsi @@ -83,6 +83,13 @@ }; }; + i2c@118000 { + adt7461@4c { + compatible = "adi,adt7461"; + reg = <0x4c>; + }; + }; + i2c@118100 { pca9546@77 { compatible = "nxp,pca9546"; diff --git a/arch/powerpc/boot/dts/t208xqds.dtsi b/arch/powerpc/boot/dts/t208xqds.dtsi index 555dc6e03d89..59061834d54e 100644 --- a/arch/powerpc/boot/dts/t208xqds.dtsi +++ b/arch/powerpc/boot/dts/t208xqds.dtsi @@ -169,6 +169,17 @@ shunt-resistor = <1000>; }; }; + + i2c@3 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0x3>; + + adt7461@4c { + compatible = "adi,adt7461"; + reg = <0x4c>; + }; + }; }; }; diff --git a/arch/powerpc/boot/dts/t4240emu.dts b/arch/powerpc/boot/dts/t4240emu.dts 
index bc12127a03fb..decaf357db9c 100644 --- a/arch/powerpc/boot/dts/t4240emu.dts +++ b/arch/powerpc/boot/dts/t4240emu.dts @@ -250,9 +250,9 @@ fsl,liodn-bits = <12>; }; - clockgen: global-utilities@e1000 { +/include/ "fsl/qoriq-clockgen2.dtsi" + global-utilities@e1000 { compatible = "fsl,t4240-clockgen", "fsl,qoriq-clockgen-2.0"; - reg = <0xe1000 0x1000>; }; /include/ "fsl/qoriq-dma-0.dtsi" diff --git a/arch/powerpc/boot/main.c b/arch/powerpc/boot/main.c index d367a0aece2a..d80161b633f4 100644 --- a/arch/powerpc/boot/main.c +++ b/arch/powerpc/boot/main.c @@ -144,13 +144,24 @@ static char cmdline[BOOT_COMMAND_LINE_SIZE] static void prep_cmdline(void *chosen) { + unsigned int getline_timeout = 5000; + int v; + int n; + + /* Wait-for-input time */ + n = getprop(chosen, "linux,cmdline-timeout", &v, sizeof(v)); + if (n == sizeof(v)) + getline_timeout = v; + if (cmdline[0] == '\0') getprop(chosen, "bootargs", cmdline, BOOT_COMMAND_LINE_SIZE-1); printf("\n\rLinux/PowerPC load: %s", cmdline); + /* If possible, edit the command line */ - if (console_ops.edit_cmdline) - console_ops.edit_cmdline(cmdline, BOOT_COMMAND_LINE_SIZE); + if (console_ops.edit_cmdline && getline_timeout) + console_ops.edit_cmdline(cmdline, BOOT_COMMAND_LINE_SIZE, getline_timeout); + printf("\n\r"); /* Put the command line back into the devtree for the kernel */ diff --git a/arch/powerpc/boot/ops.h b/arch/powerpc/boot/ops.h index 8aad3c55aeda..5e75e1c5518e 100644 --- a/arch/powerpc/boot/ops.h +++ b/arch/powerpc/boot/ops.h @@ -58,7 +58,7 @@ extern struct dt_ops dt_ops; struct console_ops { int (*open)(void); void (*write)(const char *buf, int len); - void (*edit_cmdline)(char *buf, int len); + void (*edit_cmdline)(char *buf, int len, unsigned int getline_timeout); void (*close)(void); void *data; }; diff --git a/arch/powerpc/boot/serial.c b/arch/powerpc/boot/serial.c index f2156f07571f..167ee9433de6 100644 --- a/arch/powerpc/boot/serial.c +++ b/arch/powerpc/boot/serial.c @@ -33,7 +33,7 @@ static void serial_write(const char *buf, int len) scdp->putc(*buf++); } -static void serial_edit_cmdline(char *buf, int len) +static void serial_edit_cmdline(char *buf, int len, unsigned int timeout) { int timer = 0, count; char ch, *cp; @@ -44,7 +44,7 @@ static void serial_edit_cmdline(char *buf, int len) cp = &buf[count]; count++; - while (timer++ < 5*1000) { + do { if (scdp->tstc()) { while (((ch = scdp->getc()) != '\n') && (ch != '\r')) { /* Test for backspace/delete */ @@ -70,7 +70,7 @@ static void serial_edit_cmdline(char *buf, int len) break; /* Exit 'timer' loop */ } udelay(1000); /* 1 msec */ - } + } while (timer++ < timeout); *cp = 0; } diff --git a/arch/powerpc/configs/85xx/ge_imp3a_defconfig b/arch/powerpc/configs/85xx/ge_imp3a_defconfig index dc939de9b5b0..b4c4b469e320 100644 --- a/arch/powerpc/configs/85xx/ge_imp3a_defconfig +++ b/arch/powerpc/configs/85xx/ge_imp3a_defconfig @@ -100,7 +100,6 @@ CONFIG_NETDEVICES=y CONFIG_BONDING=m CONFIG_DUMMY=m CONFIG_NETCONSOLE=y -CONFIG_NETPOLL_TRAP=y CONFIG_TUN=m # CONFIG_NET_VENDOR_3COM is not set CONFIG_FS_ENET=y diff --git a/arch/powerpc/configs/86xx/gef_ppc9a_defconfig b/arch/powerpc/configs/86xx/gef_ppc9a_defconfig index e5a648115ada..7cb9719abf3d 100644 --- a/arch/powerpc/configs/86xx/gef_ppc9a_defconfig +++ b/arch/powerpc/configs/86xx/gef_ppc9a_defconfig @@ -113,7 +113,6 @@ CONFIG_SLIP_COMPRESSED=y CONFIG_SLIP_SMART=y CONFIG_SLIP_MODE_SLIP6=y CONFIG_NETCONSOLE=y -CONFIG_NETPOLL_TRAP=y # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set diff 
--git a/arch/powerpc/configs/86xx/gef_sbc310_defconfig b/arch/powerpc/configs/86xx/gef_sbc310_defconfig index 8317b6010ba6..ecabf625d249 100644 --- a/arch/powerpc/configs/86xx/gef_sbc310_defconfig +++ b/arch/powerpc/configs/86xx/gef_sbc310_defconfig @@ -114,7 +114,6 @@ CONFIG_SLIP_COMPRESSED=y CONFIG_SLIP_SMART=y CONFIG_SLIP_MODE_SLIP6=y CONFIG_NETCONSOLE=y -CONFIG_NETPOLL_TRAP=y # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set diff --git a/arch/powerpc/configs/86xx/gef_sbc610_defconfig b/arch/powerpc/configs/86xx/gef_sbc610_defconfig index 124d66f0282c..4a4a86fb0d3d 100644 --- a/arch/powerpc/configs/86xx/gef_sbc610_defconfig +++ b/arch/powerpc/configs/86xx/gef_sbc610_defconfig @@ -165,7 +165,6 @@ CONFIG_SLIP_COMPRESSED=y CONFIG_SLIP_SMART=y CONFIG_SLIP_MODE_SLIP6=y CONFIG_NETCONSOLE=y -CONFIG_NETPOLL_TRAP=y CONFIG_INPUT_FF_MEMLESS=m # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set diff --git a/arch/powerpc/configs/86xx/sbc8641d_defconfig b/arch/powerpc/configs/86xx/sbc8641d_defconfig index 1e151594c691..99ea8746bbaf 100644 --- a/arch/powerpc/configs/86xx/sbc8641d_defconfig +++ b/arch/powerpc/configs/86xx/sbc8641d_defconfig @@ -167,7 +167,6 @@ CONFIG_SLIP_COMPRESSED=y CONFIG_SLIP_SMART=y CONFIG_SLIP_MODE_SLIP6=y CONFIG_NETCONSOLE=y -CONFIG_NETPOLL_TRAP=y # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set diff --git a/arch/powerpc/configs/c2k_defconfig b/arch/powerpc/configs/c2k_defconfig index 59734916986a..8a08d6dcb0b4 100644 --- a/arch/powerpc/configs/c2k_defconfig +++ b/arch/powerpc/configs/c2k_defconfig @@ -211,7 +211,6 @@ CONFIG_MV643XX_ETH=y # CONFIG_NETDEV_10000 is not set # CONFIG_ATM_DRIVERS is not set CONFIG_NETCONSOLE=m -CONFIG_NETPOLL_TRAP=y # CONFIG_INPUT_MOUSEDEV_PSAUX is not set CONFIG_INPUT_EVDEV=y # CONFIG_INPUT_KEYBOARD is not set diff --git a/arch/powerpc/configs/corenet32_smp_defconfig b/arch/powerpc/configs/corenet32_smp_defconfig index 688e9e4d29a1..611efe99faeb 100644 --- a/arch/powerpc/configs/corenet32_smp_defconfig +++ b/arch/powerpc/configs/corenet32_smp_defconfig @@ -144,6 +144,7 @@ CONFIG_RTC_DRV_DS1374=y CONFIG_RTC_DRV_DS3232=y CONFIG_UIO=y CONFIG_STAGING=y +CONFIG_MEMORY=y CONFIG_VIRT_DRIVERS=y CONFIG_FSL_HV_MANAGER=y CONFIG_EXT2_FS=y diff --git a/arch/powerpc/configs/corenet64_smp_defconfig b/arch/powerpc/configs/corenet64_smp_defconfig index 6db97e4414b2..be24a18c0d96 100644 --- a/arch/powerpc/configs/corenet64_smp_defconfig +++ b/arch/powerpc/configs/corenet64_smp_defconfig @@ -118,6 +118,7 @@ CONFIG_FSL_DMA=y CONFIG_VIRT_DRIVERS=y CONFIG_FSL_HV_MANAGER=y CONFIG_FSL_CORENET_CF=y +CONFIG_MEMORY=y CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y CONFIG_ISO9660_FS=m diff --git a/arch/powerpc/configs/mpc85xx_defconfig b/arch/powerpc/configs/mpc85xx_defconfig index d2c415489f72..02395fab19bd 100644 --- a/arch/powerpc/configs/mpc85xx_defconfig +++ b/arch/powerpc/configs/mpc85xx_defconfig @@ -215,6 +215,7 @@ CONFIG_RTC_DRV_DS3232=y CONFIG_RTC_DRV_CMOS=y CONFIG_DMADEVICES=y CONFIG_FSL_DMA=y +CONFIG_MEMORY=y # CONFIG_NET_DMA is not set CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y diff --git a/arch/powerpc/configs/mpc85xx_smp_defconfig b/arch/powerpc/configs/mpc85xx_smp_defconfig index 87460083dbc7..b5d1b82a1b43 100644 --- a/arch/powerpc/configs/mpc85xx_smp_defconfig +++ b/arch/powerpc/configs/mpc85xx_smp_defconfig @@ -216,6 +216,7 @@ CONFIG_RTC_DRV_DS3232=y CONFIG_RTC_DRV_CMOS=y CONFIG_DMADEVICES=y CONFIG_FSL_DMA=y +CONFIG_MEMORY=y # CONFIG_NET_DMA is not set CONFIG_EXT2_FS=y 
CONFIG_EXT3_FS=y diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig index 20bc5e2d368d..5830d735c5c3 100644 --- a/arch/powerpc/configs/ppc64_defconfig +++ b/arch/powerpc/configs/ppc64_defconfig @@ -154,7 +154,6 @@ CONFIG_WINDFARM_PM121=y CONFIG_BONDING=m CONFIG_DUMMY=m CONFIG_NETCONSOLE=y -CONFIG_NETPOLL_TRAP=y CONFIG_TUN=m CONFIG_VIRTIO_NET=m CONFIG_VHOST_NET=m diff --git a/arch/powerpc/configs/ppc64e_defconfig b/arch/powerpc/configs/ppc64e_defconfig index c3a3269b0865..67885b2d70aa 100644 --- a/arch/powerpc/configs/ppc64e_defconfig +++ b/arch/powerpc/configs/ppc64e_defconfig @@ -103,7 +103,6 @@ CONFIG_NETDEVICES=y CONFIG_BONDING=m CONFIG_DUMMY=m CONFIG_NETCONSOLE=y -CONFIG_NETPOLL_TRAP=y CONFIG_TUN=m CONFIG_VORTEX=y CONFIG_ACENIC=y diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig index fec5870f1818..ad6d6b5af7d7 100644 --- a/arch/powerpc/configs/ppc6xx_defconfig +++ b/arch/powerpc/configs/ppc6xx_defconfig @@ -629,7 +629,6 @@ CONFIG_SLIP_SMART=y CONFIG_NET_FC=y CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y -CONFIG_NETPOLL_TRAP=y CONFIG_VIRTIO_NET=m # CONFIG_INPUT_MOUSEDEV_PSAUX is not set CONFIG_INPUT_JOYDEV=m diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig index dd2a9cab4b50..1f97364017c7 100644 --- a/arch/powerpc/configs/pseries_defconfig +++ b/arch/powerpc/configs/pseries_defconfig @@ -133,7 +133,6 @@ CONFIG_DM_UEVENT=y CONFIG_BONDING=m CONFIG_DUMMY=m CONFIG_NETCONSOLE=y -CONFIG_NETPOLL_TRAP=y CONFIG_TUN=m CONFIG_VIRTIO_NET=m CONFIG_VHOST_NET=m diff --git a/arch/powerpc/configs/pseries_le_defconfig b/arch/powerpc/configs/pseries_le_defconfig index d2008887eb8c..ac7ca5852827 100644 --- a/arch/powerpc/configs/pseries_le_defconfig +++ b/arch/powerpc/configs/pseries_le_defconfig @@ -134,7 +134,6 @@ CONFIG_DM_UEVENT=y CONFIG_BONDING=m CONFIG_DUMMY=m CONFIG_NETCONSOLE=y -CONFIG_NETPOLL_TRAP=y CONFIG_TUN=m CONFIG_VIRTIO_NET=m CONFIG_VHOST_NET=m diff --git a/arch/powerpc/crypto/sha1.c b/arch/powerpc/crypto/sha1.c index f9e8b9491efc..d3feba5a275f 100644 --- a/arch/powerpc/crypto/sha1.c +++ b/arch/powerpc/crypto/sha1.c @@ -66,7 +66,7 @@ static int sha1_update(struct shash_desc *desc, const u8 *data, src = data + done; } while (done + 63 < len); - memset(temp, 0, sizeof(temp)); + memzero_explicit(temp, sizeof(temp)); partial = 0; } memcpy(sctx->buffer + partial, src, len - done); @@ -154,4 +154,4 @@ module_exit(sha1_powerpc_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm"); -MODULE_ALIAS("sha1-powerpc"); +MODULE_ALIAS_CRYPTO("sha1-powerpc"); diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild index 31e8f59aff38..382b28e364dc 100644 --- a/arch/powerpc/include/asm/Kbuild +++ b/arch/powerpc/include/asm/Kbuild @@ -1,6 +1,5 @@ generic-y += clkdev.h -generic-y += hash.h generic-y += irq_work.h generic-y += mcs_spinlock.h generic-y += preempt.h diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h index bab79a110c7b..a3bf5be111ff 100644 --- a/arch/powerpc/include/asm/barrier.h +++ b/arch/powerpc/include/asm/barrier.h @@ -33,12 +33,9 @@ #define mb() __asm__ __volatile__ ("sync" : : : "memory") #define rmb() __asm__ __volatile__ ("sync" : : : "memory") #define wmb() __asm__ __volatile__ ("sync" : : : "memory") -#define read_barrier_depends() do { } while(0) #define set_mb(var, value) do { var = value; mb(); } while (0) -#ifdef CONFIG_SMP - #ifdef __SUBARCH_HAS_LWSYNC # define SMPWMB LWSYNC 
#else @@ -46,20 +43,26 @@ #endif #define __lwsync() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory") +#define dma_rmb() __lwsync() +#define dma_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory") + +#ifdef CONFIG_SMP +#define smp_lwsync() __lwsync() #define smp_mb() mb() #define smp_rmb() __lwsync() #define smp_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory") -#define smp_read_barrier_depends() read_barrier_depends() #else -#define __lwsync() barrier() +#define smp_lwsync() barrier() #define smp_mb() barrier() #define smp_rmb() barrier() #define smp_wmb() barrier() -#define smp_read_barrier_depends() do { } while(0) #endif /* CONFIG_SMP */ +#define read_barrier_depends() do { } while (0) +#define smp_read_barrier_depends() do { } while (0) + /* * This is a barrier which prevents following instructions from being * started until the value of the argument x is known. For example, if @@ -72,7 +75,7 @@ #define smp_store_release(p, v) \ do { \ compiletime_assert_atomic_type(*p); \ - __lwsync(); \ + smp_lwsync(); \ ACCESS_ONCE(*p) = (v); \ } while (0) @@ -80,7 +83,7 @@ do { \ ({ \ typeof(*p) ___p1 = ACCESS_ONCE(*p); \ compiletime_assert_atomic_type(*p); \ - __lwsync(); \ + smp_lwsync(); \ ___p1; \ }) diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h index bd3bd573d0ae..59abc620f8e8 100644 --- a/arch/powerpc/include/asm/bitops.h +++ b/arch/powerpc/include/asm/bitops.h @@ -14,9 +14,9 @@ * * The bitop functions are defined to work on unsigned longs, so for a * ppc64 system the bits end up numbered: - * |63..............0|127............64|191...........128|255...........196| + * |63..............0|127............64|191...........128|255...........192| * and on ppc32: - * |31.....0|63....31|95....64|127...96|159..128|191..160|223..192|255..224| + * |31.....0|63....32|95....64|127...96|159..128|191..160|223..192|255..224| * * There are a few little-endian macros used mostly for filesystem * bitmaps, these work on similar bit arrays layouts, but @@ -213,7 +213,7 @@ static __inline__ unsigned long ffz(unsigned long x) return __ilog2(x & -x); } -static __inline__ int __ffs(unsigned long x) +static __inline__ unsigned long __ffs(unsigned long x) { return __ilog2(x & -x); } diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index daa5af91163c..22d5a7da9e68 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -448,13 +448,9 @@ extern const char *powerpc_base_platform; CPU_FTR_PURR | CPU_FTR_REAL_LE | CPU_FTR_DABRX) #define CPU_FTRS_COMPATIBLE (CPU_FTR_USE_TB | CPU_FTR_PPCAS_ARCH_V2) -#define CPU_FTRS_A2 (CPU_FTR_USE_TB | CPU_FTR_SMT | CPU_FTR_DBELL | \ - CPU_FTR_NOEXECUTE | CPU_FTR_NODSISRALIGN | \ - CPU_FTR_ICSWX | CPU_FTR_DABRX ) - #ifdef __powerpc64__ #ifdef CONFIG_PPC_BOOK3E -#define CPU_FTRS_POSSIBLE (CPU_FTRS_E6500 | CPU_FTRS_E5500 | CPU_FTRS_A2) +#define CPU_FTRS_POSSIBLE (CPU_FTRS_E6500 | CPU_FTRS_E5500) #else #define CPU_FTRS_POSSIBLE \ (CPU_FTRS_POWER4 | CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | \ @@ -505,13 +501,13 @@ enum { #ifdef __powerpc64__ #ifdef CONFIG_PPC_BOOK3E -#define CPU_FTRS_ALWAYS (CPU_FTRS_E6500 & CPU_FTRS_E5500 & CPU_FTRS_A2) +#define CPU_FTRS_ALWAYS (CPU_FTRS_E6500 & CPU_FTRS_E5500) #else #define CPU_FTRS_ALWAYS \ (CPU_FTRS_POWER4 & CPU_FTRS_PPC970 & CPU_FTRS_POWER5 & \ CPU_FTRS_POWER6 & CPU_FTRS_POWER7 & CPU_FTRS_CELL & \ CPU_FTRS_PA6T & CPU_FTRS_POWER8 & CPU_FTRS_POWER8E & \ - CPU_FTRS_POWER8_DD1 & CPU_FTRS_POSSIBLE) + 
CPU_FTRS_POWER8_DD1 & ~CPU_FTR_HVMODE & CPU_FTRS_POSSIBLE) #endif #else enum { diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h index ca07f9c27335..0652ebe117af 100644 --- a/arch/powerpc/include/asm/eeh.h +++ b/arch/powerpc/include/asm/eeh.h @@ -39,6 +39,7 @@ struct device_node; #define EEH_PROBE_MODE_DEV 0x04 /* From PCI device */ #define EEH_PROBE_MODE_DEVTREE 0x08 /* From device tree */ #define EEH_ENABLE_IO_FOR_LOG 0x10 /* Enable IO for log */ +#define EEH_EARLY_DUMP_LOG 0x20 /* Dump log immediately */ /* * Delay for PE reset, all in ms @@ -72,6 +73,7 @@ struct device_node; #define EEH_PE_ISOLATED (1 << 0) /* Isolated PE */ #define EEH_PE_RECOVERING (1 << 1) /* Recovering PE */ #define EEH_PE_CFG_BLOCKED (1 << 2) /* Block config access */ +#define EEH_PE_RESET (1 << 3) /* PE reset in progress */ #define EEH_PE_KEEP (1 << 8) /* Keep PE on hotplug */ #define EEH_PE_CFG_RESTRICTED (1 << 9) /* Block config on error */ diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h index 888d8f3f2524..57d289acb803 100644 --- a/arch/powerpc/include/asm/elf.h +++ b/arch/powerpc/include/asm/elf.h @@ -28,8 +28,7 @@ the loader. We need to make sure that it is out of the way of the program that it will "exec", and that there is sufficient room for the brk. */ -extern unsigned long randomize_et_dyn(unsigned long base); -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000)) +#define ELF_ET_DYN_BASE 0x20000000 #define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0) diff --git a/arch/powerpc/include/asm/fsl_guts.h b/arch/powerpc/include/asm/fsl_guts.h index 77ced0b3d81d..43b6bb1a4a9c 100644 --- a/arch/powerpc/include/asm/fsl_guts.h +++ b/arch/powerpc/include/asm/fsl_guts.h @@ -68,7 +68,10 @@ struct ccsr_guts { u8 res0b4[0xc0 - 0xb4]; __be32 iovselsr; /* 0x.00c0 - I/O voltage select status register Called 'elbcvselcr' on 86xx SOCs */ - u8 res0c4[0x224 - 0xc4]; + u8 res0c4[0x100 - 0xc4]; + __be32 rcwsr[16]; /* 0x.0100 - Reset Control Word Status registers + There are 16 registers */ + u8 res140[0x224 - 0x140]; __be32 iodelay1; /* 0x.0224 - IO delay control register 1 */ __be32 iodelay2; /* 0x.0228 - IO delay control register 2 */ u8 res22c[0x604 - 0x22c]; diff --git a/arch/powerpc/include/asm/hardirq.h b/arch/powerpc/include/asm/hardirq.h index 1bbb3013d6aa..8add8b861e8d 100644 --- a/arch/powerpc/include/asm/hardirq.h +++ b/arch/powerpc/include/asm/hardirq.h @@ -21,7 +21,12 @@ DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); #define __ARCH_IRQ_STAT -#define local_softirq_pending() __get_cpu_var(irq_stat).__softirq_pending +#define local_softirq_pending() __this_cpu_read(irq_stat.__softirq_pending) + +#define __ARCH_SET_SOFTIRQ_PENDING + +#define set_softirq_pending(x) __this_cpu_write(irq_stat.__softirq_pending, (x)) +#define or_softirq_pending(x) __this_cpu_or(irq_stat.__softirq_pending, (x)) static inline void ack_bad_irq(unsigned int irq) { diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h index 766b77d527ac..1d53a65b4ec1 100644 --- a/arch/powerpc/include/asm/hugetlb.h +++ b/arch/powerpc/include/asm/hugetlb.h @@ -48,7 +48,7 @@ static inline unsigned int hugepd_shift(hugepd_t hpd) #endif /* CONFIG_PPC_BOOK3S_64 */ -static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr, +static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr, unsigned pdshift) { /* @@ -58,9 +58,9 @@ static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr, */ unsigned long idx = 0; - pte_t *dir = 
hugepd_page(*hpdp); + pte_t *dir = hugepd_page(hpd); #ifndef CONFIG_PPC_FSL_BOOK3E - idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(*hpdp); + idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(hpd); #endif return dir + idx; @@ -193,7 +193,7 @@ static inline void flush_hugetlb_page(struct vm_area_struct *vma, } #define hugepd_shift(x) 0 -static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr, +static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr, unsigned pdshift) { return 0; diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h index 9eaf301ac952..a8d2ef30d473 100644 --- a/arch/powerpc/include/asm/io.h +++ b/arch/powerpc/include/asm/io.h @@ -855,9 +855,6 @@ static inline void * bus_to_virt(unsigned long address) #define clrsetbits_8(addr, clear, set) clrsetbits(8, addr, clear, set) -void __iomem *devm_ioremap_prot(struct device *dev, resource_size_t offset, - size_t size, unsigned long flags); - #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_IO_H */ diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h index 307347f8ddbd..c8175a3fe560 100644 --- a/arch/powerpc/include/asm/machdep.h +++ b/arch/powerpc/include/asm/machdep.h @@ -42,7 +42,7 @@ struct machdep_calls { unsigned long newpp, unsigned long vpn, int bpsize, int apsize, - int ssize, int local); + int ssize, unsigned long flags); void (*hpte_updateboltedpp)(unsigned long newpp, unsigned long ea, int psize, int ssize); @@ -60,7 +60,7 @@ struct machdep_calls { void (*hugepage_invalidate)(unsigned long vsid, unsigned long addr, unsigned char *hpte_slot_array, - int psize, int ssize); + int psize, int ssize, int local); /* special for kexec, to be called in real mode, linear mapping is * destroyed as well */ void (*hpte_clear_all)(void); @@ -142,7 +142,6 @@ struct machdep_calls { #endif void (*restart)(char *cmd); - void (*power_off)(void); void (*halt)(void); void (*panic)(char *str); void (*cpu_die)(void); @@ -292,10 +291,6 @@ struct machdep_calls { #ifdef CONFIG_ARCH_RANDOM int (*get_random_long)(unsigned long *v); #endif - -#ifdef CONFIG_MEMORY_HOTREMOVE - int (*remove_memory)(u64, u64); -#endif }; extern void e500_idle(void); @@ -343,16 +338,6 @@ extern sys_ctrler_t sys_ctrler; #endif /* CONFIG_PPC_PMAC */ - -/* Functions to produce codes on the leds. - * The SRC code should be unique for the message category and should - * be limited to the lower 24 bits (the upper 8 are set by these funcs), - * and (for boot & dump) should be sorted numerically in the order - * the events occur. - */ -/* Print a boot progress message. */ -void ppc64_boot_msg(unsigned int src, const char *msg); - static inline void log_error(char *buf, unsigned int err_type, int fatal) { if (ppc_md.log_error) diff --git a/arch/powerpc/include/asm/mmu-8xx.h b/arch/powerpc/include/asm/mmu-8xx.h index 3d11d3ce79ec..986b9e1e1044 100644 --- a/arch/powerpc/include/asm/mmu-8xx.h +++ b/arch/powerpc/include/asm/mmu-8xx.h @@ -56,6 +56,7 @@ * additional information from the MI_EPN, and MI_TWC registers. */ #define SPRN_MI_RPN 790 +#define MI_SPS16K 0x00000008 /* Small page size (0 = 4k, 1 = 16k) */ /* Define an RPN value for mapping kernel memory to large virtual * pages for boot initialization. This has real page number of 0, @@ -129,6 +130,7 @@ * additional information from the MD_EPN, and MD_TWC registers. 
*/ #define SPRN_MD_RPN 798 +#define MD_SPS16K 0x00000008 /* Small page size (0 = 4k, 1 = 16k) */ /* This is a temporary storage register that could be used to save * a processor working register during a tablewalk. diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h index aeebc94b2bce..4f13c3ed7acf 100644 --- a/arch/powerpc/include/asm/mmu-hash64.h +++ b/arch/powerpc/include/asm/mmu-hash64.h @@ -316,27 +316,33 @@ static inline unsigned long hpt_hash(unsigned long vpn, return hash & 0x7fffffffffUL; } +#define HPTE_LOCAL_UPDATE 0x1 +#define HPTE_NOHPTE_UPDATE 0x2 + extern int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid, pte_t *ptep, unsigned long trap, - unsigned int local, int ssize, int subpage_prot); + unsigned long flags, int ssize, int subpage_prot); extern int __hash_page_64K(unsigned long ea, unsigned long access, unsigned long vsid, pte_t *ptep, unsigned long trap, - unsigned int local, int ssize); + unsigned long flags, int ssize); struct mm_struct; unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap); -extern int hash_page_mm(struct mm_struct *mm, unsigned long ea, unsigned long access, unsigned long trap); -extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); +extern int hash_page_mm(struct mm_struct *mm, unsigned long ea, + unsigned long access, unsigned long trap, + unsigned long flags); +extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap, + unsigned long dsisr); int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid, - pte_t *ptep, unsigned long trap, int local, int ssize, - unsigned int shift, unsigned int mmu_psize); + pte_t *ptep, unsigned long trap, unsigned long flags, + int ssize, unsigned int shift, unsigned int mmu_psize); #ifdef CONFIG_TRANSPARENT_HUGEPAGE extern int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid, pmd_t *pmdp, unsigned long trap, - int local, int ssize, unsigned int psize); + unsigned long flags, int ssize, unsigned int psize); #else static inline int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid, pmd_t *pmdp, - unsigned long trap, int local, + unsigned long trap, unsigned long flags, int ssize, unsigned int psize) { BUG(); diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h index 9124b0ede1fc..5cd8d2fddba9 100644 --- a/arch/powerpc/include/asm/opal.h +++ b/arch/powerpc/include/asm/opal.h @@ -154,6 +154,10 @@ struct opal_sg_list { #define OPAL_HANDLE_HMI 98 #define OPAL_REGISTER_DUMP_REGION 101 #define OPAL_UNREGISTER_DUMP_REGION 102 +#define OPAL_WRITE_TPO 103 +#define OPAL_READ_TPO 104 +#define OPAL_IPMI_SEND 107 +#define OPAL_IPMI_RECV 108 #ifndef __ASSEMBLY__ @@ -284,62 +288,6 @@ enum OpalMessageType { OPAL_MSG_TYPE_MAX, }; -/* Machine check related definitions */ -enum OpalMCE_Version { - OpalMCE_V1 = 1, -}; - -enum OpalMCE_Severity { - OpalMCE_SEV_NO_ERROR = 0, - OpalMCE_SEV_WARNING = 1, - OpalMCE_SEV_ERROR_SYNC = 2, - OpalMCE_SEV_FATAL = 3, -}; - -enum OpalMCE_Disposition { - OpalMCE_DISPOSITION_RECOVERED = 0, - OpalMCE_DISPOSITION_NOT_RECOVERED = 1, -}; - -enum OpalMCE_Initiator { - OpalMCE_INITIATOR_UNKNOWN = 0, - OpalMCE_INITIATOR_CPU = 1, -}; - -enum OpalMCE_ErrorType { - OpalMCE_ERROR_TYPE_UNKNOWN = 0, - OpalMCE_ERROR_TYPE_UE = 1, - OpalMCE_ERROR_TYPE_SLB = 2, - OpalMCE_ERROR_TYPE_ERAT = 3, - OpalMCE_ERROR_TYPE_TLB = 4, -}; - -enum OpalMCE_UeErrorType { - OpalMCE_UE_ERROR_INDETERMINATE 
= 0, - OpalMCE_UE_ERROR_IFETCH = 1, - OpalMCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH = 2, - OpalMCE_UE_ERROR_LOAD_STORE = 3, - OpalMCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE = 4, -}; - -enum OpalMCE_SlbErrorType { - OpalMCE_SLB_ERROR_INDETERMINATE = 0, - OpalMCE_SLB_ERROR_PARITY = 1, - OpalMCE_SLB_ERROR_MULTIHIT = 2, -}; - -enum OpalMCE_EratErrorType { - OpalMCE_ERAT_ERROR_INDETERMINATE = 0, - OpalMCE_ERAT_ERROR_PARITY = 1, - OpalMCE_ERAT_ERROR_MULTIHIT = 2, -}; - -enum OpalMCE_TlbErrorType { - OpalMCE_TLB_ERROR_INDETERMINATE = 0, - OpalMCE_TLB_ERROR_PARITY = 1, - OpalMCE_TLB_ERROR_MULTIHIT = 2, -}; - enum OpalThreadStatus { OPAL_THREAD_INACTIVE = 0x0, OPAL_THREAD_STARTED = 0x1, @@ -452,52 +400,15 @@ struct opal_msg { __be64 params[8]; }; -struct opal_machine_check_event { - enum OpalMCE_Version version:8; /* 0x00 */ - uint8_t in_use; /* 0x01 */ - enum OpalMCE_Severity severity:8; /* 0x02 */ - enum OpalMCE_Initiator initiator:8; /* 0x03 */ - enum OpalMCE_ErrorType error_type:8; /* 0x04 */ - enum OpalMCE_Disposition disposition:8; /* 0x05 */ - uint8_t reserved_1[2]; /* 0x06 */ - uint64_t gpr3; /* 0x08 */ - uint64_t srr0; /* 0x10 */ - uint64_t srr1; /* 0x18 */ - union { /* 0x20 */ - struct { - enum OpalMCE_UeErrorType ue_error_type:8; - uint8_t effective_address_provided; - uint8_t physical_address_provided; - uint8_t reserved_1[5]; - uint64_t effective_address; - uint64_t physical_address; - uint8_t reserved_2[8]; - } ue_error; - - struct { - enum OpalMCE_SlbErrorType slb_error_type:8; - uint8_t effective_address_provided; - uint8_t reserved_1[6]; - uint64_t effective_address; - uint8_t reserved_2[16]; - } slb_error; - - struct { - enum OpalMCE_EratErrorType erat_error_type:8; - uint8_t effective_address_provided; - uint8_t reserved_1[6]; - uint64_t effective_address; - uint8_t reserved_2[16]; - } erat_error; +enum { + OPAL_IPMI_MSG_FORMAT_VERSION_1 = 1, +}; - struct { - enum OpalMCE_TlbErrorType tlb_error_type:8; - uint8_t effective_address_provided; - uint8_t reserved_1[6]; - uint64_t effective_address; - uint8_t reserved_2[16]; - } tlb_error; - } u; +struct opal_ipmi_msg { + uint8_t version; + uint8_t netfn; + uint8_t cmd; + uint8_t data[]; }; /* FSP memory errors handling */ @@ -819,6 +730,9 @@ int64_t opal_rtc_read(__be32 *year_month_day, __be64 *hour_minute_second_millisecond); int64_t opal_rtc_write(uint32_t year_month_day, uint64_t hour_minute_second_millisecond); +int64_t opal_tpo_read(uint64_t token, __be32 *year_mon_day, __be32 *hour_min); +int64_t opal_tpo_write(uint64_t token, uint32_t year_mon_day, + uint32_t hour_min); int64_t opal_cec_power_down(uint64_t request); int64_t opal_cec_reboot(void); int64_t opal_read_nvram(uint64_t buffer, uint64_t size, uint64_t offset); @@ -963,6 +877,10 @@ int64_t opal_handle_hmi(void); int64_t opal_register_dump_region(uint32_t id, uint64_t start, uint64_t end); int64_t opal_unregister_dump_region(uint32_t id); int64_t opal_pci_set_phb_cxl_mode(uint64_t phb_id, uint64_t mode, uint64_t pe_number); +int64_t opal_ipmi_send(uint64_t interface, struct opal_ipmi_msg *msg, + uint64_t msg_len); +int64_t opal_ipmi_recv(uint64_t interface, struct opal_ipmi_msg *msg, + uint64_t *msg_len); /* Internal functions */ extern int early_init_dt_scan_opal(unsigned long node, const char *uname, @@ -992,8 +910,6 @@ extern int opal_async_wait_response(uint64_t token, struct opal_msg *msg); extern int opal_get_sensor_data(u32 sensor_hndl, u32 *sensor_data); struct rtc_time; -extern int opal_set_rtc_time(struct rtc_time *tm); -extern void opal_get_rtc_time(struct rtc_time 
*tm); extern unsigned long opal_get_boot_time(void); extern void opal_nvram_init(void); extern void opal_flash_init(void); diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h index a5139ea6910b..24a386cbb928 100644 --- a/arch/powerpc/include/asm/paca.h +++ b/arch/powerpc/include/asm/paca.h @@ -42,7 +42,6 @@ extern unsigned int debug_smp_processor_id(void); /* from linux/smp.h */ #define get_slb_shadow() (get_paca()->slb_shadow_ptr) struct task_struct; -struct opal_machine_check_event; /* * Defines the layout of the paca. @@ -153,12 +152,6 @@ struct paca_struct { u64 tm_scratch; /* TM scratch area for reclaim */ #endif -#ifdef CONFIG_PPC_POWERNV - /* Pointer to OPAL machine check event structure set by the - * early exception handler for use by high level C handler - */ - struct opal_machine_check_event *opal_mc_evt; -#endif #ifdef CONFIG_PPC_BOOK3S_64 /* Exclusive emergency stack pointer for machine check exception. */ void *mc_emergency_sp; diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h index 26fe1ae15212..69c059887a2c 100644 --- a/arch/powerpc/include/asm/page.h +++ b/arch/powerpc/include/asm/page.h @@ -379,12 +379,14 @@ static inline int hugepd_ok(hugepd_t hpd) } #endif -#define is_hugepd(pdep) (hugepd_ok(*((hugepd_t *)(pdep)))) +#define is_hugepd(hpd) (hugepd_ok(hpd)) +#define pgd_huge pgd_huge int pgd_huge(pgd_t pgd); #else /* CONFIG_HUGETLB_PAGE */ #define is_hugepd(pdep) 0 #define pgd_huge(pgd) 0 #endif /* CONFIG_HUGETLB_PAGE */ +#define __hugepd(x) ((hugepd_t) { (x) }) struct page; extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg); diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/pgtable-ppc32.h index 945e47adf7db..234e07c47803 100644 --- a/arch/powerpc/include/asm/pgtable-ppc32.h +++ b/arch/powerpc/include/asm/pgtable-ppc32.h @@ -170,6 +170,25 @@ static inline unsigned long pte_update(pte_t *p, #ifdef PTE_ATOMIC_UPDATES unsigned long old, tmp; +#ifdef CONFIG_PPC_8xx + unsigned long tmp2; + + __asm__ __volatile__("\ +1: lwarx %0,0,%4\n\ + andc %1,%0,%5\n\ + or %1,%1,%6\n\ + /* 0x200 == Extended encoding, bit 22 */ \ + /* Bit 22 has to be 1 if neither _PAGE_USER nor _PAGE_RW are set */ \ + rlwimi %1,%1,32-2,0x200\n /* get _PAGE_USER */ \ + rlwinm %3,%1,32-1,0x200\n /* get _PAGE_RW */ \ + or %1,%3,%1\n\ + xori %1,%1,0x200\n" +" stwcx. 
%1,0,%4\n\ + bne- 1b" + : "=&r" (old), "=&r" (tmp), "=m" (*p), "=&r" (tmp2) + : "r" (p), "r" (clr), "r" (set), "m" (*p) + : "cc" ); +#else /* CONFIG_PPC_8xx */ __asm__ __volatile__("\ 1: lwarx %0,0,%3\n\ andc %1,%0,%4\n\ @@ -180,6 +199,7 @@ static inline unsigned long pte_update(pte_t *p, : "=&r" (old), "=&r" (tmp), "=m" (*p) : "r" (p), "r" (clr), "r" (set), "m" (*p) : "cc" ); +#endif /* CONFIG_PPC_8xx */ #else /* PTE_ATOMIC_UPDATES */ unsigned long old = pte_val(*p); *p = __pte((old & ~clr) | set); diff --git a/arch/powerpc/include/asm/pgtable-ppc64-4k.h b/arch/powerpc/include/asm/pgtable-ppc64-4k.h index 7b935683f268..132ee1d482c2 100644 --- a/arch/powerpc/include/asm/pgtable-ppc64-4k.h +++ b/arch/powerpc/include/asm/pgtable-ppc64-4k.h @@ -57,7 +57,21 @@ #define pgd_present(pgd) (pgd_val(pgd) != 0) #define pgd_clear(pgdp) (pgd_val(*(pgdp)) = 0) #define pgd_page_vaddr(pgd) (pgd_val(pgd) & ~PGD_MASKED_BITS) -#define pgd_page(pgd) virt_to_page(pgd_page_vaddr(pgd)) + +#ifndef __ASSEMBLY__ + +static inline pte_t pgd_pte(pgd_t pgd) +{ + return __pte(pgd_val(pgd)); +} + +static inline pgd_t pte_pgd(pte_t pte) +{ + return __pgd(pte_val(pte)); +} +extern struct page *pgd_page(pgd_t pgd); + +#endif /* !__ASSEMBLY__ */ #define pud_offset(pgdp, addr) \ (((pud_t *) pgd_page_vaddr(*(pgdp))) + \ diff --git a/arch/powerpc/include/asm/pgtable-ppc64-64k.h b/arch/powerpc/include/asm/pgtable-ppc64-64k.h index a56b82fb0609..1de35bbd02a6 100644 --- a/arch/powerpc/include/asm/pgtable-ppc64-64k.h +++ b/arch/powerpc/include/asm/pgtable-ppc64-64k.h @@ -38,4 +38,7 @@ /* Bits to mask out from a PGD/PUD to get to the PMD page */ #define PUD_MASKED_BITS 0x1ff +#define pgd_pte(pgd) (pud_pte(((pud_t){ pgd }))) +#define pte_pgd(pte) ((pgd_t)pte_pud(pte)) + #endif /* _ASM_POWERPC_PGTABLE_PPC64_64K_H */ diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h index ae153c40ab7c..b9dcc936e2d1 100644 --- a/arch/powerpc/include/asm/pgtable-ppc64.h +++ b/arch/powerpc/include/asm/pgtable-ppc64.h @@ -152,7 +152,7 @@ #define pmd_none(pmd) (!pmd_val(pmd)) #define pmd_bad(pmd) (!is_kernel_addr(pmd_val(pmd)) \ || (pmd_val(pmd) & PMD_BAD_BITS)) -#define pmd_present(pmd) (pmd_val(pmd) != 0) +#define pmd_present(pmd) (!pmd_none(pmd)) #define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0) #define pmd_page_vaddr(pmd) (pmd_val(pmd) & ~PMD_MASKED_BITS) extern struct page *pmd_page(pmd_t pmd); @@ -164,9 +164,21 @@ extern struct page *pmd_page(pmd_t pmd); #define pud_present(pud) (pud_val(pud) != 0) #define pud_clear(pudp) (pud_val(*(pudp)) = 0) #define pud_page_vaddr(pud) (pud_val(pud) & ~PUD_MASKED_BITS) -#define pud_page(pud) virt_to_page(pud_page_vaddr(pud)) +extern struct page *pud_page(pud_t pud); + +static inline pte_t pud_pte(pud_t pud) +{ + return __pte(pud_val(pud)); +} + +static inline pud_t pte_pud(pte_t pte) +{ + return __pud(pte_val(pte)); +} +#define pud_write(pud) pte_write(pud_pte(pud)) #define pgd_set(pgdp, pudp) ({pgd_val(*(pgdp)) = (unsigned long)(pudp);}) +#define pgd_write(pgd) pte_write(pgd_pte(pgd)) /* * Find an entry in a page-table-directory. We combine the address region @@ -422,7 +434,22 @@ extern void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd); extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd); - +/* + * + * For core kernel code by design pmd_trans_huge is never run on any hugetlbfs + * page. 
The hugetlbfs page table walking and mangling paths are totally + * separated from the core VM paths and they're differentiated by + * VM_HUGETLB being set on vm_flags well before any pmd_trans_huge could run. + * + * pmd_trans_huge() is defined as false at build time if + * CONFIG_TRANSPARENT_HUGEPAGE=n to optimize away code blocks at build + * time in such case. + * + * For ppc64 we need to differentiate explicit hugepages from THP, because + * for THP we also track the subpage details at the pmd level. We don't do + * that for explicit huge pages. + * + */ static inline int pmd_trans_huge(pmd_t pmd) { /* @@ -431,16 +458,6 @@ static inline int pmd_trans_huge(pmd_t pmd) return (pmd_val(pmd) & 0x3) && (pmd_val(pmd) & _PAGE_THP_HUGE); } -static inline int pmd_large(pmd_t pmd) -{ - /* - * leaf pte for huge page, bottom two bits != 00 - */ - if (pmd_trans_huge(pmd)) - return pmd_val(pmd) & _PAGE_PRESENT; - return 0; -} - static inline int pmd_trans_splitting(pmd_t pmd) { if (pmd_trans_huge(pmd)) @@ -451,6 +468,14 @@ static inline int pmd_trans_splitting(pmd_t pmd) extern int has_transparent_hugepage(void); #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ +static inline int pmd_large(pmd_t pmd) +{ + /* + * leaf pte for huge page, bottom two bits != 00 + */ + return ((pmd_val(pmd) & 0x3) != 0x0); +} + static inline pte_t pmd_pte(pmd_t pmd) { return __pte(pmd_val(pmd)); @@ -467,6 +492,7 @@ static inline pte_t *pmdp_ptep(pmd_t *pmd) } #define pmd_pfn(pmd) pte_pfn(pmd_pte(pmd)) +#define pmd_dirty(pmd) pte_dirty(pmd_pte(pmd)) #define pmd_young(pmd) pte_young(pmd_pte(pmd)) #define pmd_mkold(pmd) pte_pmd(pte_mkold(pmd_pte(pmd))) #define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd))) @@ -575,6 +601,5 @@ static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl, */ return true; } - #endif /* __ASSEMBLY__ */ #endif /* _ASM_POWERPC_PGTABLE_PPC64_H_ */ diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h index 316f9a5da173..a8805fee0df9 100644 --- a/arch/powerpc/include/asm/pgtable.h +++ b/arch/powerpc/include/asm/pgtable.h @@ -274,11 +274,9 @@ extern void paging_init(void); */ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *); -extern int gup_hugepd(hugepd_t *hugepd, unsigned pdshift, unsigned long addr, - unsigned long end, int write, struct page **pages, int *nr); - extern int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr, - unsigned long end, int write, struct page **pages, int *nr); + unsigned long end, int write, + struct page **pages, int *nr); #ifndef CONFIG_TRANSPARENT_HUGEPAGE #define pmd_large(pmd) 0 #define has_transparent_hugepage() 0 diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index 6f8536208049..1a5287759fc8 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -204,6 +204,7 @@ #define PPC_INST_ERATSX_DOT 0x7c000127 /* Misc instructions for BPF compiler */ +#define PPC_INST_LBZ 0x88000000 #define PPC_INST_LD 0xe8000000 #define PPC_INST_LHZ 0xa0000000 #define PPC_INST_LHBRX 0x7c00062c diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index dda7ac4c80bd..29c3798cf800 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h @@ -451,7 +451,7 @@ extern unsigned long cpuidle_disable; enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_POWERSAVE_OFF}; extern int powersave_nap; /* set if nap mode can be used in idle loop */ -extern void
power7_nap(int check_irq); +extern unsigned long power7_nap(int check_irq); extern void power7_sleep(void); extern void flush_instruction_cache(void); extern void hard_reset_now(void); diff --git a/arch/powerpc/include/asm/pte-8xx.h b/arch/powerpc/include/asm/pte-8xx.h index d44826e4ff97..daa4616e61c4 100644 --- a/arch/powerpc/include/asm/pte-8xx.h +++ b/arch/powerpc/include/asm/pte-8xx.h @@ -48,19 +48,22 @@ */ #define _PAGE_RW 0x0400 /* lsb PP bits, inverted in HW */ #define _PAGE_USER 0x0800 /* msb PP bits */ +/* set when neither _PAGE_USER nor _PAGE_RW are set */ +#define _PAGE_KNLRO 0x0200 #define _PMD_PRESENT 0x0001 #define _PMD_BAD 0x0ff0 #define _PMD_PAGE_MASK 0x000c #define _PMD_PAGE_8M 0x000c -#define _PTE_NONE_MASK _PAGE_ACCESSED +#define _PTE_NONE_MASK _PAGE_KNLRO /* Until my rework is finished, 8xx still needs atomic PTE updates */ #define PTE_ATOMIC_UPDATES 1 /* We need to add _PAGE_SHARED to kernel pages */ -#define _PAGE_KERNEL_RO (_PAGE_SHARED) +#define _PAGE_KERNEL_RO (_PAGE_SHARED | _PAGE_KNLRO) +#define _PAGE_KERNEL_ROX (_PAGE_EXEC | _PAGE_KNLRO) #define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE) #endif /* __KERNEL__ */ diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h index 11ba86e17631..fbdf18cf954c 100644 --- a/arch/powerpc/include/asm/setup.h +++ b/arch/powerpc/include/asm/setup.h @@ -8,7 +8,6 @@ extern void ppc_printk_progress(char *s, unsigned short hex); extern unsigned int rtas_data; extern int mem_init_done; /* set on boot once kmalloc can be called */ -extern int init_bootmem_done; /* set once bootmem is available */ extern unsigned long long memory_limit; extern unsigned long klimit; extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask); @@ -24,7 +23,7 @@ extern void reloc_got2(unsigned long); #define PTRRELOC(x) ((typeof(x)) add_reloc_offset((unsigned long)(x))) void check_for_initrd(void); -void do_init_bootmem(void); +void initmem_init(void); void setup_panic(void); #define ARCH_PANIC_TIMEOUT 180 diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h index b034ecdb7c74..ebc4f165690a 100644 --- a/arch/powerpc/include/asm/thread_info.h +++ b/arch/powerpc/include/asm/thread_info.h @@ -71,13 +71,12 @@ struct thread_info { #define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT) /* how to get the thread information struct from C */ +register unsigned long __current_r1 asm("r1"); static inline struct thread_info *current_thread_info(void) { - register unsigned long sp asm("r1"); - /* gcc4, at least, is smart enough to turn this into a single * rlwinm for ppc32 and clrrdi for ppc64 */ - return (struct thread_info *)(sp & ~(THREAD_SIZE-1)); + return (struct thread_info *)(__current_r1 & ~(THREAD_SIZE-1)); } #endif /* __ASSEMBLY__ */ diff --git a/arch/powerpc/include/asm/tlbflush.h b/arch/powerpc/include/asm/tlbflush.h index 2def01ed0cb2..23d351ca0303 100644 --- a/arch/powerpc/include/asm/tlbflush.h +++ b/arch/powerpc/include/asm/tlbflush.h @@ -107,14 +107,14 @@ extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch); static inline void arch_enter_lazy_mmu_mode(void) { - struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); + struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch); batch->active = 1; } static inline void arch_leave_lazy_mmu_mode(void) { - struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); + struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch); if (batch->index) __flush_tlb_pending(batch); @@ -125,9 +125,11 @@ static 
inline void arch_leave_lazy_mmu_mode(void) extern void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, - int ssize, int local); + int ssize, unsigned long flags); extern void flush_hash_range(unsigned long number, int local); - +extern void flush_hash_hugepage(unsigned long vsid, unsigned long addr, + pmd_t *pmdp, unsigned int psize, int ssize, + unsigned long flags); static inline void local_flush_tlb_mm(struct mm_struct *mm) { diff --git a/arch/powerpc/include/asm/vga.h b/arch/powerpc/include/asm/vga.h index a2eac409c1ec..e5f8dd366212 100644 --- a/arch/powerpc/include/asm/vga.h +++ b/arch/powerpc/include/asm/vga.h @@ -38,12 +38,10 @@ static inline u16 scr_readw(volatile const u16 *addr) #endif /* !CONFIG_VGA_CONSOLE && !CONFIG_MDA_CONSOLE */ -extern unsigned long vgacon_remap_base; - #ifdef __powerpc64__ #define VGA_MAP_MEM(x,s) ((unsigned long) ioremap((x), s)) #else -#define VGA_MAP_MEM(x,s) (x + vgacon_remap_base) +#define VGA_MAP_MEM(x,s) (x) #endif #define vga_readb(x) (*(x)) diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h index 0d050ea37a04..6997f4a271df 100644 --- a/arch/powerpc/include/asm/xics.h +++ b/arch/powerpc/include/asm/xics.h @@ -98,7 +98,7 @@ DECLARE_PER_CPU(struct xics_cppr, xics_cppr); static inline void xics_push_cppr(unsigned int vec) { - struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); + struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr); if (WARN_ON(os_cppr->index >= MAX_NUM_PRIORITIES - 1)) return; @@ -111,7 +111,7 @@ static inline void xics_push_cppr(unsigned int vec) static inline unsigned char xics_pop_cppr(void) { - struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); + struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr); if (WARN_ON(os_cppr->index < 1)) return LOWEST_PRIORITY; @@ -121,7 +121,7 @@ static inline unsigned char xics_pop_cppr(void) static inline void xics_set_base_cppr(unsigned char cppr) { - struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); + struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr); /* we only really want to set the priority when there's * just one cppr value on the stack @@ -133,7 +133,7 @@ static inline void xics_set_base_cppr(unsigned char cppr) static inline unsigned char xics_cppr_top(void) { - struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); + struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr); return os_cppr->stack[os_cppr->index]; } diff --git a/arch/powerpc/include/uapi/asm/socket.h b/arch/powerpc/include/uapi/asm/socket.h index a9c3e2e18c05..c046666038f8 100644 --- a/arch/powerpc/include/uapi/asm/socket.h +++ b/arch/powerpc/include/uapi/asm/socket.h @@ -87,4 +87,9 @@ #define SO_BPF_EXTENSIONS 48 +#define SO_INCOMING_CPU 49 + +#define SO_ATTACH_BPF 50 +#define SO_DETACH_BPF SO_DETACH_FILTER + #endif /* _ASM_POWERPC_SOCKET_H */ diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c index 34f55524d456..86150fbb42c3 100644 --- a/arch/powerpc/kernel/align.c +++ b/arch/powerpc/kernel/align.c @@ -908,7 +908,7 @@ int fix_alignment(struct pt_regs *regs) flush_fp_to_thread(current); } - if ((nb == 16)) { + if (nb == 16) { if (flags & F) { /* Special case for 16-byte FP loads and stores */ PPC_WARN_ALIGNMENT(fp_pair, regs); diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 9d7dede2847c..c161ef3f28a1 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -726,12 +726,5 @@ int main(void) arch.timing_last_enter.tv32.tbl)); #endif -#ifdef CONFIG_PPC_POWERNV - DEFINE(OPAL_MC_GPR3, 
offsetof(struct opal_machine_check_event, gpr3)); - DEFINE(OPAL_MC_SRR0, offsetof(struct opal_machine_check_event, srr0)); - DEFINE(OPAL_MC_SRR1, offsetof(struct opal_machine_check_event, srr1)); - DEFINE(PACA_OPAL_MC_EVT, offsetof(struct paca_struct, opal_mc_evt)); -#endif - return 0; } diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c index c78e6dac4d7d..cfa0f81a5bb0 100644 --- a/arch/powerpc/kernel/crash_dump.c +++ b/arch/powerpc/kernel/crash_dump.c @@ -12,7 +12,6 @@ #undef DEBUG #include <linux/crash_dump.h> -#include <linux/bootmem.h> #include <linux/io.h> #include <linux/memblock.h> #include <asm/code-patching.h> diff --git a/arch/powerpc/kernel/dbell.c b/arch/powerpc/kernel/dbell.c index d55c76c571f3..f4217819cc31 100644 --- a/arch/powerpc/kernel/dbell.c +++ b/arch/powerpc/kernel/dbell.c @@ -41,7 +41,7 @@ void doorbell_exception(struct pt_regs *regs) may_hard_irq_enable(); - __get_cpu_var(irq_stat).doorbell_irqs++; + __this_cpu_inc(irq_stat.doorbell_irqs); smp_ipi_demux(); diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index 2248a1999c64..e1b6d8e17289 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c @@ -143,6 +143,8 @@ static int __init eeh_setup(char *str) { if (!strcmp(str, "off")) eeh_add_flag(EEH_FORCE_DISABLED); + else if (!strcmp(str, "early_log")) + eeh_add_flag(EEH_EARLY_DUMP_LOG); return 1; } @@ -758,30 +760,41 @@ static void eeh_reset_pe_once(struct eeh_pe *pe) int eeh_reset_pe(struct eeh_pe *pe) { int flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE); - int i, rc; + int i, state, ret; + + /* Mark as reset and block config space */ + eeh_pe_state_mark(pe, EEH_PE_RESET | EEH_PE_CFG_BLOCKED); /* Take three shots at resetting the bus */ - for (i=0; i<3; i++) { + for (i = 0; i < 3; i++) { eeh_reset_pe_once(pe); /* * EEH_PE_ISOLATED is expected to be removed after * BAR restore. 
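The uapi/asm/socket.h hunk earlier in this section adds SO_INCOMING_CPU and SO_ATTACH_BPF for powerpc, in line with the corresponding generic socket-option additions in this cycle. A hedged user-space sketch of how these options are typically exercised; prog_fd is assumed to be the file descriptor of an already-loaded eBPF socket-filter program, and the fallback #defines cover libcs whose headers predate these options:

#include <sys/socket.h>
#include <stdio.h>

#ifndef SO_ATTACH_BPF
#define SO_ATTACH_BPF 50	/* value from the powerpc/generic uapi headers */
#endif
#ifndef SO_INCOMING_CPU
#define SO_INCOMING_CPU 49
#endif

/* sock: an open socket; prog_fd: fd of a loaded eBPF socket filter. */
static void example_socket_opts(int sock, int prog_fd)
{
	int cpu;
	socklen_t len = sizeof(cpu);

	/* Attach the eBPF program as a socket filter. */
	if (setsockopt(sock, SOL_SOCKET, SO_ATTACH_BPF, &prog_fd, sizeof(prog_fd)))
		perror("SO_ATTACH_BPF");

	/* Ask which CPU the last packet for this socket was processed on. */
	if (!getsockopt(sock, SOL_SOCKET, SO_INCOMING_CPU, &cpu, &len))
		printf("last packet processed on CPU %d\n", cpu);
}
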
*/ - rc = eeh_ops->wait_state(pe, PCI_BUS_RESET_WAIT_MSEC); - if ((rc & flags) == flags) - return 0; + state = eeh_ops->wait_state(pe, PCI_BUS_RESET_WAIT_MSEC); + if ((state & flags) == flags) { + ret = 0; + goto out; + } - if (rc < 0) { - pr_err("%s: Unrecoverable slot failure on PHB#%d-PE#%x", + if (state < 0) { + pr_warn("%s: Unrecoverable slot failure on PHB#%d-PE#%x", __func__, pe->phb->global_number, pe->addr); - return -1; + ret = -ENOTRECOVERABLE; + goto out; } - pr_err("EEH: bus reset %d failed on PHB#%d-PE#%x, rc=%d\n", - i+1, pe->phb->global_number, pe->addr, rc); + + /* We might run out of credits */ + ret = -EIO; + pr_warn("%s: Failure %d resetting PHB#%x-PE#%x\n (%d)\n", + __func__, state, pe->phb->global_number, pe->addr, (i + 1)); } - return -1; +out: + eeh_pe_state_clear(pe, EEH_PE_RESET | EEH_PE_CFG_BLOCKED); + return ret; } /** @@ -920,11 +933,8 @@ int eeh_init(void) pr_warn("%s: Platform EEH operation not found\n", __func__); return -EEXIST; - } else if ((ret = eeh_ops->init())) { - pr_warn("%s: Failed to call platform init function (%d)\n", - __func__, ret); + } else if ((ret = eeh_ops->init())) return ret; - } /* Initialize EEH event */ ret = eeh_event_init(); @@ -1209,6 +1219,7 @@ int eeh_unfreeze_pe(struct eeh_pe *pe, bool sw_state) static struct pci_device_id eeh_reset_ids[] = { { PCI_DEVICE(0x19a2, 0x0710) }, /* Emulex, BE */ { PCI_DEVICE(0x10df, 0xe220) }, /* Emulex, Lancer */ + { PCI_DEVICE(0x14e4, 0x1657) }, /* Broadcom BCM5719 */ { 0 } }; diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c index 6535936bdf27..b17e793ba67e 100644 --- a/arch/powerpc/kernel/eeh_driver.c +++ b/arch/powerpc/kernel/eeh_driver.c @@ -528,13 +528,11 @@ int eeh_pe_reset_and_recover(struct eeh_pe *pe) eeh_pe_dev_traverse(pe, eeh_report_error, &result); /* Issue reset */ - eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED); ret = eeh_reset_pe(pe); if (ret) { - eeh_pe_state_clear(pe, EEH_PE_RECOVERING | EEH_PE_CFG_BLOCKED); + eeh_pe_state_clear(pe, EEH_PE_RECOVERING); return ret; } - eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED); /* Unfreeze the PE */ ret = eeh_clear_pe_frozen_state(pe, true); @@ -601,19 +599,15 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus) * config accesses. So we prefer to block them. However, controlled * PCI config accesses initiated from EEH itself are allowed. */ - eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED); rc = eeh_reset_pe(pe); - if (rc) { - eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED); + if (rc) return rc; - } pci_lock_rescan_remove(); /* Restore PE */ eeh_ops->configure_bridge(pe); eeh_pe_restore_bars(pe); - eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED); /* Clear frozen state */ rc = eeh_clear_pe_frozen_state(pe, false); diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index 22b45a4955cd..10a093579191 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -1424,12 +1424,18 @@ _GLOBAL(ftrace_graph_caller) lwz r4, 44(r1) subi r4, r4, MCOUNT_INSN_SIZE - /* get the parent address */ - addi r3, r1, 52 + /* Grab the LR out of the caller stack frame */ + lwz r3,52(r1) bl prepare_ftrace_return nop + /* + * prepare_ftrace_return gives us the address we divert to. + * Change the LR in the callers stack frame to this. 
+ */ + stw r3,52(r1) + MCOUNT_RESTORE_FRAME /* old link register ends up in ctr reg */ bctr @@ -1457,4 +1463,4 @@ _GLOBAL(return_to_handler) blr #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ -#endif /* CONFIG_MCOUNT */ +#endif /* CONFIG_FUNCTION_TRACER */ diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 0905c8da90f1..194e46dcf08d 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -1227,13 +1227,20 @@ _GLOBAL(ftrace_graph_caller) ld r4, 128(r1) subi r4, r4, MCOUNT_INSN_SIZE - /* get the parent address */ + /* Grab the LR out of the caller stack frame */ ld r11, 112(r1) - addi r3, r11, 16 + ld r3, 16(r11) bl prepare_ftrace_return nop + /* + * prepare_ftrace_return gives us the address we divert to. + * Change the LR in the callers stack frame to this. + */ + ld r11, 112(r1) + std r3, 16(r11) + ld r0, 128(r1) mtlr r0 addi r1, r1, 112 @@ -1241,28 +1248,6 @@ _GLOBAL(ftrace_graph_caller) _GLOBAL(return_to_handler) /* need to save return values */ - std r4, -24(r1) - std r3, -16(r1) - std r31, -8(r1) - mr r31, r1 - stdu r1, -112(r1) - - bl ftrace_return_to_handler - nop - - /* return value has real return address */ - mtlr r3 - - ld r1, 0(r1) - ld r4, -24(r1) - ld r3, -16(r1) - ld r31, -8(r1) - - /* Jump back to real return address */ - blr - -_GLOBAL(mod_return_to_handler) - /* need to save return values */ std r4, -32(r1) std r3, -24(r1) /* save TOC */ @@ -1272,7 +1257,7 @@ _GLOBAL(mod_return_to_handler) stdu r1, -112(r1) /* - * We are in a module using the module's TOC. + * We might be called from a module. * Switch to our TOC to run inside the core kernel. */ ld r2, PACATOC(r13) diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 72e783ea0681..db08382e19f1 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -131,6 +131,8 @@ BEGIN_FTR_SECTION 1: #endif + /* Return SRR1 from power7_nap() */ + mfspr r3,SPRN_SRR1 beq cr1,2f b power7_wakeup_noloss 2: b power7_wakeup_loss @@ -292,15 +294,26 @@ decrementer_pSeries: . = 0xc00 .globl system_call_pSeries system_call_pSeries: - HMT_MEDIUM + /* + * If CONFIG_KVM_BOOK3S_64_HANDLER is set, save the PPR (on systems + * that support it) before changing to HMT_MEDIUM. That allows the KVM + * code to save that value into the guest state (it is the guest's PPR + * value). Otherwise just change to HMT_MEDIUM as userspace has + * already saved the PPR. + */ #ifdef CONFIG_KVM_BOOK3S_64_HANDLER SET_SCRATCH0(r13) GET_PACA(r13) std r9,PACA_EXGEN+EX_R9(r13) + OPT_GET_SPR(r9, SPRN_PPR, CPU_FTR_HAS_PPR); + HMT_MEDIUM; std r10,PACA_EXGEN+EX_R10(r13) + OPT_SAVE_REG_TO_PACA(PACA_EXGEN+EX_PPR, r9, CPU_FTR_HAS_PPR); mfcr r9 KVMTEST(0xc00) GET_SCRATCH0(r13) +#else + HMT_MEDIUM; #endif SYSCALL_PSERIES_1 SYSCALL_PSERIES_2_RFID @@ -1301,23 +1314,6 @@ hmi_exception_after_realmode: EXCEPTION_PROLOG_0(PACA_EXGEN) b hmi_exception_hv -#ifdef CONFIG_PPC_POWERNV -_GLOBAL(opal_mc_secondary_handler) - HMT_MEDIUM_PPR_DISCARD - SET_SCRATCH0(r13) - GET_PACA(r13) - clrldi r3,r3,2 - tovirt(r3,r3) - std r3,PACA_OPAL_MC_EVT(r13) - ld r13,OPAL_MC_SRR0(r3) - mtspr SPRN_SRR0,r13 - ld r13,OPAL_MC_SRR1(r3) - mtspr SPRN_SRR1,r13 - ld r3,OPAL_MC_GPR3(r3) - GET_SCRATCH0(r13) - b machine_check_pSeries -#endif /* CONFIG_PPC_POWERNV */ - #define MACHINE_CHECK_HANDLER_WINDUP \ /* Clear MSR_RI before setting SRR0 and SRR1. 
*/\ @@ -1571,9 +1567,11 @@ do_hash_page: * r3 contains the faulting address * r4 contains the required access permissions * r5 contains the trap number + * r6 contains dsisr * * at return r3 = 0 for success, 1 for page fault, negative for error */ + ld r6,_DSISR(r1) bl hash_page /* build HPTE if possible */ cmpdi r3,0 /* see if hash_page succeeded */ diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c index 390311c0f03d..44d4d8eb3c85 100644 --- a/arch/powerpc/kernel/ftrace.c +++ b/arch/powerpc/kernel/ftrace.c @@ -449,7 +449,7 @@ void ftrace_replace_code(int enable) rec = ftrace_rec_iter_record(iter); ret = __ftrace_replace_code(rec, enable); if (ret) { - ftrace_bug(ret, rec->ip); + ftrace_bug(ret, rec); return; } } @@ -510,79 +510,36 @@ int ftrace_disable_ftrace_graph_caller(void) } #endif /* CONFIG_DYNAMIC_FTRACE */ -#ifdef CONFIG_PPC64 -extern void mod_return_to_handler(void); -#endif - /* * Hook the return address and push it in the stack of return addrs - * in current thread info. + * in current thread info. Return the address we want to divert to. */ -void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) +unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip) { - unsigned long old; - int faulted; struct ftrace_graph_ent trace; - unsigned long return_hooker = (unsigned long)&return_to_handler; + unsigned long return_hooker; if (unlikely(ftrace_graph_is_dead())) - return; + goto out; if (unlikely(atomic_read(¤t->tracing_graph_pause))) - return; - -#ifdef CONFIG_PPC64 - /* non core kernel code needs to save and restore the TOC */ - if (REGION_ID(self_addr) != KERNEL_REGION_ID) - return_hooker = (unsigned long)&mod_return_to_handler; -#endif - - return_hooker = ppc_function_entry((void *)return_hooker); + goto out; - /* - * Protect against fault, even if it shouldn't - * happen. This tool is too much intrusive to - * ignore such a protection. - */ - asm volatile( - "1: " PPC_LL "%[old], 0(%[parent])\n" - "2: " PPC_STL "%[return_hooker], 0(%[parent])\n" - " li %[faulted], 0\n" - "3:\n" - - ".section .fixup, \"ax\"\n" - "4: li %[faulted], 1\n" - " b 3b\n" - ".previous\n" - - ".section __ex_table,\"a\"\n" - PPC_LONG_ALIGN "\n" - PPC_LONG "1b,4b\n" - PPC_LONG "2b,4b\n" - ".previous" - - : [old] "=&r" (old), [faulted] "=r" (faulted) - : [parent] "r" (parent), [return_hooker] "r" (return_hooker) - : "memory" - ); - - if (unlikely(faulted)) { - ftrace_graph_stop(); - WARN_ON(1); - return; - } + return_hooker = ppc_function_entry(return_to_handler); - trace.func = self_addr; + trace.func = ip; trace.depth = current->curr_ret_stack + 1; /* Only trace if the calling function expects to */ - if (!ftrace_graph_entry(&trace)) { - *parent = old; - return; - } + if (!ftrace_graph_entry(&trace)) + goto out; + + if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY) + goto out; - if (ftrace_push_return_trace(old, self_addr, &trace.depth, 0) == -EBUSY) - *parent = old; + parent = return_hooker; +out: + return parent; } #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S index fafff8dbd5d9..d99aac0d69f1 100644 --- a/arch/powerpc/kernel/head_8xx.S +++ b/arch/powerpc/kernel/head_8xx.S @@ -33,13 +33,31 @@ /* Macro to make the code more readable. 
*/ #ifdef CONFIG_8xx_CPU6 -#define DO_8xx_CPU6(val, reg) \ - li reg, val; \ - stw reg, 12(r0); \ - lwz reg, 12(r0); +#define SPRN_MI_TWC_ADDR 0x2b80 +#define SPRN_MI_RPN_ADDR 0x2d80 +#define SPRN_MD_TWC_ADDR 0x3b80 +#define SPRN_MD_RPN_ADDR 0x3d80 + +#define MTSPR_CPU6(spr, reg, treg) \ + li treg, spr##_ADDR; \ + stw treg, 12(r0); \ + lwz treg, 12(r0); \ + mtspr spr, reg #else -#define DO_8xx_CPU6(val, reg) +#define MTSPR_CPU6(spr, reg, treg) \ + mtspr spr, reg #endif + +/* + * Value for the bits that have fixed value in RPN entries. + * Also used for tagging DAR for DTLBerror. + */ +#ifdef CONFIG_PPC_16K_PAGES +#define RPN_PATTERN (0x00f0 | MD_SPS16K) +#else +#define RPN_PATTERN 0x00f0 +#endif + __HEAD _ENTRY(_stext); _ENTRY(_start); @@ -65,13 +83,6 @@ _ENTRY(_start); * 8M 1:1. I also mapped an additional I/O space 1:1 so we can get to * the "internal" processor registers before MMU_init is called. * - * The TLB code currently contains a major hack. Since I use the condition - * code register, I have to save and restore it. I am out of registers, so - * I just store it in memory location 0 (the TLB handlers are not reentrant). - * To avoid making any decisions, I need to use the "segment" valid bit - * in the first level table, but that would require many changes to the - * Linux page directory/table functions that I don't want to do right now. - * * -- Dan */ .globl __start @@ -211,7 +222,7 @@ MachineCheck: EXCEPTION_PROLOG mfspr r4,SPRN_DAR stw r4,_DAR(r11) - li r5,0x00f0 + li r5,RPN_PATTERN mtspr SPRN_DAR,r5 /* Tag DAR, to be used in DTLB Error */ mfspr r5,SPRN_DSISR stw r5,_DSISR(r11) @@ -219,30 +230,16 @@ MachineCheck: EXC_XFER_STD(0x200, machine_check_exception) /* Data access exception. - * This is "never generated" by the MPC8xx. We jump to it for other - * translation errors. + * This is "never generated" by the MPC8xx. */ . = 0x300 DataAccess: - EXCEPTION_PROLOG - mfspr r10,SPRN_DSISR - stw r10,_DSISR(r11) - mr r5,r10 - mfspr r4,SPRN_DAR - li r10,0x00f0 - mtspr SPRN_DAR,r10 /* Tag DAR, to be used in DTLB Error */ - EXC_XFER_LITE(0x300, handle_page_fault) /* Instruction access exception. - * This is "never generated" by the MPC8xx. We jump to it for other - * translation errors. + * This is "never generated" by the MPC8xx. */ . = 0x400 InstructionAccess: - EXCEPTION_PROLOG - mr r4,r12 - mr r5,r9 - EXC_XFER_LITE(0x400, handle_page_fault) /* External interrupt */ EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE) @@ -253,7 +250,7 @@ Alignment: EXCEPTION_PROLOG mfspr r4,SPRN_DAR stw r4,_DAR(r11) - li r5,0x00f0 + li r5,RPN_PATTERN mtspr SPRN_DAR,r5 /* Tag DAR, to be used in DTLB Error */ mfspr r5,SPRN_DSISR stw r5,_DSISR(r11) @@ -292,8 +289,8 @@ SystemCall: . = 0x1100 /* * For the MPC8xx, this is a software tablewalk to load the instruction - * TLB. It is modelled after the example in the Motorola manual. The task - * switch loads the M_TWB register with the pointer to the first level table. + * TLB. The task switch loads the M_TW register with the pointer to the first + * level table. * If we discover there is no second level table (value is zero) or if there * is an invalid pte, we load that into the TLB, which causes another fault * into the TLB Error interrupt where we can handle such problems. 
@@ -302,20 +299,17 @@ SystemCall: */ InstructionTLBMiss: #ifdef CONFIG_8xx_CPU6 - stw r3, 8(r0) + mtspr SPRN_DAR, r3 #endif EXCEPTION_PROLOG_0 mtspr SPRN_SPRG_SCRATCH2, r10 mfspr r10, SPRN_SRR0 /* Get effective address of fault */ #ifdef CONFIG_8xx_CPU15 - addi r11, r10, 0x1000 + addi r11, r10, PAGE_SIZE tlbie r11 - addi r11, r10, -0x1000 + addi r11, r10, -PAGE_SIZE tlbie r11 #endif - DO_8xx_CPU6(0x3780, r3) - mtspr SPRN_MD_EPN, r10 /* Have to use MD_EPN for walk, MI_EPN can't */ - mfspr r10, SPRN_M_TWB /* Get level 1 table entry address */ /* If we are faulting a kernel address, we have to use the * kernel page tables. @@ -323,32 +317,37 @@ InstructionTLBMiss: #ifdef CONFIG_MODULES /* Only modules will cause ITLB Misses as we always * pin the first 8MB of kernel memory */ - andi. r11, r10, 0x0800 /* Address >= 0x80000000 */ + andis. r11, r10, 0x8000 /* Address >= 0x80000000 */ +#endif + mfspr r11, SPRN_M_TW /* Get level 1 table base address */ +#ifdef CONFIG_MODULES beq 3f - lis r11, swapper_pg_dir@h - ori r11, r11, swapper_pg_dir@l - rlwimi r10, r11, 0, 2, 19 + lis r11, (swapper_pg_dir-PAGE_OFFSET)@h + ori r11, r11, (swapper_pg_dir-PAGE_OFFSET)@l 3: #endif - lwz r11, 0(r10) /* Get the level 1 entry */ + /* Extract level 1 index */ + rlwinm r10, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29 + lwzx r11, r10, r11 /* Get the level 1 entry */ rlwinm. r10, r11,0,0,19 /* Extract page descriptor page address */ beq 2f /* If zero, don't try to find a pte */ /* We have a pte table, so load the MI_TWC with the attributes * for this "segment." */ - ori r11,r11,1 /* Set valid bit */ - DO_8xx_CPU6(0x2b80, r3) - mtspr SPRN_MI_TWC, r11 /* Set segment attributes */ - DO_8xx_CPU6(0x3b80, r3) - mtspr SPRN_MD_TWC, r11 /* Load pte table base address */ - mfspr r11, SPRN_MD_TWC /* ....and get the pte address */ - lwz r10, 0(r11) /* Get the pte */ + MTSPR_CPU6(SPRN_MI_TWC, r11, r3) /* Set segment attributes */ + mfspr r11, SPRN_SRR0 /* Get effective address of fault */ + /* Extract level 2 index */ + rlwinm r11, r11, 32 - (PAGE_SHIFT - 2), 32 - PAGE_SHIFT, 29 + lwzx r10, r10, r11 /* Get the pte */ #ifdef CONFIG_SWAP andi. r11, r10, _PAGE_ACCESSED | _PAGE_PRESENT cmpwi cr0, r11, _PAGE_ACCESSED | _PAGE_PRESENT + li r11, RPN_PATTERN bne- cr0, 2f +#else + li r11, RPN_PATTERN #endif /* The Linux PTE won't go exactly into the MMU TLB. * Software indicator bits 21 and 28 must be clear. @@ -356,62 +355,63 @@ InstructionTLBMiss: * set. All other Linux PTE bits control the behavior * of the MMU. */ - li r11, 0x00f0 rlwimi r10, r11, 0, 0x07f8 /* Set 24-27, clear 21-23,28 */ - DO_8xx_CPU6(0x2d80, r3) - mtspr SPRN_MI_RPN, r10 /* Update TLB entry */ + MTSPR_CPU6(SPRN_MI_RPN, r10, r3) /* Update TLB entry */ /* Restore registers */ #ifdef CONFIG_8xx_CPU6 - lwz r3, 8(r0) + mfspr r3, SPRN_DAR + mtspr SPRN_DAR, r11 /* Tag DAR */ #endif mfspr r10, SPRN_SPRG_SCRATCH2 EXCEPTION_EPILOG_0 rfi 2: - mfspr r11, SPRN_SRR1 + mfspr r10, SPRN_SRR1 /* clear all error bits as TLB Miss * sets a few unconditionally */ - rlwinm r11, r11, 0, 0xffff - mtspr SPRN_SRR1, r11 + rlwinm r10, r10, 0, 0xffff + mtspr SPRN_SRR1, r10 /* Restore registers */ #ifdef CONFIG_8xx_CPU6 - lwz r3, 8(r0) + mfspr r3, SPRN_DAR + mtspr SPRN_DAR, r11 /* Tag DAR */ #endif mfspr r10, SPRN_SPRG_SCRATCH2 - EXCEPTION_EPILOG_0 - b InstructionAccess + b InstructionTLBError1 . 
= 0x1200 DataStoreTLBMiss: #ifdef CONFIG_8xx_CPU6 - stw r3, 8(r0) + mtspr SPRN_DAR, r3 #endif EXCEPTION_PROLOG_0 mtspr SPRN_SPRG_SCRATCH2, r10 - mfspr r10, SPRN_M_TWB /* Get level 1 table entry address */ + mfspr r10, SPRN_MD_EPN /* If we are faulting a kernel address, we have to use the * kernel page tables. */ - andi. r11, r10, 0x0800 + andis. r11, r10, 0x8000 + mfspr r11, SPRN_M_TW /* Get level 1 table base address */ beq 3f - lis r11, swapper_pg_dir@h - ori r11, r11, swapper_pg_dir@l - rlwimi r10, r11, 0, 2, 19 + lis r11, (swapper_pg_dir-PAGE_OFFSET)@h + ori r11, r11, (swapper_pg_dir-PAGE_OFFSET)@l 3: - lwz r11, 0(r10) /* Get the level 1 entry */ + /* Extract level 1 index */ + rlwinm r10, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29 + lwzx r11, r10, r11 /* Get the level 1 entry */ rlwinm. r10, r11,0,0,19 /* Extract page descriptor page address */ beq 2f /* If zero, don't try to find a pte */ /* We have a pte table, so load fetch the pte from the table. */ - ori r11, r11, 1 /* Set valid bit in physical L2 page */ - DO_8xx_CPU6(0x3b80, r3) - mtspr SPRN_MD_TWC, r11 /* Load pte table base address */ - mfspr r10, SPRN_MD_TWC /* ....and get the pte address */ + mfspr r10, SPRN_MD_EPN /* Get address of fault */ + /* Extract level 2 index */ + rlwinm r10, r10, 32 - (PAGE_SHIFT - 2), 32 - PAGE_SHIFT, 29 + rlwimi r10, r11, 0, 0, 32 - PAGE_SHIFT - 1 /* Add level 2 base */ lwz r10, 0(r10) /* Get the pte */ /* Insert the Guarded flag into the TWC from the Linux PTE. @@ -425,8 +425,7 @@ DataStoreTLBMiss: * It is bit 25 in the Linux PTE and bit 30 in the TWC */ rlwimi r11, r10, 32-5, 30, 30 - DO_8xx_CPU6(0x3b80, r3) - mtspr SPRN_MD_TWC, r11 + MTSPR_CPU6(SPRN_MD_TWC, r11, r3) /* Both _PAGE_ACCESSED and _PAGE_PRESENT has to be set. * We also need to know if the insn is a load/store, so: @@ -442,14 +441,8 @@ DataStoreTLBMiss: and r11, r11, r10 rlwimi r10, r11, 0, _PAGE_PRESENT #endif - /* Honour kernel RO, User NA */ - /* 0x200 == Extended encoding, bit 22 */ - rlwimi r10, r10, 32-2, 0x200 /* Copy USER to bit 22, 0x200 */ - /* r11 = (r10 & _PAGE_RW) >> 1 */ - rlwinm r11, r10, 32-1, 0x200 - or r10, r11, r10 - /* invert RW and 0x200 bits */ - xori r10, r10, _PAGE_RW | 0x200 + /* invert RW */ + xori r10, r10, _PAGE_RW /* The Linux PTE won't go exactly into the MMU TLB. * Software indicator bits 22 and 28 must be clear. @@ -457,14 +450,13 @@ DataStoreTLBMiss: * set. All other Linux PTE bits control the behavior * of the MMU. */ -2: li r11, 0x00f0 +2: li r11, RPN_PATTERN rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */ - DO_8xx_CPU6(0x3d80, r3) - mtspr SPRN_MD_RPN, r10 /* Update TLB entry */ + MTSPR_CPU6(SPRN_MD_RPN, r10, r3) /* Update TLB entry */ /* Restore registers */ #ifdef CONFIG_8xx_CPU6 - lwz r3, 8(r0) + mfspr r3, SPRN_DAR #endif mtspr SPRN_DAR, r11 /* Tag DAR */ mfspr r10, SPRN_SPRG_SCRATCH2 @@ -477,7 +469,17 @@ DataStoreTLBMiss: */ . = 0x1300 InstructionTLBError: - b InstructionAccess + EXCEPTION_PROLOG_0 +InstructionTLBError1: + EXCEPTION_PROLOG_1 + EXCEPTION_PROLOG_2 + mr r4,r12 + mr r5,r9 + andis. r10,r5,0x4000 + beq+ 1f + tlbie r4 + /* 0x400 is InstructionAccess exception, needed by bad_page_fault() */ +1: EXC_XFER_LITE(0x400, handle_page_fault) /* This is the data TLB error on the MPC8xx. This could be due to * many reasons, including a dirty update to a pte. We bail out to @@ -488,11 +490,21 @@ DataTLBError: EXCEPTION_PROLOG_0 mfspr r11, SPRN_DAR - cmpwi cr0, r11, 0x00f0 + cmpwi cr0, r11, RPN_PATTERN beq- FixupDAR /* must be a buggy dcbX, icbi insn. 
*/ DARFixed:/* Return from dcbx instruction bug workaround */ - EXCEPTION_EPILOG_0 - b DataAccess + EXCEPTION_PROLOG_1 + EXCEPTION_PROLOG_2 + mfspr r5,SPRN_DSISR + stw r5,_DSISR(r11) + mfspr r4,SPRN_DAR + andis. r10,r5,0x4000 + beq+ 1f + tlbie r4 +1: li r10,RPN_PATTERN + mtspr SPRN_DAR,r10 /* Tag DAR, to be used in DTLB Error */ + /* 0x300 is DataAccess exception, needed by bad_page_fault() */ + EXC_XFER_LITE(0x300, handle_page_fault) EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE) EXCEPTION(0x1600, Trap_16, unknown_exception, EXC_XFER_EE) @@ -521,29 +533,30 @@ DARFixed:/* Return from dcbx instruction bug workaround */ #define NO_SELF_MODIFYING_CODE FixupDAR:/* Entry point for dcbx workaround. */ #ifdef CONFIG_8xx_CPU6 - stw r3, 8(r0) + mtspr SPRN_DAR, r3 #endif mtspr SPRN_SPRG_SCRATCH2, r10 /* fetch instruction from memory. */ mfspr r10, SPRN_SRR0 andis. r11, r10, 0x8000 /* Address >= 0x80000000 */ - DO_8xx_CPU6(0x3780, r3) - mtspr SPRN_MD_EPN, r10 - mfspr r11, SPRN_M_TWB /* Get level 1 table entry address */ + mfspr r11, SPRN_M_TW /* Get level 1 table base address */ beq- 3f /* Branch if user space */ lis r11, (swapper_pg_dir-PAGE_OFFSET)@h ori r11, r11, (swapper_pg_dir-PAGE_OFFSET)@l - rlwimi r11, r10, 32-20, 0xffc /* r11 = r11&~0xffc|(r10>>20)&0xffc */ -3: lwz r11, 0(r11) /* Get the level 1 entry */ - DO_8xx_CPU6(0x3b80, r3) - mtspr SPRN_MD_TWC, r11 /* Load pte table base address */ - mfspr r11, SPRN_MD_TWC /* ....and get the pte address */ - lwz r11, 0(r11) /* Get the pte */ + /* Extract level 1 index */ +3: rlwinm r10, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29 + lwzx r11, r10, r11 /* Get the level 1 entry */ + rlwinm r10, r11,0,0,19 /* Extract page descriptor page address */ + mfspr r11, SPRN_SRR0 /* Get effective address of fault */ + /* Extract level 2 index */ + rlwinm r11, r11, 32 - (PAGE_SHIFT - 2), 32 - PAGE_SHIFT, 29 + lwzx r11, r10, r11 /* Get the pte */ #ifdef CONFIG_8xx_CPU6 - lwz r3, 8(r0) /* restore r3 from memory */ + mfspr r3, SPRN_DAR #endif /* concat physical page address(r11) and page offset(r10) */ - rlwimi r11, r10, 0, 20, 31 + mfspr r10, SPRN_SRR0 + rlwimi r11, r10, 0, 32 - PAGE_SHIFT, 31 lwz r11,0(r11) /* Check if it really is a dcbx instruction. */ /* dcbt and dcbtst does not generate DTLB Misses/Errors, @@ -698,11 +711,11 @@ start_here: #ifdef CONFIG_8xx_CPU6 lis r4, cpu6_errata_word@h ori r4, r4, cpu6_errata_word@l - li r3, 0x3980 + li r3, 0x3f80 stw r3, 12(r4) lwz r3, 12(r4) #endif - mtspr SPRN_M_TWB, r6 + mtspr SPRN_M_TW, r6 lis r4,2f@h ori r4,r4,2f@l tophys(r4,r4) @@ -876,10 +889,10 @@ _GLOBAL(set_context) lis r6, cpu6_errata_word@h ori r6, r6, cpu6_errata_word@l tophys (r4, r4) - li r7, 0x3980 + li r7, 0x3f80 stw r7, 12(r6) lwz r7, 12(r6) - mtspr SPRN_M_TWB, r4 /* Update MMU base address */ + mtspr SPRN_M_TW, r4 /* Update MMU base address */ li r7, 0x3380 stw r7, 12(r6) lwz r7, 12(r6) @@ -887,7 +900,7 @@ _GLOBAL(set_context) #else mtspr SPRN_M_CASID,r3 /* Update context */ tophys (r4, r4) - mtspr SPRN_M_TWB, r4 /* and pgd */ + mtspr SPRN_M_TW, r4 /* and pgd */ #endif SYNC blr @@ -919,12 +932,13 @@ set_dec_cpu6: .globl sdata sdata: .globl empty_zero_page + .align PAGE_SHIFT empty_zero_page: - .space 4096 + .space PAGE_SIZE .globl swapper_pg_dir swapper_pg_dir: - .space 4096 + .space PGD_TABLE_SIZE /* Room for two PTE table poiners, usually the kernel and current user * pointer to their respective root page table (pgdir). 
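The head_8xx.S rework above replaces hard-coded shift constants with PAGE_SHIFT-based rlwinm/lwzx sequences so the software TLB-miss handlers work with both 4K and 16K pages (note the CONFIG_PPC_16K_PAGES variant of RPN_PATTERN). What those instruction sequences compute is the usual two-level table walk; a C sketch of the equivalent index arithmetic, illustrative only and with names of my choosing:

#include <asm/page.h>		/* PAGE_SHIFT */
#include <asm/pgtable.h>	/* PGDIR_SHIFT, PTRS_PER_PTE */

/* A 32-bit effective address splits into a level-1 (pgd) index and a
 * level-2 (pte) index; the lwzx in the handlers scales each index to a
 * word offset when loading the entry. */
static inline unsigned int example_l1_index(unsigned long ea)
{
	return ea >> PGDIR_SHIFT;			/* top bits select the pgd entry */
}

static inline unsigned int example_l2_index(unsigned long ea)
{
	return (ea >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);	/* next bits select the pte */
}
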
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c index 1f7d84e2e8b2..05e804cdecaa 100644 --- a/arch/powerpc/kernel/hw_breakpoint.c +++ b/arch/powerpc/kernel/hw_breakpoint.c @@ -63,7 +63,7 @@ int hw_breakpoint_slots(int type) int arch_install_hw_breakpoint(struct perf_event *bp) { struct arch_hw_breakpoint *info = counter_arch_bp(bp); - struct perf_event **slot = &__get_cpu_var(bp_per_reg); + struct perf_event **slot = this_cpu_ptr(&bp_per_reg); *slot = bp; @@ -88,7 +88,7 @@ int arch_install_hw_breakpoint(struct perf_event *bp) */ void arch_uninstall_hw_breakpoint(struct perf_event *bp) { - struct perf_event **slot = &__get_cpu_var(bp_per_reg); + struct perf_event **slot = this_cpu_ptr(&bp_per_reg); if (*slot != bp) { WARN_ONCE(1, "Can't find the breakpoint"); @@ -226,7 +226,7 @@ int __kprobes hw_breakpoint_handler(struct die_args *args) */ rcu_read_lock(); - bp = __get_cpu_var(bp_per_reg); + bp = __this_cpu_read(bp_per_reg); if (!bp) goto out; info = counter_arch_bp(bp); diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S index c0754bbf8118..18c0687e5ab3 100644 --- a/arch/powerpc/kernel/idle_power7.S +++ b/arch/powerpc/kernel/idle_power7.S @@ -212,6 +212,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) mtspr SPRN_SRR0,r5 rfid +/* + * R3 here contains the value that will be returned to the caller + * of power7_nap. + */ _GLOBAL(power7_wakeup_loss) ld r1,PACAR1(r13) BEGIN_FTR_SECTION @@ -219,15 +223,19 @@ BEGIN_FTR_SECTION END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) REST_NVGPRS(r1) REST_GPR(2, r1) - ld r3,_CCR(r1) + ld r6,_CCR(r1) ld r4,_MSR(r1) ld r5,_NIP(r1) addi r1,r1,INT_FRAME_SIZE - mtcr r3 + mtcr r6 mtspr SPRN_SRR1,r4 mtspr SPRN_SRR0,r5 rfid +/* + * R3 here contains the value that will be returned to the caller + * of power7_nap. + */ _GLOBAL(power7_wakeup_noloss) lbz r0,PACA_NAPSTATELOST(r13) cmpwi r0,0 diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c index a83cf5ef6488..5d3968c4d799 100644 --- a/arch/powerpc/kernel/iommu.c +++ b/arch/powerpc/kernel/iommu.c @@ -208,7 +208,7 @@ static unsigned long iommu_range_alloc(struct device *dev, * We don't need to disable preemption here because any CPU can * safely use any IOMMU pool. 
*/ - pool_nr = __raw_get_cpu_var(iommu_pool_hash) & (tbl->nr_pools - 1); + pool_nr = __this_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1); if (largealloc) pool = &(tbl->large_pool); diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index c14383575fe8..45096033d37b 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -50,7 +50,6 @@ #include <linux/list.h> #include <linux/radix-tree.h> #include <linux/mutex.h> -#include <linux/bootmem.h> #include <linux/pci.h> #include <linux/debugfs.h> #include <linux/of.h> @@ -114,7 +113,7 @@ static inline notrace void set_soft_enabled(unsigned long enable) static inline notrace int decrementer_check_overflow(void) { u64 now = get_tb_or_rtc(); - u64 *next_tb = &__get_cpu_var(decrementers_next_tb); + u64 *next_tb = this_cpu_ptr(&decrementers_next_tb); return now >= *next_tb; } @@ -499,7 +498,7 @@ void __do_irq(struct pt_regs *regs) /* And finally process it */ if (unlikely(irq == NO_IRQ)) - __get_cpu_var(irq_stat).spurious_irqs++; + __this_cpu_inc(irq_stat.spurious_irqs); else generic_handle_irq(irq); diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c index 8504657379f1..e77c3ccf8dcf 100644 --- a/arch/powerpc/kernel/kgdb.c +++ b/arch/powerpc/kernel/kgdb.c @@ -155,7 +155,7 @@ static int kgdb_singlestep(struct pt_regs *regs) { struct thread_info *thread_info, *exception_thread_info; struct thread_info *backup_current_thread_info = - &__get_cpu_var(kgdb_thread_info); + this_cpu_ptr(&kgdb_thread_info); if (user_mode(regs)) return 0; diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index 2f72af82513c..7c053f281406 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c @@ -119,7 +119,7 @@ static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb) static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb) { - __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp; + __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp); kcb->kprobe_status = kcb->prev_kprobe.status; kcb->kprobe_saved_msr = kcb->prev_kprobe.saved_msr; } @@ -127,7 +127,7 @@ static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb) static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb) { - __get_cpu_var(current_kprobe) = p; + __this_cpu_write(current_kprobe, p); kcb->kprobe_saved_msr = regs->msr; } @@ -192,7 +192,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) ret = 1; goto no_kprobe; } - p = __get_cpu_var(current_kprobe); + p = __this_cpu_read(current_kprobe); if (p->break_handler && p->break_handler(p, regs)) { goto ss_probe; } diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c index a7fd4cb78b78..15c99b649b04 100644 --- a/arch/powerpc/kernel/mce.c +++ b/arch/powerpc/kernel/mce.c @@ -73,8 +73,8 @@ void save_mce_event(struct pt_regs *regs, long handled, uint64_t nip, uint64_t addr) { uint64_t srr1; - int index = __get_cpu_var(mce_nest_count)++; - struct machine_check_event *mce = &__get_cpu_var(mce_event[index]); + int index = __this_cpu_inc_return(mce_nest_count); + struct machine_check_event *mce = this_cpu_ptr(&mce_event[index]); /* * Return if we don't have enough space to log mce event. 
@@ -143,7 +143,7 @@ void save_mce_event(struct pt_regs *regs, long handled, */ int get_mce_event(struct machine_check_event *mce, bool release) { - int index = __get_cpu_var(mce_nest_count) - 1; + int index = __this_cpu_read(mce_nest_count) - 1; struct machine_check_event *mc_evt; int ret = 0; @@ -153,7 +153,7 @@ int get_mce_event(struct machine_check_event *mce, bool release) /* Check if we have MCE info to process. */ if (index < MAX_MC_EVT) { - mc_evt = &__get_cpu_var(mce_event[index]); + mc_evt = this_cpu_ptr(&mce_event[index]); /* Copy the event structure and release the original */ if (mce) *mce = *mc_evt; @@ -163,7 +163,7 @@ int get_mce_event(struct machine_check_event *mce, bool release) } /* Decrement the count to free the slot. */ if (release) - __get_cpu_var(mce_nest_count)--; + __this_cpu_dec(mce_nest_count); return ret; } @@ -184,13 +184,13 @@ void machine_check_queue_event(void) if (!get_mce_event(&evt, MCE_EVENT_RELEASE)) return; - index = __get_cpu_var(mce_queue_count)++; + index = __this_cpu_inc_return(mce_queue_count); /* If queue is full, just return for now. */ if (index >= MAX_MC_EVT) { - __get_cpu_var(mce_queue_count)--; + __this_cpu_dec(mce_queue_count); return; } - __get_cpu_var(mce_event_queue[index]) = evt; + memcpy(this_cpu_ptr(&mce_event_queue[index]), &evt, sizeof(evt)); /* Queue irq work to process this event later. */ irq_work_queue(&mce_event_process_work); @@ -208,11 +208,11 @@ static void machine_check_process_queued_event(struct irq_work *work) * For now just print it to console. * TODO: log this error event to FSP or nvram. */ - while (__get_cpu_var(mce_queue_count) > 0) { - index = __get_cpu_var(mce_queue_count) - 1; + while (__this_cpu_read(mce_queue_count) > 0) { + index = __this_cpu_read(mce_queue_count) - 1; machine_check_print_event_info( - &__get_cpu_var(mce_event_queue[index])); - __get_cpu_var(mce_queue_count)--; + this_cpu_ptr(&mce_event_queue[index])); + __this_cpu_dec(mce_queue_count); } } diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c index aa9aff3d6ad3..b6f123ab90ed 100644 --- a/arch/powerpc/kernel/mce_power.c +++ b/arch/powerpc/kernel/mce_power.c @@ -79,7 +79,7 @@ static long mce_handle_derror(uint64_t dsisr, uint64_t slb_error_bits) } if (dsisr & P7_DSISR_MC_TLB_MULTIHIT_MFTLB) { if (cur_cpu_spec && cur_cpu_spec->flush_tlb) - cur_cpu_spec->flush_tlb(TLBIEL_INVAL_PAGE); + cur_cpu_spec->flush_tlb(TLBIEL_INVAL_SET); /* reset error bits */ dsisr &= ~P7_DSISR_MC_TLB_MULTIHIT_MFTLB; } @@ -110,7 +110,7 @@ static long mce_handle_common_ierror(uint64_t srr1) break; case P7_SRR1_MC_IFETCH_TLB_MULTIHIT: if (cur_cpu_spec && cur_cpu_spec->flush_tlb) { - cur_cpu_spec->flush_tlb(TLBIEL_INVAL_PAGE); + cur_cpu_spec->flush_tlb(TLBIEL_INVAL_SET); handled = 1; } break; diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c index f87bc1b4bdda..2f35a72642c6 100644 --- a/arch/powerpc/kernel/of_platform.c +++ b/arch/powerpc/kernel/of_platform.c @@ -110,7 +110,6 @@ static struct platform_driver of_pci_phb_driver = { .probe = of_pci_phb_probe, .driver = { .name = "of-pci", - .owner = THIS_MODULE, .of_match_table = of_pci_phb_ids, }, }; diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index e5dad9a9edc0..37d512d35943 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c @@ -20,7 +20,6 @@ #include <linux/pci.h> #include <linux/string.h> #include <linux/init.h> -#include <linux/bootmem.h> #include <linux/delay.h> #include <linux/export.h> 
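Many of the mechanical changes in this section (dbell.c, irq.c, mce.c, time.c, traps.c, the KVM e500 code, and others) replace the deprecated __get_cpu_var()/__raw_get_cpu_var() accessors with the this_cpu_*/__this_cpu_* API. A minimal kernel-style sketch of the conversion pattern, using a hypothetical per-CPU variable rather than one from the patch:

#include <linux/percpu.h>

struct demo_stat {
	unsigned long irqs;
	void *last;
};
static DEFINE_PER_CPU(struct demo_stat, demo_stat);	/* hypothetical, not from the patch */

static void demo_record(void *p)
{
	/* Old style, removed by this series (will not build once
	 * __get_cpu_var() is gone):
	 *	__get_cpu_var(demo_stat).irqs++;
	 *	__get_cpu_var(demo_stat).last = p;
	 */
	__this_cpu_inc(demo_stat.irqs);		/* single-field read-modify-write */
	__this_cpu_write(demo_stat.last, p);	/* single-field store */
	this_cpu_ptr(&demo_stat)->last = p;	/* or take a pointer when touching several fields */
}
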
#include <linux/of_address.h> @@ -1464,7 +1463,7 @@ static void pcibios_setup_phb_resources(struct pci_controller *hose, res = &hose->io_resource; if (!res->flags) { - printk(KERN_WARNING "PCI: I/O resource not set for host" + pr_info("PCI: I/O resource not set for host" " bridge %s (domain %d)\n", hose->dn->full_name, hose->global_number); } else { diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c index 432459c817fa..1f7930037cb7 100644 --- a/arch/powerpc/kernel/pci_32.c +++ b/arch/powerpc/kernel/pci_32.c @@ -199,9 +199,7 @@ pci_create_OF_bus_map(void) struct property* of_prop; struct device_node *dn; - of_prop = (struct property*) alloc_bootmem(sizeof(struct property) + 256); - if (!of_prop) - return; + of_prop = memblock_virt_alloc(sizeof(struct property) + 256, 0); dn = of_find_node_by_path("/"); if (dn) { memset(of_prop, -1, sizeof(struct property) + 256); diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c index b15194e2c5fc..60bb187cb46a 100644 --- a/arch/powerpc/kernel/pci_64.c +++ b/arch/powerpc/kernel/pci_64.c @@ -17,7 +17,6 @@ #include <linux/pci.h> #include <linux/string.h> #include <linux/init.h> -#include <linux/bootmem.h> #include <linux/export.h> #include <linux/mm.h> #include <linux/list.h> diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 923cd2daba89..b4cc7bef6b16 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -37,9 +37,9 @@ #include <linux/personality.h> #include <linux/random.h> #include <linux/hw_breakpoint.h> +#include <linux/uaccess.h> #include <asm/pgtable.h> -#include <asm/uaccess.h> #include <asm/io.h> #include <asm/processor.h> #include <asm/mmu.h> @@ -499,7 +499,7 @@ static inline int set_dawr(struct arch_hw_breakpoint *brk) void __set_breakpoint(struct arch_hw_breakpoint *brk) { - __get_cpu_var(current_brk) = *brk; + memcpy(this_cpu_ptr(¤t_brk), brk, sizeof(*brk)); if (cpu_has_feature(CPU_FTR_DAWR)) set_dawr(brk); @@ -842,7 +842,7 @@ struct task_struct *__switch_to(struct task_struct *prev, * schedule DABR */ #ifndef CONFIG_HAVE_HW_BREAKPOINT - if (unlikely(!hw_brk_match(&__get_cpu_var(current_brk), &new->thread.hw_brk))) + if (unlikely(!hw_brk_match(this_cpu_ptr(¤t_brk), &new->thread.hw_brk))) __set_breakpoint(&new->thread.hw_brk); #endif /* CONFIG_HAVE_HW_BREAKPOINT */ #endif @@ -856,7 +856,7 @@ struct task_struct *__switch_to(struct task_struct *prev, * Collect processor utilization data per process */ if (firmware_has_feature(FW_FEATURE_SPLPAR)) { - struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array); + struct cpu_usage *cu = this_cpu_ptr(&cpu_usage_array); long unsigned start_tb, current_tb; start_tb = old_thread->start_tb; cu->current_tb = current_tb = mfspr(SPRN_PURR); @@ -866,7 +866,7 @@ struct task_struct *__switch_to(struct task_struct *prev, #endif /* CONFIG_PPC64 */ #ifdef CONFIG_PPC_BOOK3S_64 - batch = &__get_cpu_var(ppc64_tlb_batch); + batch = this_cpu_ptr(&ppc64_tlb_batch); if (batch->active) { current_thread_info()->local_flags |= _TLF_LAZY_MMU; if (batch->index) @@ -889,7 +889,7 @@ struct task_struct *__switch_to(struct task_struct *prev, #ifdef CONFIG_PPC_BOOK3S_64 if (current_thread_info()->local_flags & _TLF_LAZY_MMU) { current_thread_info()->local_flags &= ~_TLF_LAZY_MMU; - batch = &__get_cpu_var(ppc64_tlb_batch); + batch = this_cpu_ptr(&ppc64_tlb_batch); batch->active = 1; } #endif /* CONFIG_PPC_BOOK3S_64 */ @@ -921,12 +921,8 @@ static void show_instructions(struct pt_regs *regs) pc = (unsigned long)phys_to_virt(pc); #endif - 
/* We use __get_user here *only* to avoid an OOPS on a - * bad address because the pc *should* only be a - * kernel address. - */ if (!__kernel_text_address(pc) || - __get_user(instr, (unsigned int __user *)pc)) { + probe_kernel_address((unsigned int __user *)pc, instr)) { printk(KERN_CONT "XXXXXXXX "); } else { if (regs->nip == pc) @@ -1531,13 +1527,6 @@ void show_stack(struct task_struct *tsk, unsigned long *stack) int curr_frame = current->curr_ret_stack; extern void return_to_handler(void); unsigned long rth = (unsigned long)return_to_handler; - unsigned long mrth = -1; -#ifdef CONFIG_PPC64 - extern void mod_return_to_handler(void); - rth = *(unsigned long *)rth; - mrth = (unsigned long)mod_return_to_handler; - mrth = *(unsigned long *)mrth; -#endif #endif sp = (unsigned long) stack; @@ -1562,7 +1551,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack) if (!firstframe || ip != lr) { printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip); #ifdef CONFIG_FUNCTION_GRAPH_TRACER - if ((ip == rth || ip == mrth) && curr_frame >= 0) { + if ((ip == rth) && curr_frame >= 0) { printk(" (%pS)", (void *)current->ret_stack[curr_frame].ret); curr_frame--; @@ -1665,12 +1654,3 @@ unsigned long arch_randomize_brk(struct mm_struct *mm) return ret; } -unsigned long randomize_et_dyn(unsigned long base) -{ - unsigned long ret = PAGE_ALIGN(base + brk_rnd()); - - if (ret < base) - return base; - - return ret; -} diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 099f27e6d1b0..6a799b3cc6b4 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -160,6 +160,12 @@ static struct ibm_pa_feature { {CPU_FTR_NODSISRALIGN, 0, 0, 1, 1, 1}, {0, MMU_FTR_CI_LARGE_PAGE, 0, 1, 2, 0}, {CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0}, + /* + * If the kernel doesn't support TM (ie. CONFIG_PPC_TRANSACTIONAL_MEM=n), + * we don't want to turn on CPU_FTR_TM here, so we use CPU_FTR_TM_COMP + * which is 0 if the kernel doesn't support TM. + */ + {CPU_FTR_TM_COMP, 0, 0, 22, 0, 0}, }; static void __init scan_features(unsigned long node, const unsigned char *ftrs, @@ -696,10 +702,7 @@ void __init early_init_devtree(void *params) reserve_crashkernel(); early_reserve_mem(); - /* - * Ensure that total memory size is page-aligned, because otherwise - * mark_bootmem() gets upset. - */ + /* Ensure that total memory size is page-aligned. 
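The process.c hunk above replaces a __get_user() on a kernel text address with probe_kernel_address(), the accessor intended for possibly-faulting reads of kernel memory. A short sketch of the pattern; this is illustrative, not the exact patch code:

#include <linux/kernel.h>	/* __kernel_text_address() */
#include <linux/uaccess.h>	/* probe_kernel_address() */

/* Returns the instruction word at pc, or 0 if it cannot be read safely. */
static unsigned int example_read_insn(unsigned long pc)
{
	unsigned int instr;

	if (!__kernel_text_address(pc) ||
	    probe_kernel_address((unsigned int *)pc, instr))
		return 0;
	return instr;
}
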
*/ limit = ALIGN(memory_limit ?: memblock_phys_mem_size(), PAGE_SIZE); memblock_enforce_memory_limit(limit); diff --git a/arch/powerpc/kernel/rtas-proc.c b/arch/powerpc/kernel/rtas-proc.c index 8777fb02349f..fb2fb3ea85e5 100644 --- a/arch/powerpc/kernel/rtas-proc.c +++ b/arch/powerpc/kernel/rtas-proc.c @@ -113,17 +113,6 @@ #define SENSOR_PREFIX "ibm,sensor-" #define cel_to_fahr(x) ((x*9/5)+32) - -/* Globals */ -static struct rtas_sensors sensors; -static struct device_node *rtas_node = NULL; -static unsigned long power_on_time = 0; /* Save the time the user set */ -static char progress_led[MAX_LINELENGTH]; - -static unsigned long rtas_tone_frequency = 1000; -static unsigned long rtas_tone_volume = 0; - -/* ****************STRUCTS******************************************* */ struct individual_sensor { unsigned int token; unsigned int quant; @@ -134,6 +123,15 @@ struct rtas_sensors { unsigned int quant; }; +/* Globals */ +static struct rtas_sensors sensors; +static struct device_node *rtas_node = NULL; +static unsigned long power_on_time = 0; /* Save the time the user set */ +static char progress_led[MAX_LINELENGTH]; + +static unsigned long rtas_tone_frequency = 1000; +static unsigned long rtas_tone_volume = 0; + /* ****************************************************************** */ /* Declarations */ static int ppc_rtas_sensors_show(struct seq_file *m, void *v); diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index 8b4c857c1421..4af905e81ab0 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -1091,8 +1091,8 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs) } /* - * Call early during boot, before mem init or bootmem, to retrieve the RTAS - * informations from the device-tree and allocate the RMO buffer for userland + * Call early during boot, before mem init, to retrieve the RTAS + * information from the device-tree and allocate the RMO buffer for userland * accesses. 
*/ void __init rtas_initialize(void) diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c index 7c55b86206b3..ce230da2c015 100644 --- a/arch/powerpc/kernel/rtas_pci.c +++ b/arch/powerpc/kernel/rtas_pci.c @@ -26,7 +26,6 @@ #include <linux/pci.h> #include <linux/string.h> #include <linux/init.h> -#include <linux/bootmem.h> #include <asm/io.h> #include <asm/pgtable.h> diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 1362cd62b3fa..44c8d03558ac 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -139,8 +139,8 @@ void machine_restart(char *cmd) void machine_power_off(void) { machine_shutdown(); - if (ppc_md.power_off) - ppc_md.power_off(); + if (pm_power_off) + pm_power_off(); #ifdef CONFIG_SMP smp_send_stop(); #endif @@ -151,7 +151,7 @@ void machine_power_off(void) /* Used by the G5 thermal driver */ EXPORT_SYMBOL_GPL(machine_power_off); -void (*pm_power_off)(void) = machine_power_off; +void (*pm_power_off)(void); EXPORT_SYMBOL_GPL(pm_power_off); void machine_halt(void) diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index 07831ed0d9ef..bb02e9f6944e 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c @@ -11,7 +11,6 @@ #include <linux/delay.h> #include <linux/initrd.h> #include <linux/tty.h> -#include <linux/bootmem.h> #include <linux/seq_file.h> #include <linux/root_dev.h> #include <linux/cpu.h> @@ -53,11 +52,6 @@ unsigned long ISA_DMA_THRESHOLD; unsigned int DMA_MODE_READ; unsigned int DMA_MODE_WRITE; -#ifdef CONFIG_VGA_CONSOLE -unsigned long vgacon_remap_base; -EXPORT_SYMBOL(vgacon_remap_base); -#endif - /* * These are used in binfmt_elf.c to put aux entries on the stack * for each elf executable being started. @@ -311,9 +305,8 @@ void __init setup_arch(char **cmdline_p) irqstack_early_init(); - /* set up the bootmem stuff with available memory */ - do_init_bootmem(); - if ( ppc_md.progress ) ppc_md.progress("setup_arch: bootmem", 0x3eab); + initmem_init(); + if ( ppc_md.progress ) ppc_md.progress("setup_arch: initmem", 0x3eab); #ifdef CONFIG_DUMMY_CONSOLE conswitchp = &dummy_con; diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 4f3cfe1b6a33..49f553bbb360 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -660,13 +660,11 @@ static void __init emergency_stack_init(void) } /* - * Called into from start_kernel this initializes bootmem, which is used + * Called into from start_kernel this initializes memblock, which is used * to manage page allocation until mem_init is called. */ void __init setup_arch(char **cmdline_p) { - ppc64_boot_msg(0x12, "Setup Arch"); - *cmdline_p = boot_command_line; /* @@ -691,9 +689,7 @@ void __init setup_arch(char **cmdline_p) exc_lvl_early_init(); emergency_stack_init(); - /* set up the bootmem stuff with available memory */ - do_init_bootmem(); - sparse_init(); + initmem_init(); #ifdef CONFIG_DUMMY_CONSOLE conswitchp = &dummy_con; @@ -711,33 +707,6 @@ void __init setup_arch(char **cmdline_p) if ((unsigned long)_stext & 0xffff) panic("Kernelbase not 64K-aligned (0x%lx)!\n", (unsigned long)_stext); - - ppc64_boot_msg(0x15, "Setup Done"); -} - - -/* ToDo: do something useful if ppc_md is not yet setup. 
*/ -#define PPC64_LINUX_FUNCTION 0x0f000000 -#define PPC64_IPL_MESSAGE 0xc0000000 -#define PPC64_TERM_MESSAGE 0xb0000000 - -static void ppc64_do_msg(unsigned int src, const char *msg) -{ - if (ppc_md.progress) { - char buf[128]; - - sprintf(buf, "%08X\n", src); - ppc_md.progress(buf, 0); - snprintf(buf, 128, "%s", msg); - ppc_md.progress(buf, 0); - } -} - -/* Print a boot progress message. */ -void ppc64_boot_msg(unsigned int src, const char *msg) -{ - ppc64_do_msg(PPC64_LINUX_FUNCTION|PPC64_IPL_MESSAGE|src, msg); - printk("[boot]%04x %s\n", src, msg); } #ifdef CONFIG_SMP diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 71e186d5f331..8b2d2dc8ef10 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -243,7 +243,7 @@ void smp_muxed_ipi_message_pass(int cpu, int msg) irqreturn_t smp_ipi_demux(void) { - struct cpu_messages *info = &__get_cpu_var(ipi_message); + struct cpu_messages *info = this_cpu_ptr(&ipi_message); unsigned int all; mb(); /* order any irq clear */ @@ -442,9 +442,9 @@ void generic_mach_cpu_die(void) idle_task_exit(); cpu = smp_processor_id(); printk(KERN_DEBUG "CPU%d offline\n", cpu); - __get_cpu_var(cpu_state) = CPU_DEAD; + __this_cpu_write(cpu_state, CPU_DEAD); smp_wmb(); - while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE) + while (__this_cpu_read(cpu_state) != CPU_UP_PREPARE) cpu_relax(); } diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c index 67fd2fd2620a..fa1fd8a0c867 100644 --- a/arch/powerpc/kernel/sysfs.c +++ b/arch/powerpc/kernel/sysfs.c @@ -394,10 +394,10 @@ void ppc_enable_pmcs(void) ppc_set_pmu_inuse(1); /* Only need to enable them once */ - if (__get_cpu_var(pmcs_enabled)) + if (__this_cpu_read(pmcs_enabled)) return; - __get_cpu_var(pmcs_enabled) = 1; + __this_cpu_write(pmcs_enabled, 1); if (ppc_md.enable_pmcs) ppc_md.enable_pmcs(); diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 7505599c2593..fa7c4f12104f 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -458,9 +458,9 @@ static inline void clear_irq_work_pending(void) DEFINE_PER_CPU(u8, irq_work_pending); -#define set_irq_work_pending_flag() __get_cpu_var(irq_work_pending) = 1 -#define test_irq_work_pending() __get_cpu_var(irq_work_pending) -#define clear_irq_work_pending() __get_cpu_var(irq_work_pending) = 0 +#define set_irq_work_pending_flag() __this_cpu_write(irq_work_pending, 1) +#define test_irq_work_pending() __this_cpu_read(irq_work_pending) +#define clear_irq_work_pending() __this_cpu_write(irq_work_pending, 0) #endif /* 32 vs 64 bit */ @@ -482,8 +482,8 @@ void arch_irq_work_raise(void) static void __timer_interrupt(void) { struct pt_regs *regs = get_irq_regs(); - u64 *next_tb = &__get_cpu_var(decrementers_next_tb); - struct clock_event_device *evt = &__get_cpu_var(decrementers); + u64 *next_tb = this_cpu_ptr(&decrementers_next_tb); + struct clock_event_device *evt = this_cpu_ptr(&decrementers); u64 now; trace_timer_interrupt_entry(regs); @@ -498,7 +498,7 @@ static void __timer_interrupt(void) *next_tb = ~(u64)0; if (evt->event_handler) evt->event_handler(evt); - __get_cpu_var(irq_stat).timer_irqs_event++; + __this_cpu_inc(irq_stat.timer_irqs_event); } else { now = *next_tb - now; if (now <= DECREMENTER_MAX) @@ -506,13 +506,13 @@ static void __timer_interrupt(void) /* We may have raced with new irq work */ if (test_irq_work_pending()) set_dec(1); - __get_cpu_var(irq_stat).timer_irqs_others++; + __this_cpu_inc(irq_stat.timer_irqs_others); } #ifdef CONFIG_PPC64 /* collect purr register 
values often, for accurate calculations */ if (firmware_has_feature(FW_FEATURE_SPLPAR)) { - struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array); + struct cpu_usage *cu = this_cpu_ptr(&cpu_usage_array); cu->current_tb = mfspr(SPRN_PURR); } #endif @@ -527,7 +527,7 @@ static void __timer_interrupt(void) void timer_interrupt(struct pt_regs * regs) { struct pt_regs *old_regs; - u64 *next_tb = &__get_cpu_var(decrementers_next_tb); + u64 *next_tb = this_cpu_ptr(&decrementers_next_tb); /* Ensure a positive value is written to the decrementer, or else * some CPUs will continue to take decrementer exceptions. @@ -813,7 +813,7 @@ static void __init clocksource_init(void) static int decrementer_set_next_event(unsigned long evt, struct clock_event_device *dev) { - __get_cpu_var(decrementers_next_tb) = get_tb_or_rtc() + evt; + __this_cpu_write(decrementers_next_tb, get_tb_or_rtc() + evt); set_dec(evt); /* We may have raced with new irq work */ @@ -833,7 +833,7 @@ static void decrementer_set_mode(enum clock_event_mode mode, /* Interrupt handler for the timer broadcast IPI */ void tick_broadcast_ipi_handler(void) { - u64 *next_tb = &__get_cpu_var(decrementers_next_tb); + u64 *next_tb = this_cpu_ptr(&decrementers_next_tb); *next_tb = get_tb_or_rtc(); __timer_interrupt(); @@ -989,6 +989,7 @@ void GregorianDay(struct rtc_time * tm) tm->tm_wday = day % 7; } +EXPORT_SYMBOL_GPL(GregorianDay); void to_tm(int tim, struct rtc_time * tm) { diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 0dc43f9932cf..e6595b72269b 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -295,7 +295,7 @@ long machine_check_early(struct pt_regs *regs) { long handled = 0; - __get_cpu_var(irq_stat).mce_exceptions++; + __this_cpu_inc(irq_stat.mce_exceptions); if (cur_cpu_spec && cur_cpu_spec->machine_check_early) handled = cur_cpu_spec->machine_check_early(regs); @@ -304,7 +304,7 @@ long machine_check_early(struct pt_regs *regs) long hmi_exception_realmode(struct pt_regs *regs) { - __get_cpu_var(irq_stat).hmi_exceptions++; + __this_cpu_inc(irq_stat.hmi_exceptions); if (ppc_md.hmi_exception_early) ppc_md.hmi_exception_early(regs); @@ -700,7 +700,7 @@ void machine_check_exception(struct pt_regs *regs) enum ctx_state prev_state = exception_enter(); int recover = 0; - __get_cpu_var(irq_stat).mce_exceptions++; + __this_cpu_inc(irq_stat.mce_exceptions); /* See if any machine dependent calls. In theory, we would want * to call the CPU first, and call the ppc_md. 
one if the CPU @@ -1519,7 +1519,7 @@ void vsx_unavailable_tm(struct pt_regs *regs) void performance_monitor_exception(struct pt_regs *regs) { - __get_cpu_var(irq_stat).pmu_irqs++; + __this_cpu_inc(irq_stat.pmu_irqs); perf_irq(regs); } diff --git a/arch/powerpc/kernel/udbg_16550.c b/arch/powerpc/kernel/udbg_16550.c index 6e7c4923b5ea..411116c38da4 100644 --- a/arch/powerpc/kernel/udbg_16550.c +++ b/arch/powerpc/kernel/udbg_16550.c @@ -69,8 +69,12 @@ static void udbg_uart_putc(char c) static int udbg_uart_getc_poll(void) { - if (!udbg_uart_in || !(udbg_uart_in(UART_LSR) & LSR_DR)) + if (!udbg_uart_in) + return -1; + + if (!(udbg_uart_in(UART_LSR) & LSR_DR)) return udbg_uart_in(UART_RBR); + return -1; } diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c index f174351842cf..305eb0d9b768 100644 --- a/arch/powerpc/kernel/vdso.c +++ b/arch/powerpc/kernel/vdso.c @@ -20,7 +20,6 @@ #include <linux/user.h> #include <linux/elf.h> #include <linux/security.h> -#include <linux/bootmem.h> #include <linux/memblock.h> #include <asm/pgtable.h> diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c index 4fdc27c80f4c..3f1bb5a36c27 100644 --- a/arch/powerpc/kvm/book3s_hv_builtin.c +++ b/arch/powerpc/kvm/book3s_hv_builtin.c @@ -12,7 +12,6 @@ #include <linux/export.h> #include <linux/sched.h> #include <linux/spinlock.h> -#include <linux/bootmem.h> #include <linux/init.h> #include <linux/memblock.h> #include <linux/sizes.h> @@ -154,7 +153,7 @@ EXPORT_SYMBOL_GPL(kvm_release_hpt); * kvm_cma_reserve() - reserve area for kvm hash pagetable * * This function reserves memory from early allocator. It should be - * called by arch specific code once the early allocator (memblock or bootmem) + * called by arch specific code once the memblock allocator * has been activated and all other subsystems have already allocated/reserved * memory. */ diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index edb2ccdbb2ba..65c105b17a25 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -201,8 +201,6 @@ kvmppc_primary_no_guest: bge kvm_novcpu_exit /* another thread already exiting */ li r3, NAPPING_NOVCPU stb r3, HSTATE_NAPPING(r13) - li r3, 1 - stb r3, HSTATE_HWTHREAD_REQ(r13) b kvm_do_nap @@ -293,6 +291,8 @@ kvm_start_guest: /* if we have no vcpu to run, go back to sleep */ beq kvm_no_guest +kvm_secondary_got_guest: + /* Set HSTATE_DSCR(r13) to something sensible */ ld r6, PACA_DSCR(r13) std r6, HSTATE_DSCR(r13) @@ -318,27 +318,46 @@ kvm_start_guest: stwcx. r3, 0, r4 bne 51b +/* + * At this point we have finished executing in the guest. + * We need to wait for hwthread_req to become zero, since + * we may not turn on the MMU while hwthread_req is non-zero. + * While waiting we also need to check if we get given a vcpu to run. + */ kvm_no_guest: - li r0, KVM_HWTHREAD_IN_NAP + lbz r3, HSTATE_HWTHREAD_REQ(r13) + cmpwi r3, 0 + bne 53f + HMT_MEDIUM + li r0, KVM_HWTHREAD_IN_KERNEL stb r0, HSTATE_HWTHREAD_STATE(r13) -kvm_do_nap: - /* Clear the runlatch bit before napping */ - mfspr r2, SPRN_CTRLF - clrrdi r2, r2, 1 - mtspr SPRN_CTRLT, r2 - + /* need to recheck hwthread_req after a barrier, to avoid race */ + sync + lbz r3, HSTATE_HWTHREAD_REQ(r13) + cmpwi r3, 0 + bne 54f +/* + * We jump to power7_wakeup_loss, which will return to the caller + * of power7_nap in the powernv cpu offline loop. The value we + * put in r3 becomes the return value for power7_nap. 
+ */ li r3, LPCR_PECE0 mfspr r4, SPRN_LPCR rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1 mtspr SPRN_LPCR, r4 - isync - std r0, HSTATE_SCRATCH0(r13) - ptesync - ld r0, HSTATE_SCRATCH0(r13) -1: cmpd r0, r0 - bne 1b - nap - b . + li r3, 0 + b power7_wakeup_loss + +53: HMT_LOW + ld r4, HSTATE_KVM_VCPU(r13) + cmpdi r4, 0 + beq kvm_no_guest + HMT_MEDIUM + b kvm_secondary_got_guest + +54: li r0, KVM_HWTHREAD_IN_KVM + stb r0, HSTATE_HWTHREAD_STATE(r13) + b kvm_no_guest /****************************************************************************** * * @@ -2172,6 +2191,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206) * occurs, with PECE1, PECE0 and PECEDP set in LPCR. Also clear the * runlatch bit before napping. */ +kvm_do_nap: mfspr r2, SPRN_CTRLF clrrdi r2, r2, 1 mtspr SPRN_CTRLT, r2 diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c index 2e02ed849f36..16095841afe1 100644 --- a/arch/powerpc/kvm/e500.c +++ b/arch/powerpc/kvm/e500.c @@ -76,11 +76,11 @@ static inline int local_sid_setup_one(struct id *entry) unsigned long sid; int ret = -1; - sid = ++(__get_cpu_var(pcpu_last_used_sid)); + sid = __this_cpu_inc_return(pcpu_last_used_sid); if (sid < NUM_TIDS) { - __get_cpu_var(pcpu_sids).entry[sid] = entry; + __this_cpu_write(pcpu_sids.entry[sid], entry); entry->val = sid; - entry->pentry = &__get_cpu_var(pcpu_sids).entry[sid]; + entry->pentry = this_cpu_ptr(&pcpu_sids.entry[sid]); ret = sid; } @@ -108,8 +108,8 @@ static inline int local_sid_setup_one(struct id *entry) static inline int local_sid_lookup(struct id *entry) { if (entry && entry->val != 0 && - __get_cpu_var(pcpu_sids).entry[entry->val] == entry && - entry->pentry == &__get_cpu_var(pcpu_sids).entry[entry->val]) + __this_cpu_read(pcpu_sids.entry[entry->val]) == entry && + entry->pentry == this_cpu_ptr(&pcpu_sids.entry[entry->val])) return entry->val; return -1; } @@ -117,8 +117,8 @@ static inline int local_sid_lookup(struct id *entry) /* Invalidate all id mappings on local core -- call with preempt disabled */ static inline void local_sid_destroy_all(void) { - __get_cpu_var(pcpu_last_used_sid) = 0; - memset(&__get_cpu_var(pcpu_sids), 0, sizeof(__get_cpu_var(pcpu_sids))); + __this_cpu_write(pcpu_last_used_sid, 0); + memset(this_cpu_ptr(&pcpu_sids), 0, sizeof(pcpu_sids)); } static void *kvmppc_e500_id_table_alloc(struct kvmppc_vcpu_e500 *vcpu_e500) diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c index 769778f855b0..cc536d4a75ef 100644 --- a/arch/powerpc/kvm/e500_mmu_host.c +++ b/arch/powerpc/kvm/e500_mmu_host.c @@ -661,7 +661,7 @@ int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type, if (unlikely((pr && !(mas3 & MAS3_UX)) || (!pr && !(mas3 & MAS3_SX)))) { pr_err_ratelimited( - "%s: Instuction emulation from guest addres %08lx without execute permission\n", + "%s: Instruction emulation from guest address %08lx without execute permission\n", __func__, geaddr); return EMULATE_AGAIN; } @@ -673,7 +673,7 @@ int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type, if (has_feature(vcpu, VCPU_FTR_MMU_V2) && unlikely((mas2 & MAS2_I) || (mas2 & MAS2_W) || !(mas2 & MAS2_M))) { pr_err_ratelimited( - "%s: Instuction emulation from guest addres %08lx mismatches storage attributes\n", + "%s: Instruction emulation from guest address %08lx mismatches storage attributes\n", __func__, geaddr); return EMULATE_AGAIN; } @@ -686,7 +686,7 @@ int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type, /* Guard against emulation from devices area */ if
(unlikely(!page_is_ram(pfn))) { - pr_err_ratelimited("%s: Instruction emulation from non-RAM host addres %08llx is not supported\n", + pr_err_ratelimited("%s: Instruction emulation from non-RAM host address %08llx is not supported\n", __func__, addr); return EMULATE_AGAIN; } diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c index 2fdc8722e324..cda695de8aa7 100644 --- a/arch/powerpc/kvm/e500mc.c +++ b/arch/powerpc/kvm/e500mc.c @@ -144,9 +144,9 @@ static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu) mtspr(SPRN_GESR, vcpu->arch.shared->esr); if (vcpu->arch.oldpir != mfspr(SPRN_PIR) || - __get_cpu_var(last_vcpu_of_lpid)[get_lpid(vcpu)] != vcpu) { + __this_cpu_read(last_vcpu_of_lpid[get_lpid(vcpu)]) != vcpu) { kvmppc_e500_tlbil_all(vcpu_e500); - __get_cpu_var(last_vcpu_of_lpid)[get_lpid(vcpu)] = vcpu; + __this_cpu_write(last_vcpu_of_lpid[get_lpid(vcpu)], vcpu); } } diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile index 9f342f134ae4..597562f69b2d 100644 --- a/arch/powerpc/lib/Makefile +++ b/arch/powerpc/lib/Makefile @@ -12,7 +12,6 @@ CFLAGS_REMOVE_feature-fixups.o = -pg obj-y := string.o alloc.o \ crtsavres.o ppc_ksyms.o obj-$(CONFIG_PPC32) += div64.o copy_32.o -obj-$(CONFIG_HAS_IOMEM) += devres.o obj-$(CONFIG_PPC64) += copypage_64.o copyuser_64.o \ usercopy_64.o mem_64.o string.o \ diff --git a/arch/powerpc/lib/alloc.c b/arch/powerpc/lib/alloc.c index da22c84a8fed..4a6c2cf890d9 100644 --- a/arch/powerpc/lib/alloc.c +++ b/arch/powerpc/lib/alloc.c @@ -13,9 +13,7 @@ void * __init_refok zalloc_maybe_bootmem(size_t size, gfp_t mask) if (mem_init_done) p = kzalloc(size, mask); else { - p = alloc_bootmem(size); - if (p) - memset(p, 0, size); + p = memblock_virt_alloc(size, 0); } return p; } diff --git a/arch/powerpc/lib/copyuser_power7.S b/arch/powerpc/lib/copyuser_power7.S index c46c876ac96a..92ee840529bc 100644 --- a/arch/powerpc/lib/copyuser_power7.S +++ b/arch/powerpc/lib/copyuser_power7.S @@ -718,4 +718,4 @@ err3; stb r0,0(r3) 15: addi r1,r1,STACKFRAMESIZE b exit_vmx_usercopy /* tail call optimise */ -#endif /* CONFiG_ALTIVEC */ +#endif /* CONFIG_ALTIVEC */ diff --git a/arch/powerpc/lib/devres.c b/arch/powerpc/lib/devres.c deleted file mode 100644 index 8df55fc3aad6..000000000000 --- a/arch/powerpc/lib/devres.c +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright (C) 2008 Freescale Semiconductor, Inc. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -#include <linux/device.h> /* devres_*(), devm_ioremap_release() */ -#include <linux/gfp.h> -#include <linux/io.h> /* ioremap_prot() */ -#include <linux/export.h> /* EXPORT_SYMBOL() */ - -/** - * devm_ioremap_prot - Managed ioremap_prot() - * @dev: Generic device to remap IO address for - * @offset: BUS offset to map - * @size: Size of map - * @flags: Page flags - * - * Managed ioremap_prot(). Map is automatically unmapped on driver - * detach. 
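The __get_cpu_var() to this_cpu_* conversions running through the time.c, traps.c and KVM e500/e500mc hunks above all follow one mechanical pattern. A minimal sketch of that pattern in kernel C — the struct and variable names here are invented for illustration, not taken from the tree:

#include <linux/percpu.h>

struct demo_stat {
        unsigned long events;
};

static DEFINE_PER_CPU(unsigned long, demo_counter);
static DEFINE_PER_CPU(struct demo_stat, demo_stats);

static void demo_update(void)
{
        struct demo_stat *s;

        /* was: __get_cpu_var(demo_counter)++; */
        __this_cpu_inc(demo_counter);

        /* was: s = &__get_cpu_var(demo_stats); */
        s = this_cpu_ptr(&demo_stats);
        s->events++;
}

The __this_cpu_* forms assume the caller already runs with preemption disabled, which matches the interrupt and real-mode paths touched above, and they let an architecture emit a single per-CPU operation where it has one instead of computing the per-CPU address separately.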
- */ -void __iomem *devm_ioremap_prot(struct device *dev, resource_size_t offset, - size_t size, unsigned long flags) -{ - void __iomem **ptr, *addr; - - ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL); - if (!ptr) - return NULL; - - addr = ioremap_prot(offset, size, flags); - if (addr) { - *ptr = addr; - devres_add(dev, ptr); - } else - devres_free(ptr); - - return addr; -} -EXPORT_SYMBOL(devm_ioremap_prot); diff --git a/arch/powerpc/lib/memcpy_power7.S b/arch/powerpc/lib/memcpy_power7.S index 2ff5c142f87b..0830587df16e 100644 --- a/arch/powerpc/lib/memcpy_power7.S +++ b/arch/powerpc/lib/memcpy_power7.S @@ -653,4 +653,4 @@ _GLOBAL(memcpy_power7) 15: addi r1,r1,STACKFRAMESIZE ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1) b exit_vmx_copy /* tail call optimise */ -#endif /* CONFiG_ALTIVEC */ +#endif /* CONFIG_ALTIVEC */ diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c index 54651fc2d412..dc885b30f7a6 100644 --- a/arch/powerpc/lib/sstep.c +++ b/arch/powerpc/lib/sstep.c @@ -1865,6 +1865,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) } goto ldst_done; +#ifdef CONFIG_PPC_FPU case LOAD_FP: if (regs->msr & MSR_LE) return 0; @@ -1873,7 +1874,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) else err = do_fp_load(op.reg, do_lfd, op.ea, size, regs); goto ldst_done; - +#endif #ifdef CONFIG_ALTIVEC case LOAD_VMX: if (regs->msr & MSR_LE) @@ -1919,6 +1920,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) err = write_mem(op.val, op.ea, size, regs); goto ldst_done; +#ifdef CONFIG_PPC_FPU case STORE_FP: if (regs->msr & MSR_LE) return 0; @@ -1927,7 +1929,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) else err = do_fp_store(op.reg, do_stfd, op.ea, size, regs); goto ldst_done; - +#endif #ifdef CONFIG_ALTIVEC case STORE_VMX: if (regs->msr & MSR_LE) diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile index 325e861616a1..438dcd3fd0d1 100644 --- a/arch/powerpc/mm/Makefile +++ b/arch/powerpc/mm/Makefile @@ -6,7 +6,7 @@ subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC) -obj-y := fault.o mem.o pgtable.o gup.o mmap.o \ +obj-y := fault.o mem.o pgtable.o mmap.o \ init_$(CONFIG_WORD_SIZE).o \ pgtable_$(CONFIG_WORD_SIZE).o obj-$(CONFIG_PPC_MMU_NOHASH) += mmu_context_nohash.o tlb_nohash.o \ diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 08d659a9fcdb..eb79907f34fa 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -43,7 +43,6 @@ #include <asm/tlbflush.h> #include <asm/siginfo.h> #include <asm/debug.h> -#include <mm/mmu_decl.h> #include "icswx.h" @@ -380,12 +379,6 @@ good_area: goto bad_area; #endif /* CONFIG_6xx */ #if defined(CONFIG_8xx) - /* 8xx sometimes need to load a invalid/non-present TLBs. - * These must be invalidated separately as linux mm don't. - */ - if (error_code & 0x40000000) /* no translation? */ - _tlbil_va(address, 0, 0, 0); - /* The MPC8xx seems to always set 0x80000000, which is * "undefined". Of those that can be set, this is the only * one which seems bad. diff --git a/arch/powerpc/mm/gup.c b/arch/powerpc/mm/gup.c deleted file mode 100644 index d8746684f606..000000000000 --- a/arch/powerpc/mm/gup.c +++ /dev/null @@ -1,235 +0,0 @@ -/* - * Lockless get_user_pages_fast for powerpc - * - * Copyright (C) 2008 Nick Piggin - * Copyright (C) 2008 Novell Inc. 
- */ -#undef DEBUG - -#include <linux/sched.h> -#include <linux/mm.h> -#include <linux/hugetlb.h> -#include <linux/vmstat.h> -#include <linux/pagemap.h> -#include <linux/rwsem.h> -#include <asm/pgtable.h> - -#ifdef __HAVE_ARCH_PTE_SPECIAL - -/* - * The performance critical leaf functions are made noinline otherwise gcc - * inlines everything into a single function which results in too much - * register pressure. - */ -static noinline int gup_pte_range(pmd_t pmd, unsigned long addr, - unsigned long end, int write, struct page **pages, int *nr) -{ - unsigned long mask, result; - pte_t *ptep; - - result = _PAGE_PRESENT|_PAGE_USER; - if (write) - result |= _PAGE_RW; - mask = result | _PAGE_SPECIAL; - - ptep = pte_offset_kernel(&pmd, addr); - do { - pte_t pte = ACCESS_ONCE(*ptep); - struct page *page; - /* - * Similar to the PMD case, NUMA hinting must take slow path - */ - if (pte_numa(pte)) - return 0; - - if ((pte_val(pte) & mask) != result) - return 0; - VM_BUG_ON(!pfn_valid(pte_pfn(pte))); - page = pte_page(pte); - if (!page_cache_get_speculative(page)) - return 0; - if (unlikely(pte_val(pte) != pte_val(*ptep))) { - put_page(page); - return 0; - } - pages[*nr] = page; - (*nr)++; - - } while (ptep++, addr += PAGE_SIZE, addr != end); - - return 1; -} - -static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, - int write, struct page **pages, int *nr) -{ - unsigned long next; - pmd_t *pmdp; - - pmdp = pmd_offset(&pud, addr); - do { - pmd_t pmd = ACCESS_ONCE(*pmdp); - - next = pmd_addr_end(addr, end); - /* - * If we find a splitting transparent hugepage we - * return zero. That will result in taking the slow - * path which will call wait_split_huge_page() - * if the pmd is still in splitting state - */ - if (pmd_none(pmd) || pmd_trans_splitting(pmd)) - return 0; - if (pmd_huge(pmd) || pmd_large(pmd)) { - /* - * NUMA hinting faults need to be handled in the GUP - * slowpath for accounting purposes and so that they - * can be serialised against THP migration. - */ - if (pmd_numa(pmd)) - return 0; - - if (!gup_hugepte((pte_t *)pmdp, PMD_SIZE, addr, next, - write, pages, nr)) - return 0; - } else if (is_hugepd(pmdp)) { - if (!gup_hugepd((hugepd_t *)pmdp, PMD_SHIFT, - addr, next, write, pages, nr)) - return 0; - } else if (!gup_pte_range(pmd, addr, next, write, pages, nr)) - return 0; - } while (pmdp++, addr = next, addr != end); - - return 1; -} - -static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end, - int write, struct page **pages, int *nr) -{ - unsigned long next; - pud_t *pudp; - - pudp = pud_offset(&pgd, addr); - do { - pud_t pud = ACCESS_ONCE(*pudp); - - next = pud_addr_end(addr, end); - if (pud_none(pud)) - return 0; - if (pud_huge(pud)) { - if (!gup_hugepte((pte_t *)pudp, PUD_SIZE, addr, next, - write, pages, nr)) - return 0; - } else if (is_hugepd(pudp)) { - if (!gup_hugepd((hugepd_t *)pudp, PUD_SHIFT, - addr, next, write, pages, nr)) - return 0; - } else if (!gup_pmd_range(pud, addr, next, write, pages, nr)) - return 0; - } while (pudp++, addr = next, addr != end); - - return 1; -} - -int __get_user_pages_fast(unsigned long start, int nr_pages, int write, - struct page **pages) -{ - struct mm_struct *mm = current->mm; - unsigned long addr, len, end; - unsigned long next; - unsigned long flags; - pgd_t *pgdp; - int nr = 0; - - pr_devel("%s(%lx,%x,%s)\n", __func__, start, nr_pages, write ? 
"write" : "read"); - - start &= PAGE_MASK; - addr = start; - len = (unsigned long) nr_pages << PAGE_SHIFT; - end = start + len; - - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ, - start, len))) - return 0; - - pr_devel(" aligned: %lx .. %lx\n", start, end); - - /* - * XXX: batch / limit 'nr', to avoid large irq off latency - * needs some instrumenting to determine the common sizes used by - * important workloads (eg. DB2), and whether limiting the batch size - * will decrease performance. - * - * It seems like we're in the clear for the moment. Direct-IO is - * the main guy that batches up lots of get_user_pages, and even - * they are limited to 64-at-a-time which is not so many. - */ - /* - * This doesn't prevent pagetable teardown, but does prevent - * the pagetables from being freed on powerpc. - * - * So long as we atomically load page table pointers versus teardown, - * we can follow the address down to the the page and take a ref on it. - */ - local_irq_save(flags); - - pgdp = pgd_offset(mm, addr); - do { - pgd_t pgd = ACCESS_ONCE(*pgdp); - - pr_devel(" %016lx: normal pgd %p\n", addr, - (void *)pgd_val(pgd)); - next = pgd_addr_end(addr, end); - if (pgd_none(pgd)) - break; - if (pgd_huge(pgd)) { - if (!gup_hugepte((pte_t *)pgdp, PGDIR_SIZE, addr, next, - write, pages, &nr)) - break; - } else if (is_hugepd(pgdp)) { - if (!gup_hugepd((hugepd_t *)pgdp, PGDIR_SHIFT, - addr, next, write, pages, &nr)) - break; - } else if (!gup_pud_range(pgd, addr, next, write, pages, &nr)) - break; - } while (pgdp++, addr = next, addr != end); - - local_irq_restore(flags); - - return nr; -} - -int get_user_pages_fast(unsigned long start, int nr_pages, int write, - struct page **pages) -{ - struct mm_struct *mm = current->mm; - int nr, ret; - - start &= PAGE_MASK; - nr = __get_user_pages_fast(start, nr_pages, write, pages); - ret = nr; - - if (nr < nr_pages) { - pr_devel(" slow path ! nr = %d\n", nr); - - /* Try to get the remaining pages with get_user_pages */ - start += nr << PAGE_SHIFT; - pages += nr; - - down_read(&mm->mmap_sem); - ret = get_user_pages(current, mm, start, - nr_pages - nr, write, 0, pages, NULL); - up_read(&mm->mmap_sem); - - /* Have to be a bit careful with return values */ - if (nr > 0) { - if (ret < 0) - ret = nr; - else - ret += nr; - } - } - - return ret; -} - -#endif /* __HAVE_ARCH_PTE_SPECIAL */ diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S index 057cbbb4c576..463174a4a647 100644 --- a/arch/powerpc/mm/hash_low_64.S +++ b/arch/powerpc/mm/hash_low_64.S @@ -46,7 +46,8 @@ /* * _hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid, - * pte_t *ptep, unsigned long trap, int local, int ssize) + * pte_t *ptep, unsigned long trap, unsigned long flags, + * int ssize) * * Adds a 4K page to the hash table in a segment of 4K pages only */ @@ -298,7 +299,7 @@ htab_modify_pte: li r6,MMU_PAGE_4K /* base page size */ li r7,MMU_PAGE_4K /* actual page size */ ld r8,STK_PARAM(R9)(r1) /* segment size */ - ld r9,STK_PARAM(R8)(r1) /* get "local" param */ + ld r9,STK_PARAM(R8)(r1) /* get "flags" param */ .globl htab_call_hpte_updatepp htab_call_hpte_updatepp: bl . 
/* Patched by htab_finish_init() */ @@ -338,8 +339,8 @@ htab_pte_insert_failure: *****************************************************************************/ /* _hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid, - * pte_t *ptep, unsigned long trap, int local, int ssize, - * int subpg_prot) + * pte_t *ptep, unsigned long trap, unsigned local flags, + * int ssize, int subpg_prot) */ /* @@ -514,7 +515,7 @@ htab_insert_pte: andis. r0,r31,_PAGE_4K_PFN@h srdi r5,r31,PTE_RPN_SHIFT bne- htab_special_pfn - sldi r5,r5,PAGE_SHIFT-HW_PAGE_SHIFT + sldi r5,r5,PAGE_FACTOR add r5,r5,r25 htab_special_pfn: sldi r5,r5,HW_PAGE_SHIFT @@ -544,7 +545,7 @@ htab_call_hpte_insert1: andis. r0,r31,_PAGE_4K_PFN@h srdi r5,r31,PTE_RPN_SHIFT bne- 3f - sldi r5,r5,PAGE_SHIFT-HW_PAGE_SHIFT + sldi r5,r5,PAGE_FACTOR add r5,r5,r25 3: sldi r5,r5,HW_PAGE_SHIFT @@ -594,7 +595,7 @@ htab_inval_old_hpte: li r5,0 /* PTE.hidx */ li r6,MMU_PAGE_64K /* psize */ ld r7,STK_PARAM(R9)(r1) /* ssize */ - ld r8,STK_PARAM(R8)(r1) /* local */ + ld r8,STK_PARAM(R8)(r1) /* flags */ bl flush_hash_page /* Clear out _PAGE_HPTE_SUB bits in the new linux PTE */ lis r0,_PAGE_HPTE_SUB@h @@ -666,7 +667,7 @@ htab_modify_pte: li r6,MMU_PAGE_4K /* base page size */ li r7,MMU_PAGE_4K /* actual page size */ ld r8,STK_PARAM(R9)(r1) /* segment size */ - ld r9,STK_PARAM(R8)(r1) /* get "local" param */ + ld r9,STK_PARAM(R8)(r1) /* get "flags" param */ .globl htab_call_hpte_updatepp htab_call_hpte_updatepp: bl . /* patched by htab_finish_init() */ @@ -962,7 +963,7 @@ ht64_modify_pte: li r6,MMU_PAGE_64K /* base page size */ li r7,MMU_PAGE_64K /* actual page size */ ld r8,STK_PARAM(R9)(r1) /* segment size */ - ld r9,STK_PARAM(R8)(r1) /* get "local" param */ + ld r9,STK_PARAM(R8)(r1) /* get "flags" param */ .globl ht64_call_hpte_updatepp ht64_call_hpte_updatepp: bl . /* patched by htab_finish_init() */ diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c index ae4962a06476..9c4880ddecd6 100644 --- a/arch/powerpc/mm/hash_native_64.c +++ b/arch/powerpc/mm/hash_native_64.c @@ -283,19 +283,17 @@ static long native_hpte_remove(unsigned long hpte_group) static long native_hpte_updatepp(unsigned long slot, unsigned long newpp, unsigned long vpn, int bpsize, - int apsize, int ssize, int local) + int apsize, int ssize, unsigned long flags) { struct hash_pte *hptep = htab_address + slot; unsigned long hpte_v, want_v; - int ret = 0; + int ret = 0, local = 0; want_v = hpte_encode_avpn(vpn, bpsize, ssize); DBG_LOW(" update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)", vpn, want_v & HPTE_V_AVPN, slot, newpp); - native_lock_hpte(hptep); - hpte_v = be64_to_cpu(hptep->v); /* * We need to invalidate the TLB always because hpte_remove doesn't do @@ -308,15 +306,30 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp, DBG_LOW(" -> miss\n"); ret = -1; } else { - DBG_LOW(" -> hit\n"); - /* Update the HPTE */ - hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) & ~(HPTE_R_PP | HPTE_R_N)) | - (newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C))); + native_lock_hpte(hptep); + /* recheck with locks held */ + hpte_v = be64_to_cpu(hptep->v); + if (unlikely(!HPTE_V_COMPARE(hpte_v, want_v) || + !(hpte_v & HPTE_V_VALID))) { + ret = -1; + } else { + DBG_LOW(" -> hit\n"); + /* Update the HPTE */ + hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) & + ~(HPTE_R_PP | HPTE_R_N)) | + (newpp & (HPTE_R_PP | HPTE_R_N | + HPTE_R_C))); + } + native_unlock_hpte(hptep); } - native_unlock_hpte(hptep); - /* Ensure it is out of the tlb too. 
*/ - tlbie(vpn, bpsize, apsize, ssize, local); + if (flags & HPTE_LOCAL_UPDATE) + local = 1; + /* + * Ensure it is out of the tlb too if it is not a nohpte fault + */ + if (!(flags & HPTE_NOHPTE_UPDATE)) + tlbie(vpn, bpsize, apsize, ssize, local); return ret; } @@ -419,7 +432,7 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long vpn, static void native_hugepage_invalidate(unsigned long vsid, unsigned long addr, unsigned char *hpte_slot_array, - int psize, int ssize) + int psize, int ssize, int local) { int i; struct hash_pte *hptep; @@ -465,7 +478,7 @@ static void native_hugepage_invalidate(unsigned long vsid, * instruction compares entry_VA in tlb with the VA specified * here */ - tlbie(vpn, psize, actual_psize, ssize, 0); + tlbie(vpn, psize, actual_psize, ssize, local); } local_irq_restore(flags); } @@ -629,7 +642,7 @@ static void native_flush_hash_range(unsigned long number, int local) unsigned long want_v; unsigned long flags; real_pte_t pte; - struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); + struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch); unsigned long psize = batch->psize; int ssize = batch->ssize; int i; diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index d5339a3b9945..2c2022d16059 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -989,7 +989,9 @@ static void check_paca_psize(unsigned long ea, struct mm_struct *mm, * -1 - critical hash insertion error * -2 - access not permitted by subpage protection mechanism */ -int hash_page_mm(struct mm_struct *mm, unsigned long ea, unsigned long access, unsigned long trap) +int hash_page_mm(struct mm_struct *mm, unsigned long ea, + unsigned long access, unsigned long trap, + unsigned long flags) { enum ctx_state prev_state = exception_enter(); pgd_t *pgdir; @@ -997,7 +999,7 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea, unsigned long access, u pte_t *ptep; unsigned hugeshift; const struct cpumask *tmp; - int rc, user_region = 0, local = 0; + int rc, user_region = 0; int psize, ssize; DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n", @@ -1049,7 +1051,7 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea, unsigned long access, u /* Check CPU locality */ tmp = cpumask_of(smp_processor_id()); if (user_region && cpumask_equal(mm_cpumask(mm), tmp)) - local = 1; + flags |= HPTE_LOCAL_UPDATE; #ifndef CONFIG_PPC_64K_PAGES /* If we use 4K pages and our psize is not 4K, then we might @@ -1086,11 +1088,11 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea, unsigned long access, u if (hugeshift) { if (pmd_trans_huge(*(pmd_t *)ptep)) rc = __hash_page_thp(ea, access, vsid, (pmd_t *)ptep, - trap, local, ssize, psize); + trap, flags, ssize, psize); #ifdef CONFIG_HUGETLB_PAGE else rc = __hash_page_huge(ea, access, vsid, ptep, trap, - local, ssize, hugeshift, psize); + flags, ssize, hugeshift, psize); #else else { /* @@ -1149,7 +1151,8 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea, unsigned long access, u #ifdef CONFIG_PPC_HAS_HASH_64K if (psize == MMU_PAGE_64K) - rc = __hash_page_64K(ea, access, vsid, ptep, trap, local, ssize); + rc = __hash_page_64K(ea, access, vsid, ptep, trap, + flags, ssize); else #endif /* CONFIG_PPC_HAS_HASH_64K */ { @@ -1158,7 +1161,7 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea, unsigned long access, u rc = -2; else rc = __hash_page_4K(ea, access, vsid, ptep, trap, - local, ssize, spp); + flags, ssize, spp); } /* Dump some info in case of hash insertion 
failure, they should @@ -1181,14 +1184,19 @@ bail: } EXPORT_SYMBOL_GPL(hash_page_mm); -int hash_page(unsigned long ea, unsigned long access, unsigned long trap) +int hash_page(unsigned long ea, unsigned long access, unsigned long trap, + unsigned long dsisr) { + unsigned long flags = 0; struct mm_struct *mm = current->mm; if (REGION_ID(ea) == VMALLOC_REGION_ID) mm = &init_mm; - return hash_page_mm(mm, ea, access, trap); + if (dsisr & DSISR_NOHPTE) + flags |= HPTE_NOHPTE_UPDATE; + + return hash_page_mm(mm, ea, access, trap, flags); } EXPORT_SYMBOL_GPL(hash_page); @@ -1200,7 +1208,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea, pgd_t *pgdir; pte_t *ptep; unsigned long flags; - int rc, ssize, local = 0; + int rc, ssize, update_flags = 0; BUG_ON(REGION_ID(ea) != USER_REGION_ID); @@ -1251,16 +1259,17 @@ void hash_preload(struct mm_struct *mm, unsigned long ea, /* Is that local to this CPU ? */ if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) - local = 1; + update_flags |= HPTE_LOCAL_UPDATE; /* Hash it in */ #ifdef CONFIG_PPC_HAS_HASH_64K if (mm->context.user_psize == MMU_PAGE_64K) - rc = __hash_page_64K(ea, access, vsid, ptep, trap, local, ssize); + rc = __hash_page_64K(ea, access, vsid, ptep, trap, + update_flags, ssize); else #endif /* CONFIG_PPC_HAS_HASH_64K */ - rc = __hash_page_4K(ea, access, vsid, ptep, trap, local, ssize, - subpage_protection(mm, ea)); + rc = __hash_page_4K(ea, access, vsid, ptep, trap, update_flags, + ssize, subpage_protection(mm, ea)); /* Dump some info in case of hash insertion failure, they should * never happen so it is really useful to know if/when they do @@ -1278,9 +1287,10 @@ out_exit: * do not forget to update the assembly call site ! */ void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, int ssize, - int local) + unsigned long flags) { unsigned long hash, index, shift, hidx, slot; + int local = flags & HPTE_LOCAL_UPDATE; DBG_LOW("flush_hash_page(vpn=%016lx)\n", vpn); pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) { @@ -1315,6 +1325,78 @@ void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, int ssize, #endif } +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +void flush_hash_hugepage(unsigned long vsid, unsigned long addr, + pmd_t *pmdp, unsigned int psize, int ssize, + unsigned long flags) +{ + int i, max_hpte_count, valid; + unsigned long s_addr; + unsigned char *hpte_slot_array; + unsigned long hidx, shift, vpn, hash, slot; + int local = flags & HPTE_LOCAL_UPDATE; + + s_addr = addr & HPAGE_PMD_MASK; + hpte_slot_array = get_hpte_slot_array(pmdp); + /* + * IF we try to do a HUGE PTE update after a withdraw is done. + * we will find the below NULL. 
This happens when we do + * split_huge_page_pmd + */ + if (!hpte_slot_array) + return; + + if (ppc_md.hugepage_invalidate) { + ppc_md.hugepage_invalidate(vsid, s_addr, hpte_slot_array, + psize, ssize, local); + goto tm_abort; + } + /* + * No bluk hpte removal support, invalidate each entry + */ + shift = mmu_psize_defs[psize].shift; + max_hpte_count = HPAGE_PMD_SIZE >> shift; + for (i = 0; i < max_hpte_count; i++) { + /* + * 8 bits per each hpte entries + * 000| [ secondary group (one bit) | hidx (3 bits) | valid bit] + */ + valid = hpte_valid(hpte_slot_array, i); + if (!valid) + continue; + hidx = hpte_hash_index(hpte_slot_array, i); + + /* get the vpn */ + addr = s_addr + (i * (1ul << shift)); + vpn = hpt_vpn(addr, vsid, ssize); + hash = hpt_hash(vpn, shift, ssize); + if (hidx & _PTEIDX_SECONDARY) + hash = ~hash; + + slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; + slot += hidx & _PTEIDX_GROUP_IX; + ppc_md.hpte_invalidate(slot, vpn, psize, + MMU_PAGE_16M, ssize, local); + } +tm_abort: +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + /* Transactions are not aborted by tlbiel, only tlbie. + * Without, syncing a page back to a block device w/ PIO could pick up + * transactional data (bad!) so we force an abort here. Before the + * sync the page will be made read-only, which will flush_hash_page. + * BIG ISSUE here: if the kernel uses a page from userspace without + * unmapping it first, it may see the speculated version. + */ + if (local && cpu_has_feature(CPU_FTR_TM) && + current->thread.regs && + MSR_TM_ACTIVE(current->thread.regs->msr)) { + tm_enable(); + tm_abort(TM_CAUSE_TLBI); + } +#endif +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + void flush_hash_range(unsigned long number, int local) { if (ppc_md.flush_hash_range) @@ -1322,7 +1404,7 @@ void flush_hash_range(unsigned long number, int local) else { int i; struct ppc64_tlb_batch *batch = - &__get_cpu_var(ppc64_tlb_batch); + this_cpu_ptr(&ppc64_tlb_batch); for (i = 0; i < number; i++) flush_hash_page(batch->vpn[i], batch->pte[i], @@ -1432,7 +1514,7 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi) mmu_kernel_ssize, 0); } -void kernel_map_pages(struct page *page, int numpages, int enable) +void __kernel_map_pages(struct page *page, int numpages, int enable) { unsigned long flags, vaddr, lmi; int i; diff --git a/arch/powerpc/mm/hugepage-hash64.c b/arch/powerpc/mm/hugepage-hash64.c index 5f5e6328c21c..86686514ae13 100644 --- a/arch/powerpc/mm/hugepage-hash64.c +++ b/arch/powerpc/mm/hugepage-hash64.c @@ -18,60 +18,9 @@ #include <linux/mm.h> #include <asm/machdep.h> -static void invalidate_old_hpte(unsigned long vsid, unsigned long addr, - pmd_t *pmdp, unsigned int psize, int ssize) -{ - int i, max_hpte_count, valid; - unsigned long s_addr; - unsigned char *hpte_slot_array; - unsigned long hidx, shift, vpn, hash, slot; - - s_addr = addr & HPAGE_PMD_MASK; - hpte_slot_array = get_hpte_slot_array(pmdp); - /* - * IF we try to do a HUGE PTE update after a withdraw is done. - * we will find the below NULL. 
This happens when we do - * split_huge_page_pmd - */ - if (!hpte_slot_array) - return; - - if (ppc_md.hugepage_invalidate) - return ppc_md.hugepage_invalidate(vsid, s_addr, hpte_slot_array, - psize, ssize); - /* - * No bluk hpte removal support, invalidate each entry - */ - shift = mmu_psize_defs[psize].shift; - max_hpte_count = HPAGE_PMD_SIZE >> shift; - for (i = 0; i < max_hpte_count; i++) { - /* - * 8 bits per each hpte entries - * 000| [ secondary group (one bit) | hidx (3 bits) | valid bit] - */ - valid = hpte_valid(hpte_slot_array, i); - if (!valid) - continue; - hidx = hpte_hash_index(hpte_slot_array, i); - - /* get the vpn */ - addr = s_addr + (i * (1ul << shift)); - vpn = hpt_vpn(addr, vsid, ssize); - hash = hpt_hash(vpn, shift, ssize); - if (hidx & _PTEIDX_SECONDARY) - hash = ~hash; - - slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; - slot += hidx & _PTEIDX_GROUP_IX; - ppc_md.hpte_invalidate(slot, vpn, psize, - MMU_PAGE_16M, ssize, 0); - } -} - - int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid, - pmd_t *pmdp, unsigned long trap, int local, int ssize, - unsigned int psize) + pmd_t *pmdp, unsigned long trap, unsigned long flags, + int ssize, unsigned int psize) { unsigned int index, valid; unsigned char *hpte_slot_array; @@ -145,7 +94,8 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid, * hash page table entries. */ if ((old_pmd & _PAGE_HASHPTE) && !(old_pmd & _PAGE_COMBO)) - invalidate_old_hpte(vsid, ea, pmdp, MMU_PAGE_64K, ssize); + flush_hash_hugepage(vsid, ea, pmdp, MMU_PAGE_64K, + ssize, flags); } valid = hpte_valid(hpte_slot_array, index); @@ -158,7 +108,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid, slot += hidx & _PTEIDX_GROUP_IX; ret = ppc_md.hpte_updatepp(slot, rflags, vpn, - psize, lpsize, ssize, local); + psize, lpsize, ssize, flags); /* * We failed to update, try to insert a new entry. 
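A recurring change in the hash MMU hunks above is that the old int local parameter threaded through hpte_updatepp(), flush_hash_page() and the hugepage paths becomes an unsigned long flags word carrying HPTE_LOCAL_UPDATE and HPTE_NOHPTE_UPDATE. A rough sketch of how a callee consumes such a flags word — the DEMO_* values and the helper below are placeholders, not the tree's definitions:

/* Placeholder flag values; the real ones live in the powerpc hash headers. */
#define DEMO_LOCAL_UPDATE       0x1UL
#define DEMO_NOHPTE_UPDATE      0x2UL

static void demo_tlb_invalidate(unsigned long slot, int local)
{
        /* stand-in for the tlbie/tlbiel selection */
}

static long demo_updatepp(unsigned long slot, unsigned long newpp,
                          unsigned long flags)
{
        int local = !!(flags & DEMO_LOCAL_UPDATE);

        /* ... update the hashed PTE selected by @slot with @newpp ... */

        /*
         * If the fault happened only because no HPTE existed yet, there is
         * nothing stale in the TLB, so the invalidate can be skipped.
         */
        if (!(flags & DEMO_NOHPTE_UPDATE))
                demo_tlb_invalidate(slot, local);

        return 0;
}

Packing the "mapping is local to this CPU" decision and the "no HPTE existed" hint into one word lets further conditions be added later without touching every prototype again.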
*/ diff --git a/arch/powerpc/mm/hugetlbpage-book3e.c b/arch/powerpc/mm/hugetlbpage-book3e.c index 5e4ee2573903..ba47aaf33a4b 100644 --- a/arch/powerpc/mm/hugetlbpage-book3e.c +++ b/arch/powerpc/mm/hugetlbpage-book3e.c @@ -33,13 +33,13 @@ static inline int tlb1_next(void) ncams = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY; - index = __get_cpu_var(next_tlbcam_idx); + index = this_cpu_read(next_tlbcam_idx); /* Just round-robin the entries and wrap when we hit the end */ if (unlikely(index == ncams - 1)) - __get_cpu_var(next_tlbcam_idx) = tlbcam_index; + __this_cpu_write(next_tlbcam_idx, tlbcam_index); else - __get_cpu_var(next_tlbcam_idx)++; + __this_cpu_inc(next_tlbcam_idx); return index; } diff --git a/arch/powerpc/mm/hugetlbpage-hash64.c b/arch/powerpc/mm/hugetlbpage-hash64.c index a5bcf9301196..d94b1af53a93 100644 --- a/arch/powerpc/mm/hugetlbpage-hash64.c +++ b/arch/powerpc/mm/hugetlbpage-hash64.c @@ -19,8 +19,8 @@ extern long hpte_insert_repeating(unsigned long hash, unsigned long vpn, unsigned long vflags, int psize, int ssize); int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid, - pte_t *ptep, unsigned long trap, int local, int ssize, - unsigned int shift, unsigned int mmu_psize) + pte_t *ptep, unsigned long trap, unsigned long flags, + int ssize, unsigned int shift, unsigned int mmu_psize) { unsigned long vpn; unsigned long old_pte, new_pte; @@ -81,7 +81,7 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid, slot += (old_pte & _PAGE_F_GIX) >> 12; if (ppc_md.hpte_updatepp(slot, rflags, vpn, mmu_psize, - mmu_psize, ssize, local) == -1) + mmu_psize, ssize, flags) == -1) old_pte &= ~_PAGE_HPTEFLAGS; } diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index 6a4a5fcb9730..5ff4e07d920a 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -62,6 +62,9 @@ static unsigned nr_gpages; /* * We have PGD_INDEX_SIZ = 12 and PTE_INDEX_SIZE = 8, so that we can have * 16GB hugepage pte in PGD and 16MB hugepage pte at PMD; + * + * Defined in such a way that we can optimize away code block at build time + * if CONFIG_HUGETLB_PAGE=n. */ int pmd_huge(pmd_t pmd) { @@ -230,7 +233,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift)) return NULL; - return hugepte_offset(hpdp, addr, pdshift); + return hugepte_offset(*hpdp, addr, pdshift); } #else @@ -270,13 +273,13 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift)) return NULL; - return hugepte_offset(hpdp, addr, pdshift); + return hugepte_offset(*hpdp, addr, pdshift); } #endif #ifdef CONFIG_PPC_FSL_BOOK3E /* Build list of addresses of gigantic pages. This function is used in early - * boot before the buddy or bootmem allocator is setup. + * boot before the buddy allocator is setup. 
*/ void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages) { @@ -312,7 +315,7 @@ int alloc_bootmem_huge_page(struct hstate *hstate) * If gpages can be in highmem we can't use the trick of storing the * data structure in the page; allocate space for this */ - m = alloc_bootmem(sizeof(struct huge_bootmem_page)); + m = memblock_virt_alloc(sizeof(struct huge_bootmem_page), 0); m->phys = gpage_freearray[idx].gpage_list[--nr_gpages]; #else m = phys_to_virt(gpage_freearray[idx].gpage_list[--nr_gpages]); @@ -352,6 +355,13 @@ static int __init do_gpage_early_setup(char *param, char *val, if (size != 0) { if (sscanf(val, "%lu", &npages) <= 0) npages = 0; + if (npages > MAX_NUMBER_GPAGES) { + pr_warn("MMU: %lu pages requested for page " + "size %llu KB, limiting to " + __stringify(MAX_NUMBER_GPAGES) "\n", + npages, size / 1024); + npages = MAX_NUMBER_GPAGES; + } gpage_npages[shift_to_mmu_psize(__ffs(size))] = npages; size = 0; } @@ -399,7 +409,7 @@ void __init reserve_hugetlb_gpages(void) #else /* !PPC_FSL_BOOK3E */ /* Build list of addresses of gigantic pages. This function is used in early - * boot before the buddy or bootmem allocator is setup. + * boot before the buddy allocator is setup. */ void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages) { @@ -462,7 +472,7 @@ static void hugepd_free(struct mmu_gather *tlb, void *hugepte) { struct hugepd_freelist **batchp; - batchp = &get_cpu_var(hugepd_freelist_cur); + batchp = this_cpu_ptr(&hugepd_freelist_cur); if (atomic_read(&tlb->mm->mm_users) < 2 || cpumask_equal(mm_cpumask(tlb->mm), @@ -536,7 +546,7 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud, do { pmd = pmd_offset(pud, addr); next = pmd_addr_end(addr, end); - if (!is_hugepd(pmd)) { + if (!is_hugepd(__hugepd(pmd_val(*pmd)))) { /* * if it is not hugepd pointer, we should already find * it cleared. @@ -585,7 +595,7 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, do { pud = pud_offset(pgd, addr); next = pud_addr_end(addr, end); - if (!is_hugepd(pud)) { + if (!is_hugepd(__hugepd(pud_val(*pud)))) { if (pud_none_or_clear_bad(pud)) continue; hugetlb_free_pmd_range(tlb, pud, addr, next, floor, @@ -651,7 +661,7 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb, do { next = pgd_addr_end(addr, end); pgd = pgd_offset(tlb->mm, addr); - if (!is_hugepd(pgd)) { + if (!is_hugepd(__hugepd(pgd_val(*pgd)))) { if (pgd_none_or_clear_bad(pgd)) continue; hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling); @@ -711,12 +721,11 @@ static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end, return (__boundary - 1 < end - 1) ? 
__boundary : end; } -int gup_hugepd(hugepd_t *hugepd, unsigned pdshift, - unsigned long addr, unsigned long end, - int write, struct page **pages, int *nr) +int gup_huge_pd(hugepd_t hugepd, unsigned long addr, unsigned pdshift, + unsigned long end, int write, struct page **pages, int *nr) { pte_t *ptep; - unsigned long sz = 1UL << hugepd_shift(*hugepd); + unsigned long sz = 1UL << hugepd_shift(hugepd); unsigned long next; ptep = hugepte_offset(hugepd, addr, pdshift); @@ -959,7 +968,7 @@ pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift else if (pgd_huge(pgd)) { ret_pte = (pte_t *) pgdp; goto out; - } else if (is_hugepd(&pgd)) + } else if (is_hugepd(__hugepd(pgd_val(pgd)))) hpdp = (hugepd_t *)&pgd; else { /* @@ -976,7 +985,7 @@ pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift else if (pud_huge(pud)) { ret_pte = (pte_t *) pudp; goto out; - } else if (is_hugepd(&pud)) + } else if (is_hugepd(__hugepd(pud_val(pud)))) hpdp = (hugepd_t *)&pud; else { pdshift = PMD_SHIFT; @@ -997,7 +1006,7 @@ pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift if (pmd_huge(pmd) || pmd_large(pmd)) { ret_pte = (pte_t *) pmdp; goto out; - } else if (is_hugepd(&pmd)) + } else if (is_hugepd(__hugepd(pmd_val(pmd)))) hpdp = (hugepd_t *)&pmd; else return pte_offset_kernel(&pmd, ea); @@ -1006,7 +1015,7 @@ pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift if (!hpdp) return NULL; - ret_pte = hugepte_offset(hpdp, ea, pdshift); + ret_pte = hugepte_offset(*hpdp, ea, pdshift); pdshift = hugepd_shift(*hpdp); out: if (shift) @@ -1036,14 +1045,6 @@ int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr, if ((pte_val(pte) & mask) != mask) return 0; -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - /* - * check for splitting here - */ - if (pmd_trans_splitting(pte_pmd(pte))) - return 0; -#endif - /* hugepages are never "special" */ VM_BUG_ON(!pfn_valid(pte_pfn(pte))); diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c index 415a51b028b9..a10be665b645 100644 --- a/arch/powerpc/mm/init_32.c +++ b/arch/powerpc/mm/init_32.c @@ -26,7 +26,6 @@ #include <linux/mm.h> #include <linux/stddef.h> #include <linux/init.h> -#include <linux/bootmem.h> #include <linux/highmem.h> #include <linux/initrd.h> #include <linux/pagemap.h> @@ -195,15 +194,6 @@ void __init MMU_init(void) memblock_set_current_limit(lowmem_end_addr); } -/* This is only called until mem_init is done. */ -void __init *early_get_page(void) -{ - if (init_bootmem_done) - return alloc_bootmem_pages(PAGE_SIZE); - else - return __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE)); -} - #ifdef CONFIG_8xx /* No 8xx specific .c file to put that in ... 
*/ void setup_initial_memory_limit(phys_addr_t first_memblock_base, phys_addr_t first_memblock_size) diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index 3481556a1880..10471f9bb63f 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c @@ -34,7 +34,6 @@ #include <linux/vmalloc.h> #include <linux/init.h> #include <linux/delay.h> -#include <linux/bootmem.h> #include <linux/highmem.h> #include <linux/idr.h> #include <linux/nodemask.h> diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 8ebaac75c940..b7285a5870f8 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -35,6 +35,7 @@ #include <linux/memblock.h> #include <linux/hugetlb.h> #include <linux/slab.h> +#include <linux/vmalloc.h> #include <asm/pgalloc.h> #include <asm/prom.h> @@ -60,7 +61,6 @@ #define CPU_FTR_NOEXECUTE 0 #endif -int init_bootmem_done; int mem_init_done; unsigned long long memory_limit; @@ -144,8 +144,17 @@ int arch_remove_memory(u64 start, u64 size) zone = page_zone(pfn_to_page(start_pfn)); ret = __remove_pages(zone, start_pfn, nr_pages); - if (!ret && (ppc_md.remove_memory)) - ret = ppc_md.remove_memory(start, size); + if (ret) + return ret; + + /* Remove htab bolted mappings for this section of memory */ + start = (unsigned long)__va(start); + ret = remove_section_mapping(start, start + size); + + /* Ensure all vmalloc mappings are flushed in case they also + * hit that section of memory + */ + vm_unmap_aliases(); return ret; } @@ -180,70 +189,23 @@ walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages, } EXPORT_SYMBOL_GPL(walk_system_ram_range); -/* - * Initialize the bootmem system and give it all the memory we - * have available. If we are using highmem, we only put the - * lowmem into the bootmem system. - */ #ifndef CONFIG_NEED_MULTIPLE_NODES -void __init do_init_bootmem(void) +void __init initmem_init(void) { - unsigned long start, bootmap_pages; - unsigned long total_pages; - struct memblock_region *reg; - int boot_mapsize; - max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT; - total_pages = (memblock_end_of_DRAM() - memstart_addr) >> PAGE_SHIFT; + min_low_pfn = MEMORY_START >> PAGE_SHIFT; #ifdef CONFIG_HIGHMEM - total_pages = total_lowmem >> PAGE_SHIFT; max_low_pfn = lowmem_end_addr >> PAGE_SHIFT; #endif - /* - * Find an area to use for the bootmem bitmap. Calculate the size of - * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE. - * Add 1 additional page in case the address isn't page-aligned. - */ - bootmap_pages = bootmem_bootmap_pages(total_pages); - - start = memblock_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE); - - min_low_pfn = MEMORY_START >> PAGE_SHIFT; - boot_mapsize = init_bootmem_node(NODE_DATA(0), start >> PAGE_SHIFT, min_low_pfn, max_low_pfn); - /* Place all memblock_regions in the same node and merge contiguous * memblock_regions */ memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0); - /* Add all physical memory to the bootmem map, mark each area - * present. 
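The init_32.c, mem.c and vdso.c hunks above are part of retiring the bootmem allocator on powerpc: early_get_page() and do_init_bootmem() go away, initmem_init() only sets the pfn limits and hands memory registration to memblock, and early allocations call memblock directly. A minimal sketch of an early allocation in the style used here, assuming this era's API (later kernels rename memblock_virt_alloc() to memblock_alloc()):

#include <linux/memblock.h>

/*
 * Early, pre-slab allocation: memblock returns zeroed, directly mapped
 * memory and panics if nothing is available, so no NULL check is needed.
 * An alignment of 0 means "use SMP_CACHE_BYTES".
 */
static void * __init demo_early_alloc(size_t size)
{
        return memblock_virt_alloc(size, 0);
}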
- */ -#ifdef CONFIG_HIGHMEM - free_bootmem_with_active_regions(0, lowmem_end_addr >> PAGE_SHIFT); - - /* reserve the sections we're already using */ - for_each_memblock(reserved, reg) { - unsigned long top = reg->base + reg->size - 1; - if (top < lowmem_end_addr) - reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT); - else if (reg->base < lowmem_end_addr) { - unsigned long trunc_size = lowmem_end_addr - reg->base; - reserve_bootmem(reg->base, trunc_size, BOOTMEM_DEFAULT); - } - } -#else - free_bootmem_with_active_regions(0, max_pfn); - - /* reserve the sections we're already using */ - for_each_memblock(reserved, reg) - reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT); -#endif /* XXX need to clip this if using highmem? */ sparse_memory_present_with_active_regions(0); - - init_bootmem_done = 1; + sparse_init(); } /* mark pages that don't exist as nosave */ @@ -359,14 +321,6 @@ void __init paging_init(void) mark_nonram_nosave(); } -static void __init register_page_bootmem_info(void) -{ - int i; - - for_each_online_node(i) - register_page_bootmem_info_node(NODE_DATA(i)); -} - void __init mem_init(void) { /* @@ -379,7 +333,6 @@ void __init mem_init(void) swiotlb_init(0); #endif - register_page_bootmem_info(); high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); set_max_mapnr(max_pfn); free_all_bootmem(); diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c index 928ebe79668b..9cba6cba2e50 100644 --- a/arch/powerpc/mm/mmu_context_nohash.c +++ b/arch/powerpc/mm/mmu_context_nohash.c @@ -421,12 +421,12 @@ void __init mmu_context_init(void) /* * Allocate the maps used by context management */ - context_map = alloc_bootmem(CTX_MAP_SIZE); - context_mm = alloc_bootmem(sizeof(void *) * (last_context + 1)); + context_map = memblock_virt_alloc(CTX_MAP_SIZE, 0); + context_mm = memblock_virt_alloc(sizeof(void *) * (last_context + 1), 0); #ifndef CONFIG_SMP - stale_map[0] = alloc_bootmem(CTX_MAP_SIZE); + stale_map[0] = memblock_virt_alloc(CTX_MAP_SIZE, 0); #else - stale_map[boot_cpuid] = alloc_bootmem(CTX_MAP_SIZE); + stale_map[boot_cpuid] = memblock_virt_alloc(CTX_MAP_SIZE, 0); register_cpu_notifier(&mmu_context_cpu_nb); #endif diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h index 9615d82919b8..78c45f392f5b 100644 --- a/arch/powerpc/mm/mmu_decl.h +++ b/arch/powerpc/mm/mmu_decl.h @@ -67,7 +67,7 @@ static inline void _tlbil_va(unsigned long address, unsigned int pid, { __tlbil_va(address, pid); } -#endif /* CONIFG_8xx */ +#endif /* CONFIG_8xx */ #if defined(CONFIG_PPC_BOOK3E) || defined(CONFIG_PPC_47x) extern void _tlbivax_bcast(unsigned long address, unsigned int pid, diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index b9d1dfdbe5bb..0257a7d659ef 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -134,28 +134,6 @@ static int __init fake_numa_create_new_node(unsigned long end_pfn, return 0; } -/* - * get_node_active_region - Return active region containing pfn - * Active range returned is empty if none found. 
- * @pfn: The page to return the region for - * @node_ar: Returned set to the active region containing @pfn - */ -static void __init get_node_active_region(unsigned long pfn, - struct node_active_region *node_ar) -{ - unsigned long start_pfn, end_pfn; - int i, nid; - - for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) { - if (pfn >= start_pfn && pfn < end_pfn) { - node_ar->nid = nid; - node_ar->start_pfn = start_pfn; - node_ar->end_pfn = end_pfn; - break; - } - } -} - static void reset_numa_cpu_lookup_table(void) { unsigned int cpu; @@ -928,134 +906,48 @@ static void __init dump_numa_memory_topology(void) } } -/* - * Allocate some memory, satisfying the memblock or bootmem allocator where - * required. nid is the preferred node and end is the physical address of - * the highest address in the node. - * - * Returns the virtual address of the memory. - */ -static void __init *careful_zallocation(int nid, unsigned long size, - unsigned long align, - unsigned long end_pfn) -{ - void *ret; - int new_nid; - unsigned long ret_paddr; - - ret_paddr = __memblock_alloc_base(size, align, end_pfn << PAGE_SHIFT); - - /* retry over all memory */ - if (!ret_paddr) - ret_paddr = __memblock_alloc_base(size, align, memblock_end_of_DRAM()); - - if (!ret_paddr) - panic("numa.c: cannot allocate %lu bytes for node %d", - size, nid); - - ret = __va(ret_paddr); - - /* - * We initialize the nodes in numeric order: 0, 1, 2... - * and hand over control from the MEMBLOCK allocator to the - * bootmem allocator. If this function is called for - * node 5, then we know that all nodes <5 are using the - * bootmem allocator instead of the MEMBLOCK allocator. - * - * So, check the nid from which this allocation came - * and double check to see if we need to use bootmem - * instead of the MEMBLOCK. We don't free the MEMBLOCK memory - * since it would be useless. - */ - new_nid = early_pfn_to_nid(ret_paddr >> PAGE_SHIFT); - if (new_nid < nid) { - ret = __alloc_bootmem_node(NODE_DATA(new_nid), - size, align, 0); - - dbg("alloc_bootmem %p %lx\n", ret, size); - } - - memset(ret, 0, size); - return ret; -} - static struct notifier_block ppc64_numa_nb = { .notifier_call = cpu_numa_callback, .priority = 1 /* Must run before sched domains notifier. */ }; -static void __init mark_reserved_regions_for_nid(int nid) +/* Initialize NODE_DATA for a node on the local memory */ +static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn) { - struct pglist_data *node = NODE_DATA(nid); - struct memblock_region *reg; - - for_each_memblock(reserved, reg) { - unsigned long physbase = reg->base; - unsigned long size = reg->size; - unsigned long start_pfn = physbase >> PAGE_SHIFT; - unsigned long end_pfn = PFN_UP(physbase + size); - struct node_active_region node_ar; - unsigned long node_end_pfn = pgdat_end_pfn(node); - - /* - * Check to make sure that this memblock.reserved area is - * within the bounds of the node that we care about. - * Checking the nid of the start and end points is not - * sufficient because the reserved area could span the - * entire node. 
- */ - if (end_pfn <= node->node_start_pfn || - start_pfn >= node_end_pfn) - continue; - - get_node_active_region(start_pfn, &node_ar); - while (start_pfn < end_pfn && - node_ar.start_pfn < node_ar.end_pfn) { - unsigned long reserve_size = size; - /* - * if reserved region extends past active region - * then trim size to active region - */ - if (end_pfn > node_ar.end_pfn) - reserve_size = (node_ar.end_pfn << PAGE_SHIFT) - - physbase; - /* - * Only worry about *this* node, others may not - * yet have valid NODE_DATA(). - */ - if (node_ar.nid == nid) { - dbg("reserve_bootmem %lx %lx nid=%d\n", - physbase, reserve_size, node_ar.nid); - reserve_bootmem_node(NODE_DATA(node_ar.nid), - physbase, reserve_size, - BOOTMEM_DEFAULT); - } - /* - * if reserved region is contained in the active region - * then done. - */ - if (end_pfn <= node_ar.end_pfn) - break; - - /* - * reserved region extends past the active region - * get next active region that contains this - * reserved region - */ - start_pfn = node_ar.end_pfn; - physbase = start_pfn << PAGE_SHIFT; - size = size - reserve_size; - get_node_active_region(start_pfn, &node_ar); - } - } + u64 spanned_pages = end_pfn - start_pfn; + const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES); + u64 nd_pa; + void *nd; + int tnid; + + if (spanned_pages) + pr_info("Initmem setup node %d [mem %#010Lx-%#010Lx]\n", + nid, start_pfn << PAGE_SHIFT, + (end_pfn << PAGE_SHIFT) - 1); + else + pr_info("Initmem setup node %d\n", nid); + + nd_pa = memblock_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid); + nd = __va(nd_pa); + + /* report and initialize */ + pr_info(" NODE_DATA [mem %#010Lx-%#010Lx]\n", + nd_pa, nd_pa + nd_size - 1); + tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT); + if (tnid != nid) + pr_info(" NODE_DATA(%d) on node %d\n", nid, tnid); + + node_data[nid] = nd; + memset(NODE_DATA(nid), 0, sizeof(pg_data_t)); + NODE_DATA(nid)->node_id = nid; + NODE_DATA(nid)->node_start_pfn = start_pfn; + NODE_DATA(nid)->node_spanned_pages = spanned_pages; } - -void __init do_init_bootmem(void) +void __init initmem_init(void) { int nid, cpu; - min_low_pfn = 0; max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT; max_pfn = max_low_pfn; @@ -1064,64 +956,18 @@ void __init do_init_bootmem(void) else dump_numa_memory_topology(); + memblock_dump_all(); + for_each_online_node(nid) { unsigned long start_pfn, end_pfn; - void *bootmem_vaddr; - unsigned long bootmap_pages; get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); - - /* - * Allocate the node structure node local if possible - * - * Be careful moving this around, as it relies on all - * previous nodes' bootmem to be initialized and have - * all reserved areas marked. 
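With bootmem gone from numa.c, careful_zallocation() and the reserved-region bookkeeping removed above are no longer needed; setup_node_data() simply asks memblock for node-local memory. A sketch of that allocation step, with the prints and error handling left out — the helper name is invented, and in this era memblock_alloc_try_nid() returns a physical address:

/* Sketch only: mirrors the shape of setup_node_data() above. */
static void * __init demo_alloc_node_data(int nid, size_t size)
{
        phys_addr_t pa;

        /* Prefer memory on @nid; memblock falls back to other nodes if needed. */
        pa = memblock_alloc_try_nid(size, SMP_CACHE_BYTES, nid);

        return __va(pa);        /* early memory sits in the linear mapping */
}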
- */ - NODE_DATA(nid) = careful_zallocation(nid, - sizeof(struct pglist_data), - SMP_CACHE_BYTES, end_pfn); - - dbg("node %d\n", nid); - dbg("NODE_DATA() = %p\n", NODE_DATA(nid)); - - NODE_DATA(nid)->bdata = &bootmem_node_data[nid]; - NODE_DATA(nid)->node_start_pfn = start_pfn; - NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn; - - if (NODE_DATA(nid)->node_spanned_pages == 0) - continue; - - dbg("start_paddr = %lx\n", start_pfn << PAGE_SHIFT); - dbg("end_paddr = %lx\n", end_pfn << PAGE_SHIFT); - - bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn); - bootmem_vaddr = careful_zallocation(nid, - bootmap_pages << PAGE_SHIFT, - PAGE_SIZE, end_pfn); - - dbg("bootmap_vaddr = %p\n", bootmem_vaddr); - - init_bootmem_node(NODE_DATA(nid), - __pa(bootmem_vaddr) >> PAGE_SHIFT, - start_pfn, end_pfn); - - free_bootmem_with_active_regions(nid, end_pfn); - /* - * Be very careful about moving this around. Future - * calls to careful_zallocation() depend on this getting - * done correctly. - */ - mark_reserved_regions_for_nid(nid); + setup_node_data(nid, start_pfn, end_pfn); sparse_memory_present_with_active_regions(nid); } - init_bootmem_done = 1; + sparse_init(); - /* - * Now bootmem is initialised we can create the node to cpumask - * lookup tables and setup the cpu callback to populate them. - */ setup_node_to_cpumask_map(); reset_numa_cpu_lookup_table(); @@ -1711,12 +1557,11 @@ static void stage_topology_update(int core_id) static int dt_update_callback(struct notifier_block *nb, unsigned long action, void *data) { - struct of_prop_reconfig *update; + struct of_reconfig_data *update = data; int rc = NOTIFY_DONE; switch (action) { case OF_RECONFIG_UPDATE_PROPERTY: - update = (struct of_prop_reconfig *)data; if (!of_prop_cmp(update->dn->type, "cpu") && !of_prop_cmp(update->prop->name, "ibm,associativity")) { u32 core_id; diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c index cf11342bf519..50fad3801f30 100644 --- a/arch/powerpc/mm/pgtable_32.c +++ b/arch/powerpc/mm/pgtable_32.c @@ -100,12 +100,11 @@ __init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long add { pte_t *pte; extern int mem_init_done; - extern void *early_get_page(void); if (mem_init_done) { pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO); } else { - pte = (pte_t *)early_get_page(); + pte = __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE)); if (pte) clear_page(pte); } @@ -430,7 +429,7 @@ static int change_page_attr(struct page *page, int numpages, pgprot_t prot) } -void kernel_map_pages(struct page *page, int numpages, int enable) +void __kernel_map_pages(struct page *page, int numpages, int enable) { if (PageHighMem(page)) return; diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c index c8d709ab489d..4fe5f64cc179 100644 --- a/arch/powerpc/mm/pgtable_64.c +++ b/arch/powerpc/mm/pgtable_64.c @@ -33,9 +33,9 @@ #include <linux/swap.h> #include <linux/stddef.h> #include <linux/vmalloc.h> -#include <linux/bootmem.h> #include <linux/memblock.h> #include <linux/slab.h> +#include <linux/hugetlb.h> #include <asm/pgalloc.h> #include <asm/page.h> @@ -51,6 +51,7 @@ #include <asm/cputable.h> #include <asm/sections.h> #include <asm/firmware.h> +#include <asm/dma.h> #include "mmu_decl.h" @@ -75,11 +76,7 @@ static __ref void *early_alloc_pgtable(unsigned long size) { void *pt; - if (init_bootmem_done) - pt = __alloc_bootmem(size, size, __pa(MAX_DMA_ADDRESS)); - else - pt = __va(memblock_alloc_base(size, size, - __pa(MAX_DMA_ADDRESS))); + pt = 
__va(memblock_alloc_base(size, size, __pa(MAX_DMA_ADDRESS))); memset(pt, 0, size); return pt; @@ -113,10 +110,6 @@ int map_kernel_page(unsigned long ea, unsigned long pa, int flags) __pgprot(flags))); } else { #ifdef CONFIG_PPC_MMU_NOHASH - /* Warning ! This will blow up if bootmem is not initialized - * which our ppc64 code is keen to do that, we'll need to - * fix it and/or be more careful - */ pgdp = pgd_offset_k(ea); #ifdef PUD_TABLE_SIZE if (pgd_none(*pgdp)) { @@ -352,16 +345,31 @@ EXPORT_SYMBOL(iounmap); EXPORT_SYMBOL(__iounmap); EXPORT_SYMBOL(__iounmap_at); +#ifndef __PAGETABLE_PUD_FOLDED +/* 4 level page table */ +struct page *pgd_page(pgd_t pgd) +{ + if (pgd_huge(pgd)) + return pte_page(pgd_pte(pgd)); + return virt_to_page(pgd_page_vaddr(pgd)); +} +#endif + +struct page *pud_page(pud_t pud) +{ + if (pud_huge(pud)) + return pte_page(pud_pte(pud)); + return virt_to_page(pud_page_vaddr(pud)); +} + /* * For hugepage we have pfn in the pmd, we use PTE_RPN_SHIFT bits for flags * For PTE page, we have a PTE_FRAG_SIZE (4K) aligned virtual address. */ struct page *pmd_page(pmd_t pmd) { -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - if (pmd_trans_huge(pmd)) + if (pmd_trans_huge(pmd) || pmd_huge(pmd)) return pfn_to_page(pmd_pfn(pmd)); -#endif return virt_to_page(pmd_page_vaddr(pmd)); } @@ -731,29 +739,15 @@ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, unsigned long old_pmd) { - int ssize, i; - unsigned long s_addr; - int max_hpte_count; - unsigned int psize, valid; - unsigned char *hpte_slot_array; - unsigned long hidx, vpn, vsid, hash, shift, slot; - - /* - * Flush all the hptes mapping this hugepage - */ - s_addr = addr & HPAGE_PMD_MASK; - hpte_slot_array = get_hpte_slot_array(pmdp); - /* - * IF we try to do a HUGE PTE update after a withdraw is done. - * we will find the below NULL. 
This happens when we do - * split_huge_page_pmd - */ - if (!hpte_slot_array) - return; + int ssize; + unsigned int psize; + unsigned long vsid; + unsigned long flags = 0; + const struct cpumask *tmp; /* get the base page size,vsid and segment size */ #ifdef CONFIG_DEBUG_VM - psize = get_slice_psize(mm, s_addr); + psize = get_slice_psize(mm, addr); BUG_ON(psize == MMU_PAGE_16M); #endif if (old_pmd & _PAGE_COMBO) @@ -761,46 +755,20 @@ void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr, else psize = MMU_PAGE_64K; - if (!is_kernel_addr(s_addr)) { - ssize = user_segment_size(s_addr); - vsid = get_vsid(mm->context.id, s_addr, ssize); + if (!is_kernel_addr(addr)) { + ssize = user_segment_size(addr); + vsid = get_vsid(mm->context.id, addr, ssize); WARN_ON(vsid == 0); } else { - vsid = get_kernel_vsid(s_addr, mmu_kernel_ssize); + vsid = get_kernel_vsid(addr, mmu_kernel_ssize); ssize = mmu_kernel_ssize; } - if (ppc_md.hugepage_invalidate) - return ppc_md.hugepage_invalidate(vsid, s_addr, - hpte_slot_array, - psize, ssize); - /* - * No bluk hpte removal support, invalidate each entry - */ - shift = mmu_psize_defs[psize].shift; - max_hpte_count = HPAGE_PMD_SIZE >> shift; - for (i = 0; i < max_hpte_count; i++) { - /* - * 8 bits per each hpte entries - * 000| [ secondary group (one bit) | hidx (3 bits) | valid bit] - */ - valid = hpte_valid(hpte_slot_array, i); - if (!valid) - continue; - hidx = hpte_hash_index(hpte_slot_array, i); - - /* get the vpn */ - addr = s_addr + (i * (1ul << shift)); - vpn = hpt_vpn(addr, vsid, ssize); - hash = hpt_hash(vpn, shift, ssize); - if (hidx & _PTEIDX_SECONDARY) - hash = ~hash; - - slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; - slot += hidx & _PTEIDX_GROUP_IX; - ppc_md.hpte_invalidate(slot, vpn, psize, - MMU_PAGE_16M, ssize, 0); - } + tmp = cpumask_of(smp_processor_id()); + if (cpumask_equal(mm_cpumask(mm), tmp)) + flags |= HPTE_LOCAL_UPDATE; + + return flush_hash_hugepage(vsid, addr, pmdp, psize, ssize, flags); } static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot) diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h index 9aee27c582dc..c406aa95b2bc 100644 --- a/arch/powerpc/net/bpf_jit.h +++ b/arch/powerpc/net/bpf_jit.h @@ -87,6 +87,9 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh); #define PPC_STD(r, base, i) EMIT(PPC_INST_STD | ___PPC_RS(r) | \ ___PPC_RA(base) | ((i) & 0xfffc)) + +#define PPC_LBZ(r, base, i) EMIT(PPC_INST_LBZ | ___PPC_RT(r) | \ + ___PPC_RA(base) | IMM_L(i)) #define PPC_LD(r, base, i) EMIT(PPC_INST_LD | ___PPC_RT(r) | \ ___PPC_RA(base) | IMM_L(i)) #define PPC_LWZ(r, base, i) EMIT(PPC_INST_LWZ | ___PPC_RT(r) | \ @@ -96,6 +99,10 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh); #define PPC_LHBRX(r, base, b) EMIT(PPC_INST_LHBRX | ___PPC_RT(r) | \ ___PPC_RA(base) | ___PPC_RB(b)) /* Convenience helpers for the above with 'far' offsets: */ +#define PPC_LBZ_OFFS(r, base, i) do { if ((i) < 32768) PPC_LBZ(r, base, i); \ + else { PPC_ADDIS(r, base, IMM_HA(i)); \ + PPC_LBZ(r, r, IMM_L(i)); } } while(0) + #define PPC_LD_OFFS(r, base, i) do { if ((i) < 32768) PPC_LD(r, base, i); \ else { PPC_ADDIS(r, base, IMM_HA(i)); \ PPC_LD(r, r, IMM_L(i)); } } while(0) diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c index cbae2dfd053c..1ca125b9c226 100644 --- a/arch/powerpc/net/bpf_jit_comp.c +++ b/arch/powerpc/net/bpf_jit_comp.c @@ -181,6 +181,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, } break; case BPF_ALU | BPF_MOD | BPF_X: /* A %= X; */ + case BPF_ALU | BPF_DIV | BPF_X: /* A /= X; */ 
ctx->seen |= SEEN_XREG; PPC_CMPWI(r_X, 0); if (ctx->pc_ret0 != -1) { @@ -190,9 +191,13 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, PPC_LI(r_ret, 0); PPC_JMP(exit_addr); } - PPC_DIVWU(r_scratch1, r_A, r_X); - PPC_MUL(r_scratch1, r_X, r_scratch1); - PPC_SUB(r_A, r_A, r_scratch1); + if (code == (BPF_ALU | BPF_MOD | BPF_X)) { + PPC_DIVWU(r_scratch1, r_A, r_X); + PPC_MUL(r_scratch1, r_X, r_scratch1); + PPC_SUB(r_A, r_A, r_scratch1); + } else { + PPC_DIVWU(r_A, r_A, r_X); + } break; case BPF_ALU | BPF_MOD | BPF_K: /* A %= K; */ PPC_LI32(r_scratch2, K); @@ -200,22 +205,6 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, PPC_MUL(r_scratch1, r_scratch2, r_scratch1); PPC_SUB(r_A, r_A, r_scratch1); break; - case BPF_ALU | BPF_DIV | BPF_X: /* A /= X; */ - ctx->seen |= SEEN_XREG; - PPC_CMPWI(r_X, 0); - if (ctx->pc_ret0 != -1) { - PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]); - } else { - /* - * Exit, returning 0; first pass hits here - * (longer worst-case code size). - */ - PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12); - PPC_LI(r_ret, 0); - PPC_JMP(exit_addr); - } - PPC_DIVWU(r_A, r_A, r_X); - break; case BPF_ALU | BPF_DIV | BPF_K: /* A /= K */ if (K == 1) break; @@ -361,6 +350,11 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, protocol)); break; case BPF_ANC | SKF_AD_IFINDEX: + case BPF_ANC | SKF_AD_HATYPE: + BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, + ifindex) != 4); + BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, + type) != 2); PPC_LD_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff, dev)); PPC_CMPDI(r_scratch1, 0); @@ -368,14 +362,18 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]); } else { /* Exit, returning 0; first pass hits here. */ - PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12); + PPC_BCC_SHORT(COND_NE, ctx->idx * 4 + 12); PPC_LI(r_ret, 0); PPC_JMP(exit_addr); } - BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, - ifindex) != 4); - PPC_LWZ_OFFS(r_A, r_scratch1, + if (code == (BPF_ANC | SKF_AD_IFINDEX)) { + PPC_LWZ_OFFS(r_A, r_scratch1, offsetof(struct net_device, ifindex)); + } else { + PPC_LHZ_OFFS(r_A, r_scratch1, + offsetof(struct net_device, type)); + } + break; case BPF_ANC | SKF_AD_MARK: BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4); @@ -407,6 +405,11 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, queue_mapping)); break; + case BPF_ANC | SKF_AD_PKTTYPE: + PPC_LBZ_OFFS(r_A, r_skb, PKT_TYPE_OFFSET()); + PPC_ANDI(r_A, r_A, PKT_TYPE_MAX); + PPC_SRWI(r_A, r_A, 5); + break; case BPF_ANC | SKF_AD_CPU: #ifdef CONFIG_SMP /* diff --git a/arch/powerpc/oprofile/backtrace.c b/arch/powerpc/oprofile/backtrace.c index 6adf55fa5d88..ecc66d5f02c9 100644 --- a/arch/powerpc/oprofile/backtrace.c +++ b/arch/powerpc/oprofile/backtrace.c @@ -10,7 +10,7 @@ #include <linux/oprofile.h> #include <linux/sched.h> #include <asm/processor.h> -#include <asm/uaccess.h> +#include <linux/uaccess.h> #include <asm/compat.h> #include <asm/oprofile_impl.h> @@ -105,6 +105,7 @@ void op_powerpc_backtrace(struct pt_regs * const regs, unsigned int depth) first_frame = 0; } } else { + pagefault_disable(); #ifdef CONFIG_PPC64 if (!is_32bit_task()) { while (depth--) { @@ -113,7 +114,7 @@ void op_powerpc_backtrace(struct pt_regs * const regs, unsigned int depth) break; first_frame = 0; } - + pagefault_enable(); return; } #endif @@ -124,5 +125,6 @@ void op_powerpc_backtrace(struct pt_regs * const regs, unsigned int depth) break; first_frame = 0; } + pagefault_enable(); 
} } diff --git a/arch/powerpc/oprofile/cell/spu_task_sync.c b/arch/powerpc/oprofile/cell/spu_task_sync.c index 28f1af2db1f5..1c27831df1ac 100644 --- a/arch/powerpc/oprofile/cell/spu_task_sync.c +++ b/arch/powerpc/oprofile/cell/spu_task_sync.c @@ -331,8 +331,7 @@ get_exec_dcookie_and_offset(struct spu *spu, unsigned int *offsetp, if (mm->exe_file) { app_cookie = fast_get_dcookie(&mm->exe_file->f_path); - pr_debug("got dcookie for %s\n", - mm->exe_file->f_dentry->d_name.name); + pr_debug("got dcookie for %pD\n", mm->exe_file); } for (vma = mm->mmap; vma; vma = vma->vm_next) { @@ -342,15 +341,14 @@ get_exec_dcookie_and_offset(struct spu *spu, unsigned int *offsetp, if (!vma->vm_file) goto fail_no_image_cookie; - pr_debug("Found spu ELF at %X(object-id:%lx) for file %s\n", - my_offset, spu_ref, - vma->vm_file->f_dentry->d_name.name); + pr_debug("Found spu ELF at %X(object-id:%lx) for file %pD\n", + my_offset, spu_ref, vma->vm_file); *offsetp = my_offset; break; } *spu_bin_dcookie = fast_get_dcookie(&vma->vm_file->f_path); - pr_debug("got dcookie for %s\n", vma->vm_file->f_dentry->d_name.name); + pr_debug("got dcookie for %pD\n", vma->vm_file); up_read(&mm->mmap_sem); diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index a6995d4e93d4..7c4f6690533a 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -339,7 +339,7 @@ static void power_pmu_bhrb_reset(void) static void power_pmu_bhrb_enable(struct perf_event *event) { - struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); + struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); if (!ppmu->bhrb_nr) return; @@ -354,7 +354,7 @@ static void power_pmu_bhrb_enable(struct perf_event *event) static void power_pmu_bhrb_disable(struct perf_event *event) { - struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); + struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); if (!ppmu->bhrb_nr) return; @@ -1144,7 +1144,7 @@ static void power_pmu_disable(struct pmu *pmu) if (!ppmu) return; local_irq_save(flags); - cpuhw = &__get_cpu_var(cpu_hw_events); + cpuhw = this_cpu_ptr(&cpu_hw_events); if (!cpuhw->disabled) { /* @@ -1211,7 +1211,7 @@ static void power_pmu_enable(struct pmu *pmu) return; local_irq_save(flags); - cpuhw = &__get_cpu_var(cpu_hw_events); + cpuhw = this_cpu_ptr(&cpu_hw_events); if (!cpuhw->disabled) goto out; @@ -1403,7 +1403,7 @@ static int power_pmu_add(struct perf_event *event, int ef_flags) * Add the event to the list (if there is room) * and check whether the total set is still feasible. 
*/ - cpuhw = &__get_cpu_var(cpu_hw_events); + cpuhw = this_cpu_ptr(&cpu_hw_events); n0 = cpuhw->n_events; if (n0 >= ppmu->n_counter) goto out; @@ -1469,7 +1469,7 @@ static void power_pmu_del(struct perf_event *event, int ef_flags) power_pmu_read(event); - cpuhw = &__get_cpu_var(cpu_hw_events); + cpuhw = this_cpu_ptr(&cpu_hw_events); for (i = 0; i < cpuhw->n_events; ++i) { if (event == cpuhw->event[i]) { while (++i < cpuhw->n_events) { @@ -1575,7 +1575,7 @@ static void power_pmu_stop(struct perf_event *event, int ef_flags) */ static void power_pmu_start_txn(struct pmu *pmu) { - struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); + struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); perf_pmu_disable(pmu); cpuhw->group_flag |= PERF_EVENT_TXN; @@ -1589,7 +1589,7 @@ static void power_pmu_start_txn(struct pmu *pmu) */ static void power_pmu_cancel_txn(struct pmu *pmu) { - struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); + struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); cpuhw->group_flag &= ~PERF_EVENT_TXN; perf_pmu_enable(pmu); @@ -1607,7 +1607,7 @@ static int power_pmu_commit_txn(struct pmu *pmu) if (!ppmu) return -EAGAIN; - cpuhw = &__get_cpu_var(cpu_hw_events); + cpuhw = this_cpu_ptr(&cpu_hw_events); n = cpuhw->n_events; if (check_excludes(cpuhw->event, cpuhw->flags, 0, n)) return -EAGAIN; @@ -1964,7 +1964,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val, if (event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK) { struct cpu_hw_events *cpuhw; - cpuhw = &__get_cpu_var(cpu_hw_events); + cpuhw = this_cpu_ptr(&cpu_hw_events); power_pmu_bhrb_read(cpuhw); data.br_stack = &cpuhw->bhrb_stack; } @@ -2037,7 +2037,7 @@ static bool pmc_overflow(unsigned long val) static void perf_event_interrupt(struct pt_regs *regs) { int i, j; - struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); + struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); struct perf_event *event; unsigned long val[8]; int found, active; diff --git a/arch/powerpc/perf/core-fsl-emb.c b/arch/powerpc/perf/core-fsl-emb.c index d35ae52c69dc..4acaea01fe03 100644 --- a/arch/powerpc/perf/core-fsl-emb.c +++ b/arch/powerpc/perf/core-fsl-emb.c @@ -210,7 +210,7 @@ static void fsl_emb_pmu_disable(struct pmu *pmu) unsigned long flags; local_irq_save(flags); - cpuhw = &__get_cpu_var(cpu_hw_events); + cpuhw = this_cpu_ptr(&cpu_hw_events); if (!cpuhw->disabled) { cpuhw->disabled = 1; @@ -249,7 +249,7 @@ static void fsl_emb_pmu_enable(struct pmu *pmu) unsigned long flags; local_irq_save(flags); - cpuhw = &__get_cpu_var(cpu_hw_events); + cpuhw = this_cpu_ptr(&cpu_hw_events); if (!cpuhw->disabled) goto out; @@ -653,7 +653,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val, static void perf_event_interrupt(struct pt_regs *regs) { int i; - struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); + struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); struct perf_event *event; unsigned long val; int found = 0; diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig index 82f2da28cd27..d2ac1c116454 100644 --- a/arch/powerpc/platforms/44x/Kconfig +++ b/arch/powerpc/platforms/44x/Kconfig @@ -220,7 +220,6 @@ config AKEBONO select USB_EHCI_HCD_PLATFORM if USB_EHCI_HCD select MMC_SDHCI select MMC_SDHCI_PLTFM - select MMC_SDHCI_OF_476GTR select ATA select SATA_AHCI_PLATFORM help diff --git a/arch/powerpc/platforms/44x/ppc476.c b/arch/powerpc/platforms/44x/ppc476.c index 58db9d083969..c11ce6516c8f 100644 --- 
a/arch/powerpc/platforms/44x/ppc476.c +++ b/arch/powerpc/platforms/44x/ppc476.c @@ -94,7 +94,7 @@ static int avr_probe(struct i2c_client *client, { avr_i2c_client = client; ppc_md.restart = avr_reset_system; - ppc_md.power_off = avr_power_off_system; + pm_power_off = avr_power_off_system; return 0; } diff --git a/arch/powerpc/platforms/512x/mpc512x_shared.c b/arch/powerpc/platforms/512x/mpc512x_shared.c index e996e007bc44..711f3d352af7 100644 --- a/arch/powerpc/platforms/512x/mpc512x_shared.c +++ b/arch/powerpc/platforms/512x/mpc512x_shared.c @@ -18,7 +18,7 @@ #include <linux/irq.h> #include <linux/of_platform.h> #include <linux/fsl-diu-fb.h> -#include <linux/bootmem.h> +#include <linux/memblock.h> #include <sysdev/fsl_soc.h> #include <asm/cacheflush.h> @@ -297,14 +297,13 @@ static void __init mpc512x_setup_diu(void) * and so negatively affect boot time. Instead we reserve the * already configured frame buffer area so that it won't be * destroyed. The starting address of the area to reserve and - * also it's length is passed to reserve_bootmem(). It will be + * also it's length is passed to memblock_reserve(). It will be * freed later on first open of fbdev, when splash image is not * needed any more. */ if (diu_shared_fb.in_use) { - ret = reserve_bootmem(diu_shared_fb.fb_phys, - diu_shared_fb.fb_len, - BOOTMEM_EXCLUSIVE); + ret = memblock_reserve(diu_shared_fb.fb_phys, + diu_shared_fb.fb_len); if (ret) { pr_err("%s: reserve bootmem failed\n", __func__); diu_shared_fb.in_use = false; diff --git a/arch/powerpc/platforms/52xx/efika.c b/arch/powerpc/platforms/52xx/efika.c index 3feffde9128d..6af651e69129 100644 --- a/arch/powerpc/platforms/52xx/efika.c +++ b/arch/powerpc/platforms/52xx/efika.c @@ -212,6 +212,8 @@ static int __init efika_probe(void) DMA_MODE_READ = 0x44; DMA_MODE_WRITE = 0x48; + pm_power_off = rtas_power_off; + return 1; } @@ -225,7 +227,6 @@ define_machine(efika) .init_IRQ = mpc52xx_init_irq, .get_irq = mpc52xx_get_irq, .restart = rtas_restart, - .power_off = rtas_power_off, .halt = rtas_halt, .set_rtc_time = rtas_set_rtc_time, .get_rtc_time = rtas_get_rtc_time, diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c index 692998244d2c..c949ca055712 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c @@ -782,7 +782,6 @@ static const struct of_device_id mpc52xx_gpt_match[] = { static struct platform_driver mpc52xx_gpt_driver = { .driver = { .name = "mpc52xx-gpt", - .owner = THIS_MODULE, .of_match_table = mpc52xx_gpt_match, }, .probe = mpc52xx_gpt_probe, diff --git a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c index f8f0081759fb..251dcb90ef34 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c @@ -572,7 +572,6 @@ static const struct of_device_id mpc52xx_lpbfifo_match[] = { static struct platform_driver mpc52xx_lpbfifo_driver = { .driver = { .name = "mpc52xx-lpbfifo", - .owner = THIS_MODULE, .of_match_table = mpc52xx_lpbfifo_match, }, .probe = mpc52xx_lpbfifo_probe, diff --git a/arch/powerpc/platforms/82xx/ep8248e.c b/arch/powerpc/platforms/82xx/ep8248e.c index 3d0c3a01143d..a0cb8bd41958 100644 --- a/arch/powerpc/platforms/82xx/ep8248e.c +++ b/arch/powerpc/platforms/82xx/ep8248e.c @@ -169,7 +169,6 @@ static const struct of_device_id ep8248e_mdio_match[] = { static struct platform_driver ep8248e_mdio_driver = { .driver = { .name = "ep8248e-mdio-bitbang", - .owner = THIS_MODULE, 
.of_match_table = ep8248e_mdio_match, }, .probe = ep8248e_mdio_probe, diff --git a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c index 463fa91ee5b6..15e8021ddef9 100644 --- a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c +++ b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c @@ -167,10 +167,10 @@ static int mcu_probe(struct i2c_client *client, const struct i2c_device_id *id) if (ret) goto err; - /* XXX: this is potentially racy, but there is no lock for ppc_md */ - if (!ppc_md.power_off) { + /* XXX: this is potentially racy, but there is no lock for pm_power_off */ + if (!pm_power_off) { glob_mcu = mcu; - ppc_md.power_off = mcu_power_off; + pm_power_off = mcu_power_off; dev_info(&client->dev, "will provide power-off service\n"); } @@ -197,7 +197,7 @@ static int mcu_remove(struct i2c_client *client) device_remove_file(&client->dev, &dev_attr_status); if (glob_mcu == mcu) { - ppc_md.power_off = NULL; + pm_power_off = NULL; glob_mcu = NULL; } diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c index eeb80e25214d..c9adbfb65006 100644 --- a/arch/powerpc/platforms/83xx/suspend.c +++ b/arch/powerpc/platforms/83xx/suspend.c @@ -435,7 +435,6 @@ static const struct of_device_id pmc_match[] = { static struct platform_driver pmc_driver = { .driver = { .name = "mpc83xx-pmc", - .owner = THIS_MODULE, .of_match_table = pmc_match, }, .probe = pmc_probe, diff --git a/arch/powerpc/platforms/85xx/corenet_generic.c b/arch/powerpc/platforms/85xx/corenet_generic.c index e56b89a792ed..1f309ccb096e 100644 --- a/arch/powerpc/platforms/85xx/corenet_generic.c +++ b/arch/powerpc/platforms/85xx/corenet_generic.c @@ -170,7 +170,7 @@ static int __init corenet_generic_probe(void) ppc_md.get_irq = ehv_pic_get_irq; ppc_md.restart = fsl_hv_restart; - ppc_md.power_off = fsl_hv_halt; + pm_power_off = fsl_hv_halt; ppc_md.halt = fsl_hv_halt; #ifdef CONFIG_SMP /* diff --git a/arch/powerpc/platforms/85xx/sgy_cts1000.c b/arch/powerpc/platforms/85xx/sgy_cts1000.c index 8162b0412117..79fd0dfd4b82 100644 --- a/arch/powerpc/platforms/85xx/sgy_cts1000.c +++ b/arch/powerpc/platforms/85xx/sgy_cts1000.c @@ -120,7 +120,7 @@ static int gpio_halt_probe(struct platform_device *pdev) /* Register our halt function */ ppc_md.halt = gpio_halt_cb; - ppc_md.power_off = gpio_halt_cb; + pm_power_off = gpio_halt_cb; printk(KERN_INFO "gpio-halt: registered GPIO %d (%d trigger, %d" " irq).\n", gpio, trigger, irq); @@ -137,7 +137,7 @@ static int gpio_halt_remove(struct platform_device *pdev) free_irq(irq, halt_node); ppc_md.halt = NULL; - ppc_md.power_off = NULL; + pm_power_off = NULL; gpio_free(gpio); @@ -161,7 +161,6 @@ MODULE_DEVICE_TABLE(of, gpio_halt_match); static struct platform_driver gpio_halt_driver = { .driver = { .name = "gpio-halt", - .owner = THIS_MODULE, .of_match_table = gpio_halt_match, }, .probe = gpio_halt_probe, diff --git a/arch/powerpc/platforms/8xx/Kconfig b/arch/powerpc/platforms/8xx/Kconfig index bd6f1a1cf922..157250426b56 100644 --- a/arch/powerpc/platforms/8xx/Kconfig +++ b/arch/powerpc/platforms/8xx/Kconfig @@ -1,6 +1,3 @@ -config FADS - bool - config CPM1 bool select CPM @@ -13,7 +10,6 @@ choice config MPC8XXFADS bool "FADS" - select FADS config MPC86XADS bool "MPC86XADS" diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c index 862b32702d29..623bd961465a 100644 --- a/arch/powerpc/platforms/cell/axon_msi.c +++ b/arch/powerpc/platforms/cell/axon_msi.c @@ -279,7 +279,7 @@ static int 
axon_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) irq_set_msi_desc(virq, entry); msg.data = virq; - write_msi_msg(virq, &msg); + pci_write_msi_msg(virq, &msg); } return 0; @@ -301,9 +301,9 @@ static void axon_msi_teardown_msi_irqs(struct pci_dev *dev) } static struct irq_chip msic_irq_chip = { - .irq_mask = mask_msi_irq, - .irq_unmask = unmask_msi_irq, - .irq_shutdown = mask_msi_irq, + .irq_mask = pci_msi_mask_irq, + .irq_unmask = pci_msi_unmask_irq, + .irq_shutdown = pci_msi_mask_irq, .name = "AXON-MSI", }; @@ -437,7 +437,6 @@ static struct platform_driver axon_msi_driver = { .shutdown = axon_msi_shutdown, .driver = { .name = "axon-msi", - .owner = THIS_MODULE, .of_match_table = axon_msi_device_id, }, }; diff --git a/arch/powerpc/platforms/cell/beat_htab.c b/arch/powerpc/platforms/cell/beat_htab.c index d4d245c0d787..bee9232fe619 100644 --- a/arch/powerpc/platforms/cell/beat_htab.c +++ b/arch/powerpc/platforms/cell/beat_htab.c @@ -186,7 +186,7 @@ static long beat_lpar_hpte_updatepp(unsigned long slot, unsigned long newpp, unsigned long vpn, int psize, int apsize, - int ssize, int local) + int ssize, unsigned long flags) { unsigned long lpar_rc; u64 dummy0, dummy1; @@ -369,7 +369,7 @@ static long beat_lpar_hpte_updatepp_v3(unsigned long slot, unsigned long newpp, unsigned long vpn, int psize, int apsize, - int ssize, int local) + int ssize, unsigned long flags) { unsigned long lpar_rc; unsigned long want_v; diff --git a/arch/powerpc/platforms/cell/celleb_pci.c b/arch/powerpc/platforms/cell/celleb_pci.c index 2b98a36ef8fb..3ce70ded2d6a 100644 --- a/arch/powerpc/platforms/cell/celleb_pci.c +++ b/arch/powerpc/platforms/cell/celleb_pci.c @@ -29,7 +29,7 @@ #include <linux/pci.h> #include <linux/string.h> #include <linux/init.h> -#include <linux/bootmem.h> +#include <linux/memblock.h> #include <linux/pci_regs.h> #include <linux/of.h> #include <linux/of_device.h> @@ -401,11 +401,11 @@ error: } else { if (config && *config) { size = 256; - free_bootmem(__pa(*config), size); + memblock_free(__pa(*config), size); } if (res && *res) { size = sizeof(struct celleb_pci_resource); - free_bootmem(__pa(*res), size); + memblock_free(__pa(*res), size); } } diff --git a/arch/powerpc/platforms/cell/celleb_scc_epci.c b/arch/powerpc/platforms/cell/celleb_scc_epci.c index 844c0facb4f7..9438bbed402f 100644 --- a/arch/powerpc/platforms/cell/celleb_scc_epci.c +++ b/arch/powerpc/platforms/cell/celleb_scc_epci.c @@ -25,7 +25,6 @@ #include <linux/pci.h> #include <linux/init.h> #include <linux/pci_regs.h> -#include <linux/bootmem.h> #include <asm/io.h> #include <asm/irq.h> diff --git a/arch/powerpc/platforms/cell/celleb_scc_pciex.c b/arch/powerpc/platforms/cell/celleb_scc_pciex.c index 4278acfa2ede..f22387598040 100644 --- a/arch/powerpc/platforms/cell/celleb_scc_pciex.c +++ b/arch/powerpc/platforms/cell/celleb_scc_pciex.c @@ -25,7 +25,6 @@ #include <linux/string.h> #include <linux/slab.h> #include <linux/init.h> -#include <linux/bootmem.h> #include <linux/delay.h> #include <linux/interrupt.h> diff --git a/arch/powerpc/platforms/cell/celleb_setup.c b/arch/powerpc/platforms/cell/celleb_setup.c index 34e8ce2976aa..90be8ec51686 100644 --- a/arch/powerpc/platforms/cell/celleb_setup.c +++ b/arch/powerpc/platforms/cell/celleb_setup.c @@ -142,6 +142,7 @@ static int __init celleb_probe_beat(void) powerpc_firmware_features |= FW_FEATURE_CELLEB_ALWAYS | FW_FEATURE_BEAT | FW_FEATURE_LPAR; hpte_init_beat_v3(); + pm_power_off = beat_power_off; return 1; } @@ -190,6 +191,7 @@ static int __init celleb_probe_native(void) 
powerpc_firmware_features |= FW_FEATURE_CELLEB_ALWAYS; hpte_init_native(); + pm_power_off = rtas_power_off; return 1; } @@ -204,7 +206,6 @@ define_machine(celleb_beat) { .setup_arch = celleb_setup_arch_beat, .show_cpuinfo = celleb_show_cpuinfo, .restart = beat_restart, - .power_off = beat_power_off, .halt = beat_halt, .get_rtc_time = beat_get_rtc_time, .set_rtc_time = beat_set_rtc_time, @@ -230,7 +231,6 @@ define_machine(celleb_native) { .setup_arch = celleb_setup_arch_native, .show_cpuinfo = celleb_show_cpuinfo, .restart = rtas_restart, - .power_off = rtas_power_off, .halt = rtas_halt, .get_boot_time = rtas_get_boot_time, .get_rtc_time = rtas_get_rtc_time, diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c index 8a106b4172e0..4c11421847be 100644 --- a/arch/powerpc/platforms/cell/interrupt.c +++ b/arch/powerpc/platforms/cell/interrupt.c @@ -82,7 +82,7 @@ static void iic_unmask(struct irq_data *d) static void iic_eoi(struct irq_data *d) { - struct iic *iic = &__get_cpu_var(cpu_iic); + struct iic *iic = this_cpu_ptr(&cpu_iic); out_be64(&iic->regs->prio, iic->eoi_stack[--iic->eoi_ptr]); BUG_ON(iic->eoi_ptr < 0); } @@ -148,7 +148,7 @@ static unsigned int iic_get_irq(void) struct iic *iic; unsigned int virq; - iic = &__get_cpu_var(cpu_iic); + iic = this_cpu_ptr(&cpu_iic); *(unsigned long *) &pending = in_be64((u64 __iomem *) &iic->regs->pending_destr); if (!(pending.flags & CBE_IIC_IRQ_VALID)) @@ -163,7 +163,7 @@ static unsigned int iic_get_irq(void) void iic_setup_cpu(void) { - out_be64(&__get_cpu_var(cpu_iic).regs->prio, 0xff); + out_be64(this_cpu_ptr(&cpu_iic.regs->prio), 0xff); } u8 iic_get_target_id(int cpu) diff --git a/arch/powerpc/platforms/cell/qpace_setup.c b/arch/powerpc/platforms/cell/qpace_setup.c index 6e3409d590ac..d328140dc6f5 100644 --- a/arch/powerpc/platforms/cell/qpace_setup.c +++ b/arch/powerpc/platforms/cell/qpace_setup.c @@ -127,6 +127,7 @@ static int __init qpace_probe(void) return 0; hpte_init_native(); + pm_power_off = rtas_power_off; return 1; } @@ -137,7 +138,6 @@ define_machine(qpace) { .setup_arch = qpace_setup_arch, .show_cpuinfo = qpace_show_cpuinfo, .restart = rtas_restart, - .power_off = rtas_power_off, .halt = rtas_halt, .get_boot_time = rtas_get_boot_time, .get_rtc_time = rtas_get_rtc_time, diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c index 6ae25fb62015..d62aa982d530 100644 --- a/arch/powerpc/platforms/cell/setup.c +++ b/arch/powerpc/platforms/cell/setup.c @@ -259,6 +259,7 @@ static int __init cell_probe(void) return 0; hpte_init_native(); + pm_power_off = rtas_power_off; return 1; } @@ -269,7 +270,6 @@ define_machine(cell) { .setup_arch = cell_setup_arch, .show_cpuinfo = cell_show_cpuinfo, .restart = rtas_restart, - .power_off = rtas_power_off, .halt = rtas_halt, .get_boot_time = rtas_get_boot_time, .get_rtc_time = rtas_get_rtc_time, diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c index ffcbd242e669..f7af74f83693 100644 --- a/arch/powerpc/platforms/cell/spu_base.c +++ b/arch/powerpc/platforms/cell/spu_base.c @@ -181,7 +181,8 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea) return 0; } -extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX +extern int hash_page(unsigned long ea, unsigned long access, + unsigned long trap, unsigned long dsisr); //XXX static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr) { int ret; @@ -196,7 +197,7 @@ static int 
__spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr) (REGION_ID(ea) != USER_REGION_ID)) { spin_unlock(&spu->register_lock); - ret = hash_page(ea, _PAGE_PRESENT, 0x300); + ret = hash_page(ea, _PAGE_PRESENT, 0x300, dsisr); spin_lock(&spu->register_lock); if (!ret) { diff --git a/arch/powerpc/platforms/cell/spufs/fault.c b/arch/powerpc/platforms/cell/spufs/fault.c index e45894a08118..d98f845ac777 100644 --- a/arch/powerpc/platforms/cell/spufs/fault.c +++ b/arch/powerpc/platforms/cell/spufs/fault.c @@ -144,7 +144,7 @@ int spufs_handle_class1(struct spu_context *ctx) access = (_PAGE_PRESENT | _PAGE_USER); access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? _PAGE_RW : 0UL; local_irq_save(flags); - ret = hash_page(ea, access, 0x300); + ret = hash_page(ea, access, 0x300, dsisr); local_irq_restore(flags); /* hashing failed, so try the actual fault handler */ diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c index 87ba7cf99cd7..1a3429e1ccb5 100644 --- a/arch/powerpc/platforms/cell/spufs/inode.c +++ b/arch/powerpc/platforms/cell/spufs/inode.c @@ -164,7 +164,7 @@ static void spufs_prune_dir(struct dentry *dir) struct dentry *dentry, *tmp; mutex_lock(&dir->d_inode->i_mutex); - list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_u.d_child) { + list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_child) { spin_lock(&dentry->d_lock); if (!(d_unhashed(dentry)) && dentry->d_inode) { dget_dlock(dentry); @@ -301,7 +301,7 @@ static int spufs_context_open(struct path *path) int ret; struct file *filp; - ret = get_unused_fd(); + ret = get_unused_fd_flags(0); if (ret < 0) return ret; @@ -518,7 +518,7 @@ static int spufs_gang_open(struct path *path) int ret; struct file *filp; - ret = get_unused_fd(); + ret = get_unused_fd_flags(0); if (ret < 0) return ret; diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c index 5b77b1919fd2..860a59eb8ea2 100644 --- a/arch/powerpc/platforms/chrp/setup.c +++ b/arch/powerpc/platforms/chrp/setup.c @@ -585,6 +585,8 @@ static int __init chrp_probe(void) DMA_MODE_READ = 0x44; DMA_MODE_WRITE = 0x48; + pm_power_off = rtas_power_off; + return 1; } @@ -597,7 +599,6 @@ define_machine(chrp) { .show_cpuinfo = chrp_show_cpuinfo, .init_IRQ = chrp_init_IRQ, .restart = rtas_restart, - .power_off = rtas_power_off, .halt = rtas_halt, .time_init = chrp_time_init, .set_rtc_time = chrp_set_rtc_time, diff --git a/arch/powerpc/platforms/embedded6xx/gamecube.c b/arch/powerpc/platforms/embedded6xx/gamecube.c index bd4ba5d7d568..fe0ed6ee285e 100644 --- a/arch/powerpc/platforms/embedded6xx/gamecube.c +++ b/arch/powerpc/platforms/embedded6xx/gamecube.c @@ -67,6 +67,8 @@ static int __init gamecube_probe(void) if (!of_flat_dt_is_compatible(dt_root, "nintendo,gamecube")) return 0; + pm_power_off = gamecube_power_off; + return 1; } @@ -80,7 +82,6 @@ define_machine(gamecube) { .probe = gamecube_probe, .init_early = gamecube_init_early, .restart = gamecube_restart, - .power_off = gamecube_power_off, .halt = gamecube_halt, .init_IRQ = flipper_pic_probe, .get_irq = flipper_pic_get_irq, diff --git a/arch/powerpc/platforms/embedded6xx/linkstation.c b/arch/powerpc/platforms/embedded6xx/linkstation.c index 168e1d80b2e5..540eeb58d3f0 100644 --- a/arch/powerpc/platforms/embedded6xx/linkstation.c +++ b/arch/powerpc/platforms/embedded6xx/linkstation.c @@ -147,6 +147,9 @@ static int __init linkstation_probe(void) if (!of_flat_dt_is_compatible(root, "linkstation")) return 0; + + pm_power_off = linkstation_power_off; + return 1; } 
@@ -158,7 +161,6 @@ define_machine(linkstation){ .show_cpuinfo = linkstation_show_cpuinfo, .get_irq = mpic_get_irq, .restart = linkstation_restart, - .power_off = linkstation_power_off, .halt = linkstation_halt, .calibrate_decr = generic_calibrate_decr, }; diff --git a/arch/powerpc/platforms/embedded6xx/usbgecko_udbg.c b/arch/powerpc/platforms/embedded6xx/usbgecko_udbg.c index 20a8ed91962e..7feb325b636b 100644 --- a/arch/powerpc/platforms/embedded6xx/usbgecko_udbg.c +++ b/arch/powerpc/platforms/embedded6xx/usbgecko_udbg.c @@ -247,7 +247,7 @@ void __init ug_udbg_init(void) np = of_find_compatible_node(NULL, NULL, "nintendo,flipper-exi"); if (!np) { udbg_printf("%s: EXI node not found\n", __func__); - goto done; + goto out; } exi_io_base = ug_udbg_setup_exi_io_base(np); @@ -267,8 +267,8 @@ void __init ug_udbg_init(void) } done: - if (np) - of_node_put(np); + of_node_put(np); +out: return; } diff --git a/arch/powerpc/platforms/embedded6xx/wii.c b/arch/powerpc/platforms/embedded6xx/wii.c index 388e29bab8f6..352592d3e44e 100644 --- a/arch/powerpc/platforms/embedded6xx/wii.c +++ b/arch/powerpc/platforms/embedded6xx/wii.c @@ -211,6 +211,8 @@ static int __init wii_probe(void) if (!of_flat_dt_is_compatible(dt_root, "nintendo,wii")) return 0; + pm_power_off = wii_power_off; + return 1; } @@ -226,7 +228,6 @@ define_machine(wii) { .init_early = wii_init_early, .setup_arch = wii_setup_arch, .restart = wii_restart, - .power_off = wii_power_off, .halt = wii_halt, .init_IRQ = wii_pic_probe, .get_irq = flipper_pic_get_irq, diff --git a/arch/powerpc/platforms/maple/pci.c b/arch/powerpc/platforms/maple/pci.c index f7136aae8bbf..d3a13067ec42 100644 --- a/arch/powerpc/platforms/maple/pci.c +++ b/arch/powerpc/platforms/maple/pci.c @@ -15,7 +15,6 @@ #include <linux/delay.h> #include <linux/string.h> #include <linux/init.h> -#include <linux/bootmem.h> #include <linux/irq.h> #include <asm/sections.h> diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c index cb1b0b35a0c6..56b85cd61aaf 100644 --- a/arch/powerpc/platforms/maple/setup.c +++ b/arch/powerpc/platforms/maple/setup.c @@ -169,7 +169,7 @@ static void __init maple_use_rtas_reboot_and_halt_if_present(void) if (rtas_service_present("system-reboot") && rtas_service_present("power-off")) { ppc_md.restart = rtas_restart; - ppc_md.power_off = rtas_power_off; + pm_power_off = rtas_power_off; ppc_md.halt = rtas_halt; } } @@ -312,6 +312,7 @@ static int __init maple_probe(void) alloc_dart_table(); hpte_init_native(); + pm_power_off = maple_power_off; return 1; } @@ -325,7 +326,6 @@ define_machine(maple) { .pci_irq_fixup = maple_pci_irq_fixup, .pci_get_legacy_ide_irq = maple_pci_get_legacy_ide_irq, .restart = maple_restart, - .power_off = maple_power_off, .halt = maple_halt, .get_boot_time = maple_get_boot_time, .set_rtc_time = maple_set_rtc_time, diff --git a/arch/powerpc/platforms/pasemi/gpio_mdio.c b/arch/powerpc/platforms/pasemi/gpio_mdio.c index ada33358950d..ae3f47b25b18 100644 --- a/arch/powerpc/platforms/pasemi/gpio_mdio.c +++ b/arch/powerpc/platforms/pasemi/gpio_mdio.c @@ -305,7 +305,6 @@ static struct platform_driver gpio_mdio_driver = .remove = gpio_mdio_remove, .driver = { .name = "gpio-mdio-bitbang", - .owner = THIS_MODULE, .of_match_table = gpio_mdio_match, }, }; diff --git a/arch/powerpc/platforms/powermac/nvram.c b/arch/powerpc/platforms/powermac/nvram.c index 014d06e6d46b..60b03a1703d1 100644 --- a/arch/powerpc/platforms/powermac/nvram.c +++ b/arch/powerpc/platforms/powermac/nvram.c @@ -513,11 +513,7 @@ static int 
__init core99_nvram_setup(struct device_node *dp, unsigned long addr) printk(KERN_ERR "nvram: no address\n"); return -EINVAL; } - nvram_image = alloc_bootmem(NVRAM_SIZE); - if (nvram_image == NULL) { - printk(KERN_ERR "nvram: can't allocate ram image\n"); - return -ENOMEM; - } + nvram_image = memblock_virt_alloc(NVRAM_SIZE, 0); nvram_data = ioremap(addr, NVRAM_SIZE*2); nvram_naddrs = 1; /* Make sure we get the correct case */ diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c index 7e868ccf3b0d..04702db35d45 100644 --- a/arch/powerpc/platforms/powermac/pci.c +++ b/arch/powerpc/platforms/powermac/pci.c @@ -15,7 +15,6 @@ #include <linux/delay.h> #include <linux/string.h> #include <linux/init.h> -#include <linux/bootmem.h> #include <linux/irq.h> #include <linux/of_pci.h> diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c index b127a29ac526..713d36d45d1d 100644 --- a/arch/powerpc/platforms/powermac/setup.c +++ b/arch/powerpc/platforms/powermac/setup.c @@ -632,6 +632,8 @@ static int __init pmac_probe(void) smu_cmdbuf_abs = memblock_alloc_base(4096, 4096, 0x80000000UL); #endif /* CONFIG_PMAC_SMU */ + pm_power_off = pmac_power_off; + return 1; } @@ -663,7 +665,6 @@ define_machine(powermac) { .get_irq = NULL, /* changed later */ .pci_irq_fixup = pmac_pci_irq_fixup, .restart = pmac_restart, - .power_off = pmac_power_off, .halt = pmac_halt, .time_init = pmac_time_init, .get_boot_time = pmac_get_boot_time, diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c index eba9cb10619c..2809c9895288 100644 --- a/arch/powerpc/platforms/powernv/eeh-ioda.c +++ b/arch/powerpc/platforms/powernv/eeh-ioda.c @@ -11,7 +11,6 @@ * (at your option) any later version. */ -#include <linux/bootmem.h> #include <linux/debugfs.h> #include <linux/delay.h> #include <linux/io.h> @@ -354,6 +353,9 @@ static int ioda_eeh_get_phb_state(struct eeh_pe *pe) } else if (!(pe->state & EEH_PE_ISOLATED)) { eeh_pe_state_mark(pe, EEH_PE_ISOLATED); ioda_eeh_phb_diag(pe); + + if (eeh_has_flag(EEH_EARLY_DUMP_LOG)) + pnv_pci_dump_phb_diag_data(pe->phb, pe->data); } return result; @@ -373,7 +375,7 @@ static int ioda_eeh_get_pe_state(struct eeh_pe *pe) * moving forward, we have to return operational * state during PE reset. 
*/ - if (pe->state & EEH_PE_CFG_BLOCKED) { + if (pe->state & EEH_PE_RESET) { result = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE | EEH_STATE_MMIO_ENABLED | @@ -452,6 +454,9 @@ static int ioda_eeh_get_pe_state(struct eeh_pe *pe) eeh_pe_state_mark(pe, EEH_PE_ISOLATED); ioda_eeh_phb_diag(pe); + + if (eeh_has_flag(EEH_EARLY_DUMP_LOG)) + pnv_pci_dump_phb_diag_data(pe->phb, pe->data); } return result; @@ -731,7 +736,8 @@ static int ioda_eeh_reset(struct eeh_pe *pe, int option) static int ioda_eeh_get_log(struct eeh_pe *pe, int severity, char *drv_log, unsigned long len) { - pnv_pci_dump_phb_diag_data(pe->phb, pe->data); + if (!eeh_has_flag(EEH_EARLY_DUMP_LOG)) + pnv_pci_dump_phb_diag_data(pe->phb, pe->data); return 0; } @@ -1087,6 +1093,10 @@ static int ioda_eeh_next_error(struct eeh_pe **pe) !((*pe)->state & EEH_PE_ISOLATED)) { eeh_pe_state_mark(*pe, EEH_PE_ISOLATED); ioda_eeh_phb_diag(*pe); + + if (eeh_has_flag(EEH_EARLY_DUMP_LOG)) + pnv_pci_dump_phb_diag_data((*pe)->phb, + (*pe)->data); } /* diff --git a/arch/powerpc/platforms/powernv/opal-async.c b/arch/powerpc/platforms/powernv/opal-async.c index e462ab947d16..693b6cdac691 100644 --- a/arch/powerpc/platforms/powernv/opal-async.c +++ b/arch/powerpc/platforms/powernv/opal-async.c @@ -71,6 +71,7 @@ int opal_async_get_token_interruptible(void) return token; } +EXPORT_SYMBOL_GPL(opal_async_get_token_interruptible); int __opal_async_release_token(int token) { @@ -102,6 +103,7 @@ int opal_async_release_token(int token) return 0; } +EXPORT_SYMBOL_GPL(opal_async_release_token); int opal_async_wait_response(uint64_t token, struct opal_msg *msg) { @@ -120,6 +122,7 @@ int opal_async_wait_response(uint64_t token, struct opal_msg *msg) return 0; } +EXPORT_SYMBOL_GPL(opal_async_wait_response); static int opal_async_comp_event(struct notifier_block *nb, unsigned long msg_type, void *msg) diff --git a/arch/powerpc/platforms/powernv/opal-rtc.c b/arch/powerpc/platforms/powernv/opal-rtc.c index 499707ddaa9c..37dbee15769f 100644 --- a/arch/powerpc/platforms/powernv/opal-rtc.c +++ b/arch/powerpc/platforms/powernv/opal-rtc.c @@ -15,6 +15,8 @@ #include <linux/bcd.h> #include <linux/rtc.h> #include <linux/delay.h> +#include <linux/platform_device.h> +#include <linux/of_platform.h> #include <asm/opal.h> #include <asm/firmware.h> @@ -43,7 +45,7 @@ unsigned long __init opal_get_boot_time(void) long rc = OPAL_BUSY; if (!opal_check_token(OPAL_RTC_READ)) - goto out; + return 0; while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) { rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms); @@ -53,62 +55,33 @@ unsigned long __init opal_get_boot_time(void) mdelay(10); } if (rc != OPAL_SUCCESS) - goto out; + return 0; y_m_d = be32_to_cpu(__y_m_d); h_m_s_ms = be64_to_cpu(__h_m_s_ms); opal_to_tm(y_m_d, h_m_s_ms, &tm); return mktime(tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec); -out: - ppc_md.get_rtc_time = NULL; - ppc_md.set_rtc_time = NULL; - return 0; } -void opal_get_rtc_time(struct rtc_time *tm) +static __init int opal_time_init(void) { - long rc = OPAL_BUSY; - u32 y_m_d; - u64 h_m_s_ms; - __be32 __y_m_d; - __be64 __h_m_s_ms; + struct platform_device *pdev; + struct device_node *rtc; - while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) { - rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms); - if (rc == OPAL_BUSY_EVENT) - opal_poll_events(NULL); + rtc = of_find_node_by_path("/ibm,opal/rtc"); + if (rtc) { + pdev = of_platform_device_create(rtc, "opal-rtc", NULL); + of_node_put(rtc); + } else { + if (opal_check_token(OPAL_RTC_READ) || + 
opal_check_token(OPAL_READ_TPO)) + pdev = platform_device_register_simple("opal-rtc", -1, + NULL, 0); else - mdelay(10); + return -ENODEV; } - if (rc != OPAL_SUCCESS) - return; - y_m_d = be32_to_cpu(__y_m_d); - h_m_s_ms = be64_to_cpu(__h_m_s_ms); - opal_to_tm(y_m_d, h_m_s_ms, tm); -} - -int opal_set_rtc_time(struct rtc_time *tm) -{ - long rc = OPAL_BUSY; - u32 y_m_d = 0; - u64 h_m_s_ms = 0; - - y_m_d |= ((u32)bin2bcd((tm->tm_year + 1900) / 100)) << 24; - y_m_d |= ((u32)bin2bcd((tm->tm_year + 1900) % 100)) << 16; - y_m_d |= ((u32)bin2bcd((tm->tm_mon + 1))) << 8; - y_m_d |= ((u32)bin2bcd(tm->tm_mday)); - - h_m_s_ms |= ((u64)bin2bcd(tm->tm_hour)) << 56; - h_m_s_ms |= ((u64)bin2bcd(tm->tm_min)) << 48; - h_m_s_ms |= ((u64)bin2bcd(tm->tm_sec)) << 40; - while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) { - rc = opal_rtc_write(y_m_d, h_m_s_ms); - if (rc == OPAL_BUSY_EVENT) - opal_poll_events(NULL); - else - mdelay(10); - } - return rc == OPAL_SUCCESS ? 0 : -EIO; + return PTR_ERR_OR_ZERO(pdev); } +machine_subsys_initcall(powernv, opal_time_init); diff --git a/arch/powerpc/platforms/powernv/opal-tracepoints.c b/arch/powerpc/platforms/powernv/opal-tracepoints.c index ae14c40b4b1c..e11273b2386d 100644 --- a/arch/powerpc/platforms/powernv/opal-tracepoints.c +++ b/arch/powerpc/platforms/powernv/opal-tracepoints.c @@ -48,7 +48,7 @@ void __trace_opal_entry(unsigned long opcode, unsigned long *args) local_irq_save(flags); - depth = &__get_cpu_var(opal_trace_depth); + depth = this_cpu_ptr(&opal_trace_depth); if (*depth) goto out; @@ -69,7 +69,7 @@ void __trace_opal_exit(long opcode, unsigned long retval) local_irq_save(flags); - depth = &__get_cpu_var(opal_trace_depth); + depth = this_cpu_ptr(&opal_trace_depth); if (*depth) goto out; diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S index feb549aa3eea..0a299be588af 100644 --- a/arch/powerpc/platforms/powernv/opal-wrappers.S +++ b/arch/powerpc/platforms/powernv/opal-wrappers.S @@ -18,7 +18,7 @@ .section ".text" #ifdef CONFIG_TRACEPOINTS -#ifdef CONFIG_JUMP_LABEL +#ifdef HAVE_JUMP_LABEL #define OPAL_BRANCH(LABEL) \ ARCH_STATIC_BRANCH(LABEL, opal_tracepoint_key) #else @@ -250,3 +250,7 @@ OPAL_CALL(opal_handle_hmi, OPAL_HANDLE_HMI); OPAL_CALL(opal_register_dump_region, OPAL_REGISTER_DUMP_REGION); OPAL_CALL(opal_unregister_dump_region, OPAL_UNREGISTER_DUMP_REGION); OPAL_CALL(opal_pci_set_phb_cxl_mode, OPAL_PCI_SET_PHB_CXL_MODE); +OPAL_CALL(opal_tpo_write, OPAL_WRITE_TPO); +OPAL_CALL(opal_tpo_read, OPAL_READ_TPO); +OPAL_CALL(opal_ipmi_send, OPAL_IPMI_SEND); +OPAL_CALL(opal_ipmi_recv, OPAL_IPMI_RECV); diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c index d019b081df9d..cb0b6de79cd4 100644 --- a/arch/powerpc/platforms/powernv/opal.c +++ b/arch/powerpc/platforms/powernv/opal.c @@ -50,7 +50,6 @@ static int mc_recoverable_range_len; struct device_node *opal_node; static DEFINE_SPINLOCK(opal_write_lock); -extern u64 opal_mc_secondary_handler[]; static unsigned int *opal_irqs; static unsigned int opal_irq_count; static ATOMIC_NOTIFIER_HEAD(opal_notifier_head); @@ -644,6 +643,16 @@ static void __init opal_dump_region_init(void) pr_warn("DUMP: Failed to register kernel log buffer. 
" "rc = %d\n", rc); } + +static void opal_ipmi_init(struct device_node *opal_node) +{ + struct device_node *np; + + for_each_child_of_node(opal_node, np) + if (of_device_is_compatible(np, "ibm,opal-ipmi")) + of_platform_device_create(np, NULL, NULL); +} + static int __init opal_init(void) { struct device_node *np, *consoles; @@ -707,6 +716,8 @@ static int __init opal_init(void) opal_msglog_init(); } + opal_ipmi_init(opal_node); + return 0; } machine_subsys_initcall(powernv, opal_init); @@ -742,6 +753,8 @@ void opal_shutdown(void) /* Export this so that test modules can use it */ EXPORT_SYMBOL_GPL(opal_invalid_call); +EXPORT_SYMBOL_GPL(opal_ipmi_send); +EXPORT_SYMBOL_GPL(opal_ipmi_recv); /* Convert a region of vmalloc memory to an opal sg list */ struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr, @@ -805,3 +818,9 @@ void opal_free_sg_list(struct opal_sg_list *sg) sg = NULL; } } + +EXPORT_SYMBOL_GPL(opal_poll_events); +EXPORT_SYMBOL_GPL(opal_rtc_read); +EXPORT_SYMBOL_GPL(opal_rtc_write); +EXPORT_SYMBOL_GPL(opal_tpo_read); +EXPORT_SYMBOL_GPL(opal_tpo_write); diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 3ba435ec3dcd..fac88ed8a915 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -91,6 +91,24 @@ static inline bool pnv_pci_is_mem_pref_64(unsigned long flags) (IORESOURCE_MEM_64 | IORESOURCE_PREFETCH)); } +static void pnv_ioda_reserve_pe(struct pnv_phb *phb, int pe_no) +{ + if (!(pe_no >= 0 && pe_no < phb->ioda.total_pe)) { + pr_warn("%s: Invalid PE %d on PHB#%x\n", + __func__, pe_no, phb->hose->global_number); + return; + } + + if (test_and_set_bit(pe_no, phb->ioda.pe_alloc)) { + pr_warn("%s: PE %d was assigned on PHB#%x\n", + __func__, pe_no, phb->hose->global_number); + return; + } + + phb->ioda.pe_array[pe_no].phb = phb; + phb->ioda.pe_array[pe_no].pe_number = pe_no; +} + static int pnv_ioda_alloc_pe(struct pnv_phb *phb) { unsigned long pe; @@ -172,7 +190,7 @@ fail: return -EIO; } -static void pnv_ioda2_alloc_m64_pe(struct pnv_phb *phb) +static void pnv_ioda2_reserve_m64_pe(struct pnv_phb *phb) { resource_size_t sgsz = phb->ioda.m64_segsize; struct pci_dev *pdev; @@ -185,16 +203,15 @@ static void pnv_ioda2_alloc_m64_pe(struct pnv_phb *phb) * instead of root bus. 
*/ list_for_each_entry(pdev, &phb->hose->bus->devices, bus_list) { - for (i = PCI_BRIDGE_RESOURCES; - i <= PCI_BRIDGE_RESOURCE_END; i++) { - r = &pdev->resource[i]; + for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) { + r = &pdev->resource[PCI_BRIDGE_RESOURCES + i]; if (!r->parent || !pnv_pci_is_mem_pref_64(r->flags)) continue; base = (r->start - phb->ioda.m64_base) / sgsz; for (step = 0; step < resource_size(r) / sgsz; step++) - set_bit(base + step, phb->ioda.pe_alloc); + pnv_ioda_reserve_pe(phb, base + step); } } } @@ -287,8 +304,6 @@ done: while ((i = find_next_bit(pe_alloc, phb->ioda.total_pe, i + 1)) < phb->ioda.total_pe) { pe = &phb->ioda.pe_array[i]; - pe->phb = phb; - pe->pe_number = i; if (!master_pe) { pe->flags |= PNV_IODA_PE_MASTER; @@ -313,6 +328,12 @@ static void __init pnv_ioda_parse_m64_window(struct pnv_phb *phb) const u32 *r; u64 pci_addr; + /* FIXME: Support M64 for P7IOC */ + if (phb->type != PNV_PHB_IODA2) { + pr_info(" Not support M64 window\n"); + return; + } + if (!firmware_has_feature(FW_FEATURE_OPALv3)) { pr_info(" Firmware too old to support M64 window\n"); return; @@ -325,12 +346,6 @@ static void __init pnv_ioda_parse_m64_window(struct pnv_phb *phb) return; } - /* FIXME: Support M64 for P7IOC */ - if (phb->type != PNV_PHB_IODA2) { - pr_info(" Not support M64 window\n"); - return; - } - res = &hose->mem_resources[1]; res->start = of_translate_address(dn, r + 2); res->end = res->start + of_read_number(r + 4, 2) - 1; @@ -345,7 +360,7 @@ static void __init pnv_ioda_parse_m64_window(struct pnv_phb *phb) /* Use last M64 BAR to cover M64 window */ phb->ioda.m64_bar_idx = 15; phb->init_m64 = pnv_ioda2_init_m64; - phb->alloc_m64_pe = pnv_ioda2_alloc_m64_pe; + phb->reserve_m64_pe = pnv_ioda2_reserve_m64_pe; phb->pick_m64_pe = pnv_ioda2_pick_m64_pe; } @@ -358,7 +373,9 @@ static void pnv_ioda_freeze_pe(struct pnv_phb *phb, int pe_no) /* Fetch master PE */ if (pe->flags & PNV_IODA_PE_SLAVE) { pe = pe->master; - WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER)); + if (WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER))) + return; + pe_no = pe->pe_number; } @@ -507,6 +524,106 @@ static struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev) } #endif /* CONFIG_PCI_MSI */ +static int pnv_ioda_set_one_peltv(struct pnv_phb *phb, + struct pnv_ioda_pe *parent, + struct pnv_ioda_pe *child, + bool is_add) +{ + const char *desc = is_add ? "adding" : "removing"; + uint8_t op = is_add ? OPAL_ADD_PE_TO_DOMAIN : + OPAL_REMOVE_PE_FROM_DOMAIN; + struct pnv_ioda_pe *slave; + long rc; + + /* Parent PE affects child PE */ + rc = opal_pci_set_peltv(phb->opal_id, parent->pe_number, + child->pe_number, op); + if (rc != OPAL_SUCCESS) { + pe_warn(child, "OPAL error %ld %s to parent PELTV\n", + rc, desc); + return -ENXIO; + } + + if (!(child->flags & PNV_IODA_PE_MASTER)) + return 0; + + /* Compound case: parent PE affects slave PEs */ + list_for_each_entry(slave, &child->slaves, list) { + rc = opal_pci_set_peltv(phb->opal_id, parent->pe_number, + slave->pe_number, op); + if (rc != OPAL_SUCCESS) { + pe_warn(slave, "OPAL error %ld %s to parent PELTV\n", + rc, desc); + return -ENXIO; + } + } + + return 0; +} + +static int pnv_ioda_set_peltv(struct pnv_phb *phb, + struct pnv_ioda_pe *pe, + bool is_add) +{ + struct pnv_ioda_pe *slave; + struct pci_dev *pdev; + int ret; + + /* + * Clear PE frozen state. If it's master PE, we need + * clear slave PE frozen state as well. 
+ */ + if (is_add) { + opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number, + OPAL_EEH_ACTION_CLEAR_FREEZE_ALL); + if (pe->flags & PNV_IODA_PE_MASTER) { + list_for_each_entry(slave, &pe->slaves, list) + opal_pci_eeh_freeze_clear(phb->opal_id, + slave->pe_number, + OPAL_EEH_ACTION_CLEAR_FREEZE_ALL); + } + } + + /* + * Associate PE in PELT. We need add the PE into the + * corresponding PELT-V as well. Otherwise, the error + * originated from the PE might contribute to other + * PEs. + */ + ret = pnv_ioda_set_one_peltv(phb, pe, pe, is_add); + if (ret) + return ret; + + /* For compound PEs, any one affects all of them */ + if (pe->flags & PNV_IODA_PE_MASTER) { + list_for_each_entry(slave, &pe->slaves, list) { + ret = pnv_ioda_set_one_peltv(phb, slave, pe, is_add); + if (ret) + return ret; + } + } + + if (pe->flags & (PNV_IODA_PE_BUS_ALL | PNV_IODA_PE_BUS)) + pdev = pe->pbus->self; + else + pdev = pe->pdev->bus->self; + while (pdev) { + struct pci_dn *pdn = pci_get_pdn(pdev); + struct pnv_ioda_pe *parent; + + if (pdn && pdn->pe_number != IODA_INVALID_PE) { + parent = &phb->ioda.pe_array[pdn->pe_number]; + ret = pnv_ioda_set_one_peltv(phb, parent, pe, is_add); + if (ret) + return ret; + } + + pdev = pdev->bus->self; + } + + return 0; +} + static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe) { struct pci_dev *parent; @@ -561,48 +678,36 @@ static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe) return -ENXIO; } - rc = opal_pci_set_peltv(phb->opal_id, pe->pe_number, - pe->pe_number, OPAL_ADD_PE_TO_DOMAIN); - if (rc) - pe_warn(pe, "OPAL error %d adding self to PELTV\n", rc); - opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number, - OPAL_EEH_ACTION_CLEAR_FREEZE_ALL); + /* Configure PELTV */ + pnv_ioda_set_peltv(phb, pe, true); - /* Add to all parents PELT-V */ - while (parent) { - struct pci_dn *pdn = pci_get_pdn(parent); - if (pdn && pdn->pe_number != IODA_INVALID_PE) { - rc = opal_pci_set_peltv(phb->opal_id, pdn->pe_number, - pe->pe_number, OPAL_ADD_PE_TO_DOMAIN); - /* XXX What to do in case of error ? 
*/ - } - parent = parent->bus->self; - } /* Setup reverse map */ for (rid = pe->rid; rid < rid_end; rid++) phb->ioda.pe_rmap[rid] = pe->pe_number; /* Setup one MVTs on IODA1 */ - if (phb->type == PNV_PHB_IODA1) { - pe->mve_number = pe->pe_number; - rc = opal_pci_set_mve(phb->opal_id, pe->mve_number, - pe->pe_number); + if (phb->type != PNV_PHB_IODA1) { + pe->mve_number = 0; + goto out; + } + + pe->mve_number = pe->pe_number; + rc = opal_pci_set_mve(phb->opal_id, pe->mve_number, pe->pe_number); + if (rc != OPAL_SUCCESS) { + pe_err(pe, "OPAL error %ld setting up MVE %d\n", + rc, pe->mve_number); + pe->mve_number = -1; + } else { + rc = opal_pci_set_mve_enable(phb->opal_id, + pe->mve_number, OPAL_ENABLE_MVE); if (rc) { - pe_err(pe, "OPAL error %ld setting up MVE %d\n", + pe_err(pe, "OPAL error %ld enabling MVE %d\n", rc, pe->mve_number); pe->mve_number = -1; - } else { - rc = opal_pci_set_mve_enable(phb->opal_id, - pe->mve_number, OPAL_ENABLE_MVE); - if (rc) { - pe_err(pe, "OPAL error %ld enabling MVE %d\n", - rc, pe->mve_number); - pe->mve_number = -1; - } } - } else if (phb->type == PNV_PHB_IODA2) - pe->mve_number = 0; + } +out: return 0; } @@ -837,8 +942,8 @@ static void pnv_pci_ioda_setup_PEs(void) phb = hose->private_data; /* M64 layout might affect PE allocation */ - if (phb->alloc_m64_pe) - phb->alloc_m64_pe(phb); + if (phb->reserve_m64_pe) + phb->reserve_m64_pe(phb); pnv_ioda_setup_PEs(hose->bus); } @@ -1834,19 +1939,14 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np, phb_id = be64_to_cpup(prop64); pr_debug(" PHB-ID : 0x%016llx\n", phb_id); - phb = alloc_bootmem(sizeof(struct pnv_phb)); - if (!phb) { - pr_err(" Out of memory !\n"); - return; - } + phb = memblock_virt_alloc(sizeof(struct pnv_phb), 0); /* Allocate PCI controller */ - memset(phb, 0, sizeof(struct pnv_phb)); phb->hose = hose = pcibios_alloc_controller(np); if (!phb->hose) { pr_err(" Can't allocate PCI controller for %s\n", np->full_name); - free_bootmem((unsigned long)phb, sizeof(struct pnv_phb)); + memblock_free(__pa(phb), sizeof(struct pnv_phb)); return; } @@ -1913,8 +2013,7 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np, } pemap_off = size; size += phb->ioda.total_pe * sizeof(struct pnv_ioda_pe); - aux = alloc_bootmem(size); - memset(aux, 0, size); + aux = memblock_virt_alloc(size, 0); phb->ioda.pe_alloc = aux; phb->ioda.m32_segmap = aux + m32map_off; if (phb->type == PNV_PHB_IODA1) @@ -1999,8 +2098,8 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np, ioda_eeh_phb_reset(hose, EEH_RESET_DEACTIVATE); } - /* Configure M64 window */ - if (phb->init_m64 && phb->init_m64(phb)) + /* Remove M64 resource if we can't configure it successfully */ + if (!phb->init_m64 || phb->init_m64(phb)) hose->mem_resources[1].flags = 0; } diff --git a/arch/powerpc/platforms/powernv/pci-p5ioc2.c b/arch/powerpc/platforms/powernv/pci-p5ioc2.c index 94ce3481490b..6ef6d4d8e7e2 100644 --- a/arch/powerpc/platforms/powernv/pci-p5ioc2.c +++ b/arch/powerpc/platforms/powernv/pci-p5ioc2.c @@ -122,12 +122,9 @@ static void __init pnv_pci_init_p5ioc2_phb(struct device_node *np, u64 hub_id, return; } - phb = alloc_bootmem(sizeof(struct pnv_phb)); - if (phb) { - memset(phb, 0, sizeof(struct pnv_phb)); - phb->hose = pcibios_alloc_controller(np); - } - if (!phb || !phb->hose) { + phb = memblock_virt_alloc(sizeof(struct pnv_phb), 0); + phb->hose = pcibios_alloc_controller(np); + if (!phb->hose) { pr_err(" Failed to allocate PCI controller\n"); return; } @@ -196,16 +193,27 @@ void __init 
pnv_pci_init_p5ioc2_hub(struct device_node *np) hub_id = be64_to_cpup(prop64); pr_info(" HUB-ID : 0x%016llx\n", hub_id); + /* Count child PHBs and calculate TCE space per PHB */ + for_each_child_of_node(np, phbn) { + if (of_device_is_compatible(phbn, "ibm,p5ioc2-pcix") || + of_device_is_compatible(phbn, "ibm,p5ioc2-pciex")) + phb_count++; + } + + if (phb_count <= 0) { + pr_info(" No PHBs for Hub %s\n", np->full_name); + return; + } + + tce_per_phb = __rounddown_pow_of_two(P5IOC2_TCE_MEMORY / phb_count); + pr_info(" Allocating %lld MB of TCE memory per PHB\n", + tce_per_phb >> 20); + /* Currently allocate 16M of TCE memory for every Hub * * XXX TODO: Make it chip local if possible */ - tce_mem = __alloc_bootmem(P5IOC2_TCE_MEMORY, P5IOC2_TCE_MEMORY, - __pa(MAX_DMA_ADDRESS)); - if (!tce_mem) { - pr_err(" Failed to allocate TCE Memory !\n"); - return; - } + tce_mem = memblock_virt_alloc(P5IOC2_TCE_MEMORY, P5IOC2_TCE_MEMORY); pr_debug(" TCE : 0x%016lx..0x%016lx\n", __pa(tce_mem), __pa(tce_mem) + P5IOC2_TCE_MEMORY - 1); rc = opal_pci_set_hub_tce_memory(hub_id, __pa(tce_mem), @@ -215,18 +223,6 @@ void __init pnv_pci_init_p5ioc2_hub(struct device_node *np) return; } - /* Count child PHBs */ - for_each_child_of_node(np, phbn) { - if (of_device_is_compatible(phbn, "ibm,p5ioc2-pcix") || - of_device_is_compatible(phbn, "ibm,p5ioc2-pciex")) - phb_count++; - } - - /* Calculate how much TCE space we can give per PHB */ - tce_per_phb = __rounddown_pow_of_two(P5IOC2_TCE_MEMORY / phb_count); - pr_info(" Allocating %lld MB of TCE memory per PHB\n", - tce_per_phb >> 20); - /* Initialize PHBs */ for_each_child_of_node(np, phbn) { if (of_device_is_compatible(phbn, "ibm,p5ioc2-pcix") || diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c index 4b20f2c6b3b2..4945e87f12dc 100644 --- a/arch/powerpc/platforms/powernv/pci.c +++ b/arch/powerpc/platforms/powernv/pci.c @@ -16,7 +16,6 @@ #include <linux/delay.h> #include <linux/string.h> #include <linux/init.h> -#include <linux/bootmem.h> #include <linux/irq.h> #include <linux/io.h> #include <linux/msi.h> @@ -90,7 +89,7 @@ static int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) return rc; } irq_set_msi_desc(virq, entry); - write_msi_msg(virq, &msg); + pci_write_msi_msg(virq, &msg); } return 0; } diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h index 34d29eb2a4de..6c02ff8dd69f 100644 --- a/arch/powerpc/platforms/powernv/pci.h +++ b/arch/powerpc/platforms/powernv/pci.h @@ -130,7 +130,7 @@ struct pnv_phb { u32 (*bdfn_to_pe)(struct pnv_phb *phb, struct pci_bus *bus, u32 devfn); void (*shutdown)(struct pnv_phb *phb); int (*init_m64)(struct pnv_phb *phb); - void (*alloc_m64_pe)(struct pnv_phb *phb); + void (*reserve_m64_pe)(struct pnv_phb *phb); int (*pick_m64_pe)(struct pnv_phb *phb, struct pci_bus *bus, int all); int (*get_pe_state)(struct pnv_phb *phb, int pe_no); void (*freeze_pe)(struct pnv_phb *phb, int pe_no); diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c index 3f9546d8a51f..30b1c3e298a6 100644 --- a/arch/powerpc/platforms/powernv/setup.c +++ b/arch/powerpc/platforms/powernv/setup.c @@ -265,10 +265,8 @@ static unsigned long pnv_memory_block_size(void) static void __init pnv_setup_machdep_opal(void) { ppc_md.get_boot_time = opal_get_boot_time; - ppc_md.get_rtc_time = opal_get_rtc_time; - ppc_md.set_rtc_time = opal_set_rtc_time; ppc_md.restart = pnv_restart; - ppc_md.power_off = pnv_power_off; + pm_power_off = pnv_power_off; 
ppc_md.halt = pnv_halt; ppc_md.machine_check_exception = opal_machine_check; ppc_md.mce_check_early_recovery = opal_mce_check_early_recovery; @@ -285,7 +283,7 @@ static void __init pnv_setup_machdep_rtas(void) ppc_md.set_rtc_time = rtas_set_rtc_time; } ppc_md.restart = rtas_restart; - ppc_md.power_off = rtas_power_off; + pm_power_off = rtas_power_off; ppc_md.halt = rtas_halt; } #endif /* CONFIG_PPC_POWERNV_RTAS */ diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c index 4753958cd509..b716f666e48a 100644 --- a/arch/powerpc/platforms/powernv/smp.c +++ b/arch/powerpc/platforms/powernv/smp.c @@ -149,6 +149,7 @@ static int pnv_smp_cpu_disable(void) static void pnv_smp_cpu_kill_self(void) { unsigned int cpu; + unsigned long srr1; /* Standard hot unplug procedure */ local_irq_disable(); @@ -165,13 +166,25 @@ static void pnv_smp_cpu_kill_self(void) mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1); while (!generic_check_cpu_restart(cpu)) { ppc64_runlatch_off(); - power7_nap(1); + srr1 = power7_nap(1); ppc64_runlatch_on(); - /* Clear the IPI that woke us up */ - icp_native_flush_interrupt(); - local_paca->irq_happened &= PACA_IRQ_HARD_DIS; - mb(); + /* + * If the SRR1 value indicates that we woke up due to + * an external interrupt, then clear the interrupt. + * We clear the interrupt before checking for the + * reason, so as to avoid a race where we wake up for + * some other reason, find nothing and clear the interrupt + * just as some other cpu is sending us an interrupt. + * If we returned from power7_nap as a result of + * having finished executing in a KVM guest, then srr1 + * contains 0. + */ + if ((srr1 & SRR1_WAKEMASK) == SRR1_WAKEEE) { + icp_native_flush_interrupt(); + local_paca->irq_happened &= PACA_IRQ_HARD_DIS; + smp_mb(); + } if (cpu_core_split_required()) continue; diff --git a/arch/powerpc/platforms/ps3/htab.c b/arch/powerpc/platforms/ps3/htab.c index 3e270e3412ae..2f95d33cf34a 100644 --- a/arch/powerpc/platforms/ps3/htab.c +++ b/arch/powerpc/platforms/ps3/htab.c @@ -110,7 +110,7 @@ static long ps3_hpte_remove(unsigned long hpte_group) static long ps3_hpte_updatepp(unsigned long slot, unsigned long newpp, unsigned long vpn, int psize, int apsize, - int ssize, int local) + int ssize, unsigned long inv_flags) { int result; u64 hpte_v, want_v, hpte_rs; diff --git a/arch/powerpc/platforms/ps3/interrupt.c b/arch/powerpc/platforms/ps3/interrupt.c index 5f3b23220b8e..a6c42f34303a 100644 --- a/arch/powerpc/platforms/ps3/interrupt.c +++ b/arch/powerpc/platforms/ps3/interrupt.c @@ -711,7 +711,7 @@ void __init ps3_register_ipi_irq(unsigned int cpu, unsigned int virq) static unsigned int ps3_get_irq(void) { - struct ps3_private *pd = &__get_cpu_var(ps3_private); + struct ps3_private *pd = this_cpu_ptr(&ps3_private); u64 x = (pd->bmp.status & pd->bmp.mask); unsigned int plug; diff --git a/arch/powerpc/platforms/ps3/setup.c b/arch/powerpc/platforms/ps3/setup.c index 3f509f86432c..799c8580ab09 100644 --- a/arch/powerpc/platforms/ps3/setup.c +++ b/arch/powerpc/platforms/ps3/setup.c @@ -125,12 +125,7 @@ static void __init prealloc(struct ps3_prealloc *p) if (!p->size) return; - p->address = __alloc_bootmem(p->size, p->align, __pa(MAX_DMA_ADDRESS)); - if (!p->address) { - printk(KERN_ERR "%s: Cannot allocate %s\n", __func__, - p->name); - return; - } + p->address = memblock_virt_alloc(p->size, p->align); printk(KERN_INFO "%s: %lu bytes at %p\n", p->name, p->size, p->address); @@ -248,6 +243,7 @@ static int __init ps3_probe(void) ps3_mm_init(); 
ps3_mm_vas_create(&htab_size); ps3_hpte_init(htab_size); + pm_power_off = ps3_power_off; DBG(" <- %s:%d\n", __func__, __LINE__); return 1; @@ -278,7 +274,6 @@ define_machine(ps3) { .calibrate_decr = ps3_calibrate_decr, .progress = ps3_progress, .restart = ps3_restart, - .power_off = ps3_power_off, .halt = ps3_halt, #if defined(CONFIG_KEXEC) .kexec_cpu_down = ps3_kexec_cpu_down, diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c index 1062f71f5a85..39049e4884fb 100644 --- a/arch/powerpc/platforms/pseries/dtl.c +++ b/arch/powerpc/platforms/pseries/dtl.c @@ -75,7 +75,7 @@ static atomic_t dtl_count; */ static void consume_dtle(struct dtl_entry *dtle, u64 index) { - struct dtl_ring *dtlr = &__get_cpu_var(dtl_rings); + struct dtl_ring *dtlr = this_cpu_ptr(&dtl_rings); struct dtl_entry *wp = dtlr->write_ptr; struct lppaca *vpa = local_paca->lppaca_ptr; diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c index 5c375f93c669..f30cf4d136a4 100644 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c @@ -340,16 +340,17 @@ static void pseries_remove_processor(struct device_node *np) } static int pseries_smp_notifier(struct notifier_block *nb, - unsigned long action, void *node) + unsigned long action, void *data) { + struct of_reconfig_data *rd = data; int err = 0; switch (action) { case OF_RECONFIG_ATTACH_NODE: - err = pseries_add_processor(node); + err = pseries_add_processor(rd->dn); break; case OF_RECONFIG_DETACH_NODE: - pseries_remove_processor(node); + pseries_remove_processor(rd->dn); break; } return notifier_from_errno(err); diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c index 3c4c0dcd90d3..fa41f0da5b6f 100644 --- a/arch/powerpc/platforms/pseries/hotplug-memory.c +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c @@ -12,7 +12,6 @@ #include <linux/of.h> #include <linux/of_address.h> #include <linux/memblock.h> -#include <linux/vmalloc.h> #include <linux/memory.h> #include <linux/memory_hotplug.h> @@ -66,22 +65,6 @@ unsigned long pseries_memory_block_size(void) } #ifdef CONFIG_MEMORY_HOTREMOVE -static int pseries_remove_memory(u64 start, u64 size) -{ - int ret; - - /* Remove htab bolted mappings for this section of memory */ - start = (unsigned long)__va(start); - ret = remove_section_mapping(start, start + size); - - /* Ensure all vmalloc mappings are flushed in case they also - * hit that section of memory - */ - vm_unmap_aliases(); - - return ret; -} - static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size) { unsigned long block_sz, start_pfn; @@ -183,7 +166,7 @@ static int pseries_add_mem_node(struct device_node *np) return (ret < 0) ? 
-EINVAL : 0; } -static int pseries_update_drconf_memory(struct of_prop_reconfig *pr) +static int pseries_update_drconf_memory(struct of_reconfig_data *pr) { struct of_drconf_cell *new_drmem, *old_drmem; unsigned long memblock_size; @@ -232,22 +215,21 @@ static int pseries_update_drconf_memory(struct of_prop_reconfig *pr) } static int pseries_memory_notifier(struct notifier_block *nb, - unsigned long action, void *node) + unsigned long action, void *data) { - struct of_prop_reconfig *pr; + struct of_reconfig_data *rd = data; int err = 0; switch (action) { case OF_RECONFIG_ATTACH_NODE: - err = pseries_add_mem_node(node); + err = pseries_add_mem_node(rd->dn); break; case OF_RECONFIG_DETACH_NODE: - err = pseries_remove_mem_node(node); + err = pseries_remove_mem_node(rd->dn); break; case OF_RECONFIG_UPDATE_PROPERTY: - pr = (struct of_prop_reconfig *)node; - if (!strcmp(pr->prop->name, "ibm,dynamic-memory")) - err = pseries_update_drconf_memory(pr); + if (!strcmp(rd->prop->name, "ibm,dynamic-memory")) + err = pseries_update_drconf_memory(rd); break; } return notifier_from_errno(err); @@ -262,10 +244,6 @@ static int __init pseries_memory_hotplug_init(void) if (firmware_has_feature(FW_FEATURE_LPAR)) of_reconfig_notifier_register(&pseries_mem_nb); -#ifdef CONFIG_MEMORY_HOTREMOVE - ppc_md.remove_memory = pseries_remove_memory; -#endif - return 0; } machine_device_initcall(pseries, pseries_memory_hotplug_init); diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S index 3fda3f17b84e..ccd53f91e8aa 100644 --- a/arch/powerpc/platforms/pseries/hvCall.S +++ b/arch/powerpc/platforms/pseries/hvCall.S @@ -18,7 +18,7 @@ #ifdef CONFIG_TRACEPOINTS -#ifndef CONFIG_JUMP_LABEL +#ifndef HAVE_JUMP_LABEL .section ".toc","aw" .globl hcall_tracepoint_refcount @@ -78,7 +78,7 @@ hcall_tracepoint_refcount: mr r5,BUFREG; \ __HCALL_INST_POSTCALL -#ifdef CONFIG_JUMP_LABEL +#ifdef HAVE_JUMP_LABEL #define HCALL_BRANCH(LABEL) \ ARCH_STATIC_BRANCH(LABEL, hcall_tracepoint_key) #else diff --git a/arch/powerpc/platforms/pseries/hvCall_inst.c b/arch/powerpc/platforms/pseries/hvCall_inst.c index 4575f0c9e521..f02ec3ab428c 100644 --- a/arch/powerpc/platforms/pseries/hvCall_inst.c +++ b/arch/powerpc/platforms/pseries/hvCall_inst.c @@ -110,7 +110,7 @@ static void probe_hcall_entry(void *ignored, unsigned long opcode, unsigned long if (opcode > MAX_HCALL_OPCODE) return; - h = &__get_cpu_var(hcall_stats)[opcode / 4]; + h = this_cpu_ptr(&hcall_stats[opcode / 4]); h->tb_start = mftb(); h->purr_start = mfspr(SPRN_PURR); } @@ -123,7 +123,7 @@ static void probe_hcall_exit(void *ignored, unsigned long opcode, unsigned long if (opcode > MAX_HCALL_OPCODE) return; - h = &__get_cpu_var(hcall_stats)[opcode / 4]; + h = this_cpu_ptr(&hcall_stats[opcode / 4]); h->num_calls++; h->tb_total += mftb() - h->tb_start; h->purr_total += mfspr(SPRN_PURR) - h->purr_start; diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index e32e00976a94..1d3d52dc3ff3 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c @@ -199,7 +199,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, local_irq_save(flags); /* to protect tcep and the page behind it */ - tcep = __get_cpu_var(tce_page); + tcep = __this_cpu_read(tce_page); /* This is safe to do since interrupts are off when we're called * from iommu_alloc{,_sg}() @@ -212,7 +212,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, return 
tce_build_pSeriesLP(tbl, tcenum, npages, uaddr, direction, attrs); } - __get_cpu_var(tce_page) = tcep; + __this_cpu_write(tce_page, tcep); } rpn = __pa(uaddr) >> TCE_SHIFT; @@ -398,7 +398,7 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn, long l, limit; local_irq_disable(); /* to protect tcep and the page behind it */ - tcep = __get_cpu_var(tce_page); + tcep = __this_cpu_read(tce_page); if (!tcep) { tcep = (__be64 *)__get_free_page(GFP_ATOMIC); @@ -406,7 +406,7 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn, local_irq_enable(); return -ENOMEM; } - __get_cpu_var(tce_page) = tcep; + __this_cpu_write(tce_page, tcep); } proto_tce = TCE_PCI_READ | TCE_PCI_WRITE; @@ -574,8 +574,7 @@ static void pci_dma_bus_setup_pSeries(struct pci_bus *bus) while (isa_dn && isa_dn != dn) isa_dn = isa_dn->parent; - if (isa_dn_orig) - of_node_put(isa_dn_orig); + of_node_put(isa_dn_orig); /* Count number of direct PCI children of the PHB. */ for (children = 0, tmp = dn->child; tmp; tmp = tmp->sibling) @@ -1251,10 +1250,11 @@ static struct notifier_block iommu_mem_nb = { .notifier_call = iommu_mem_notifier, }; -static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *node) +static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *data) { int err = NOTIFY_OK; - struct device_node *np = node; + struct of_reconfig_data *rd = data; + struct device_node *np = rd->dn; struct pci_dn *pci = PCI_DN(np); struct direct_window *window; diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index f6880d2a40fb..469751d92004 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c @@ -284,7 +284,7 @@ static long pSeries_lpar_hpte_updatepp(unsigned long slot, unsigned long newpp, unsigned long vpn, int psize, int apsize, - int ssize, int local) + int ssize, unsigned long inv_flags) { unsigned long lpar_rc; unsigned long flags = (newpp & 7) | H_AVPN; @@ -442,7 +442,7 @@ static void __pSeries_lpar_hugepage_invalidate(unsigned long *slot, static void pSeries_lpar_hugepage_invalidate(unsigned long vsid, unsigned long addr, unsigned char *hpte_slot_array, - int psize, int ssize) + int psize, int ssize, int local) { int i, index = 0; unsigned long s_addr = addr; @@ -515,7 +515,7 @@ static void pSeries_lpar_flush_hash_range(unsigned long number, int local) unsigned long vpn; unsigned long i, pix, rc; unsigned long flags = 0; - struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); + struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch); int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); unsigned long param[9]; unsigned long hash, index, shift, hidx, slot; @@ -705,7 +705,7 @@ void __trace_hcall_entry(unsigned long opcode, unsigned long *args) local_irq_save(flags); - depth = &__get_cpu_var(hcall_trace_depth); + depth = this_cpu_ptr(&hcall_trace_depth); if (*depth) goto out; @@ -730,7 +730,7 @@ void __trace_hcall_exit(long opcode, unsigned long retval, local_irq_save(flags); - depth = &__get_cpu_var(hcall_trace_depth); + depth = this_cpu_ptr(&hcall_trace_depth); if (*depth) goto out; diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c index 8b909e94fd9a..691a154c286d 100644 --- a/arch/powerpc/platforms/pseries/msi.c +++ b/arch/powerpc/platforms/pseries/msi.c @@ -476,7 +476,7 @@ again: irq_set_msi_desc(virq, entry); /* Read config space back so we can restore after reset */ - __read_msi_msg(entry, 
&msg); + __pci_read_msi_msg(entry, &msg); entry->msg = msg; } diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c index 11a3b617ef5d..054a0ed5c7ee 100644 --- a/arch/powerpc/platforms/pseries/nvram.c +++ b/arch/powerpc/platforms/pseries/nvram.c @@ -715,6 +715,8 @@ static int nvram_pstore_init(void) nvram_pstore_info.buf = oops_data; nvram_pstore_info.bufsize = oops_data_sz; + spin_lock_init(&nvram_pstore_info.buf_lock); + rc = pstore_register(&nvram_pstore_info); if (rc != 0) pr_err("nvram: pstore_register() failed, defaults to " diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c index 67e48594040c..fe16a50700de 100644 --- a/arch/powerpc/platforms/pseries/pci.c +++ b/arch/powerpc/platforms/pseries/pci.c @@ -134,7 +134,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge) of_node_put(pdn); if (rc) { - pr_err("no ibm,pcie-link-speed-stats property\n"); + pr_debug("no ibm,pcie-link-speed-stats property\n"); return 0; } diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c index 5a4d0fc03b03..c3b2a7e81ddb 100644 --- a/arch/powerpc/platforms/pseries/ras.c +++ b/arch/powerpc/platforms/pseries/ras.c @@ -302,8 +302,8 @@ static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs) /* If it isn't an extended log we can use the per cpu 64bit buffer */ h = (struct rtas_error_log *)&savep[1]; if (!rtas_error_extended(h)) { - memcpy(&__get_cpu_var(mce_data_buf), h, sizeof(__u64)); - errhdr = (struct rtas_error_log *)&__get_cpu_var(mce_data_buf); + memcpy(this_cpu_ptr(&mce_data_buf), h, sizeof(__u64)); + errhdr = (struct rtas_error_log *)this_cpu_ptr(&mce_data_buf); } else { int len, error_log_length; diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index 125c589eeef5..e445b6701f50 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -251,9 +251,10 @@ static void __init pseries_discover_pic(void) " interrupt-controller\n"); } -static int pci_dn_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *node) +static int pci_dn_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *data) { - struct device_node *np = node; + struct of_reconfig_data *rd = data; + struct device_node *np = rd->dn; struct pci_dn *pci = NULL; int err = NOTIFY_OK; @@ -499,7 +500,11 @@ static void __init pSeries_setup_arch(void) if (firmware_has_feature(FW_FEATURE_SET_MODE)) { long rc; - if ((rc = pSeries_enable_reloc_on_exc()) != H_SUCCESS) { + + rc = pSeries_enable_reloc_on_exc(); + if (rc == H_P2) { + pr_info("Relocation on exceptions not supported\n"); + } else if (rc != H_SUCCESS) { pr_warn("Unable to enable relocation on exceptions: " "%ld\n", rc); } @@ -659,6 +664,34 @@ static void __init pSeries_init_early(void) pr_debug(" <- pSeries_init_early()\n"); } +/** + * pseries_power_off - tell firmware about how to power off the system. + * + * This function calls either the power-off rtas token in normal cases + * or the ibm,power-off-ups token (if present & requested) in case of + * a power failure. If power-off token is used, power on will only be + * possible with power button press. If ibm,power-off-ups token is used + * it will allow auto poweron after power is restored. 
+ */ +static void pseries_power_off(void) +{ + int rc; + int rtas_poweroff_ups_token = rtas_token("ibm,power-off-ups"); + + if (rtas_flash_term_hook) + rtas_flash_term_hook(SYS_POWER_OFF); + + if (rtas_poweron_auto == 0 || + rtas_poweroff_ups_token == RTAS_UNKNOWN_SERVICE) { + rc = rtas_call(rtas_token("power-off"), 2, 1, NULL, -1, -1); + printk(KERN_INFO "RTAS power-off returned %d\n", rc); + } else { + rc = rtas_call(rtas_poweroff_ups_token, 0, 1, NULL); + printk(KERN_INFO "RTAS ibm,power-off-ups returned %d\n", rc); + } + for (;;); +} + /* * Called very early, MMU is off, device-tree isn't unflattened */ @@ -741,6 +774,8 @@ static int __init pSeries_probe(void) else hpte_init_native(); + pm_power_off = pseries_power_off; + pr_debug("Machine is%s LPAR !\n", (powerpc_firmware_features & FW_FEATURE_LPAR) ? "" : " not"); @@ -754,34 +789,6 @@ static int pSeries_pci_probe_mode(struct pci_bus *bus) return PCI_PROBE_NORMAL; } -/** - * pSeries_power_off - tell firmware about how to power off the system. - * - * This function calls either the power-off rtas token in normal cases - * or the ibm,power-off-ups token (if present & requested) in case of - * a power failure. If power-off token is used, power on will only be - * possible with power button press. If ibm,power-off-ups token is used - * it will allow auto poweron after power is restored. - */ -static void pSeries_power_off(void) -{ - int rc; - int rtas_poweroff_ups_token = rtas_token("ibm,power-off-ups"); - - if (rtas_flash_term_hook) - rtas_flash_term_hook(SYS_POWER_OFF); - - if (rtas_poweron_auto == 0 || - rtas_poweroff_ups_token == RTAS_UNKNOWN_SERVICE) { - rc = rtas_call(rtas_token("power-off"), 2, 1, NULL, -1, -1); - printk(KERN_INFO "RTAS power-off returned %d\n", rc); - } else { - rc = rtas_call(rtas_poweroff_ups_token, 0, 1, NULL); - printk(KERN_INFO "RTAS ibm,power-off-ups returned %d\n", rc); - } - for (;;); -} - #ifndef CONFIG_PCI void pSeries_final_fixup(void) { } #endif @@ -796,7 +803,6 @@ define_machine(pseries) { .pcibios_fixup = pSeries_final_fixup, .pci_probe_mode = pSeries_pci_probe_mode, .restart = rtas_restart, - .power_off = pSeries_power_off, .halt = rtas_halt, .panic = rtas_os_term, .get_boot_time = rtas_get_boot_time, diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c index ad56edc39919..f532c92bf99d 100644 --- a/arch/powerpc/sysdev/axonram.c +++ b/arch/powerpc/sysdev/axonram.c @@ -326,7 +326,6 @@ static struct platform_driver axon_ram_driver = { .remove = axon_ram_remove, .driver = { .name = AXON_RAM_MODULE_NAME, - .owner = THIS_MODULE, .of_match_table = axon_ram_device_id, }, }; diff --git a/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c b/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c index 90545ad1626e..861cebf9c292 100644 --- a/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c +++ b/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c @@ -210,7 +210,6 @@ static const struct of_device_id mpc85xx_l2ctlr_of_match[] = { static struct platform_driver mpc85xx_l2ctlr_of_platform_driver = { .driver = { .name = "fsl-l2ctlr", - .owner = THIS_MODULE, .of_match_table = mpc85xx_l2ctlr_of_match, }, .probe = mpc85xx_l2ctlr_of_probe, diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c index da08ed088157..4bbb4b8dfd09 100644 --- a/arch/powerpc/sysdev/fsl_msi.c +++ b/arch/powerpc/sysdev/fsl_msi.c @@ -13,7 +13,6 @@ * */ #include <linux/irq.h> -#include <linux/bootmem.h> #include <linux/msi.h> #include <linux/pci.h> #include <linux/slab.h> @@ -82,8 +81,8 @@ static void fsl_msi_print_chip(struct irq_data *irqd, struct seq_file 
*p) static struct irq_chip fsl_msi_chip = { - .irq_mask = mask_msi_irq, - .irq_unmask = unmask_msi_irq, + .irq_mask = pci_msi_mask_irq, + .irq_unmask = pci_msi_unmask_irq, .irq_ack = fsl_msi_end_irq, .irq_print_chip = fsl_msi_print_chip, }; @@ -242,7 +241,7 @@ static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) irq_set_msi_desc(virq, entry); fsl_compose_msi_msg(pdev, hwirq, &msg, msi_data); - write_msi_msg(virq, &msg); + pci_write_msi_msg(virq, &msg); } return 0; @@ -578,7 +577,6 @@ static const struct of_device_id fsl_of_msi_ids[] = { static struct platform_driver fsl_of_msi_driver = { .driver = { .name = "fsl-msi", - .owner = THIS_MODULE, .of_match_table = fsl_of_msi_ids, }, .probe = fsl_of_msi_probe, diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c index 65d2ed4549e6..6455c1eada1a 100644 --- a/arch/powerpc/sysdev/fsl_pci.c +++ b/arch/powerpc/sysdev/fsl_pci.c @@ -23,7 +23,6 @@ #include <linux/string.h> #include <linux/init.h> #include <linux/interrupt.h> -#include <linux/bootmem.h> #include <linux/memblock.h> #include <linux/log2.h> #include <linux/slab.h> @@ -152,7 +151,7 @@ static int setup_one_atmu(struct ccsr_pci __iomem *pci, flags |= 0x10000000; /* enable relaxed ordering */ for (i = 0; size > 0; i++) { - unsigned int bits = min(ilog2(size), + unsigned int bits = min_t(u32, ilog2(size), __ffs(pci_addr | phys_addr)); if (index + i >= 5) diff --git a/arch/powerpc/sysdev/fsl_pmc.c b/arch/powerpc/sysdev/fsl_pmc.c index 8cf4aa0e3a25..1d6fd7c59fe9 100644 --- a/arch/powerpc/sysdev/fsl_pmc.c +++ b/arch/powerpc/sysdev/fsl_pmc.c @@ -80,7 +80,6 @@ static const struct of_device_id pmc_ids[] = { static struct platform_driver pmc_driver = { .driver = { .name = "fsl-pmc", - .owner = THIS_MODULE, .of_match_table = pmc_ids, }, .probe = pmc_probe, diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c index c04b718307c8..c1cd3698f534 100644 --- a/arch/powerpc/sysdev/fsl_rio.c +++ b/arch/powerpc/sysdev/fsl_rio.c @@ -58,6 +58,19 @@ #define RIO_ISR_AACR 0x10120 #define RIO_ISR_AACR_AA 0x1 /* Accept All ID */ +#define RIWTAR_TRAD_VAL_SHIFT 12 +#define RIWTAR_TRAD_MASK 0x00FFFFFF +#define RIWBAR_BADD_VAL_SHIFT 12 +#define RIWBAR_BADD_MASK 0x003FFFFF +#define RIWAR_ENABLE 0x80000000 +#define RIWAR_TGINT_LOCAL 0x00F00000 +#define RIWAR_RDTYP_NO_SNOOP 0x00040000 +#define RIWAR_RDTYP_SNOOP 0x00050000 +#define RIWAR_WRTYP_NO_SNOOP 0x00004000 +#define RIWAR_WRTYP_SNOOP 0x00005000 +#define RIWAR_WRTYP_ALLOC 0x00006000 +#define RIWAR_SIZE_MASK 0x0000003F + #define __fsl_read_rio_config(x, addr, err, op) \ __asm__ __volatile__( \ "1: "op" %1,0(%2)\n" \ @@ -266,6 +279,89 @@ fsl_rio_config_write(struct rio_mport *mport, int index, u16 destid, return 0; } +static void fsl_rio_inbound_mem_init(struct rio_priv *priv) +{ + int i; + + /* close inbound windows */ + for (i = 0; i < RIO_INB_ATMU_COUNT; i++) + out_be32(&priv->inb_atmu_regs[i].riwar, 0); +} + +int fsl_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart, + u64 rstart, u32 size, u32 flags) +{ + struct rio_priv *priv = mport->priv; + u32 base_size; + unsigned int base_size_log; + u64 win_start, win_end; + u32 riwar; + int i; + + if ((size & (size - 1)) != 0) + return -EINVAL; + + base_size_log = ilog2(size); + base_size = 1 << base_size_log; + + /* check if addresses are aligned with the window size */ + if (lstart & (base_size - 1)) + return -EINVAL; + if (rstart & (base_size - 1)) + return -EINVAL; + + /* check for conflicting ranges */ + for (i = 0; i < RIO_INB_ATMU_COUNT; i++) { + riwar = 
in_be32(&priv->inb_atmu_regs[i].riwar); + if ((riwar & RIWAR_ENABLE) == 0) + continue; + win_start = ((u64)(in_be32(&priv->inb_atmu_regs[i].riwbar) & RIWBAR_BADD_MASK)) + << RIWBAR_BADD_VAL_SHIFT; + win_end = win_start + ((1 << ((riwar & RIWAR_SIZE_MASK) + 1)) - 1); + if (rstart < win_end && (rstart + size) > win_start) + return -EINVAL; + } + + /* find unused atmu */ + for (i = 0; i < RIO_INB_ATMU_COUNT; i++) { + riwar = in_be32(&priv->inb_atmu_regs[i].riwar); + if ((riwar & RIWAR_ENABLE) == 0) + break; + } + if (i >= RIO_INB_ATMU_COUNT) + return -ENOMEM; + + out_be32(&priv->inb_atmu_regs[i].riwtar, lstart >> RIWTAR_TRAD_VAL_SHIFT); + out_be32(&priv->inb_atmu_regs[i].riwbar, rstart >> RIWBAR_BADD_VAL_SHIFT); + out_be32(&priv->inb_atmu_regs[i].riwar, RIWAR_ENABLE | RIWAR_TGINT_LOCAL | + RIWAR_RDTYP_SNOOP | RIWAR_WRTYP_SNOOP | (base_size_log - 1)); + + return 0; +} + +void fsl_unmap_inb_mem(struct rio_mport *mport, dma_addr_t lstart) +{ + u32 win_start_shift, base_start_shift; + struct rio_priv *priv = mport->priv; + u32 riwar, riwtar; + int i; + + /* skip default window */ + base_start_shift = lstart >> RIWTAR_TRAD_VAL_SHIFT; + for (i = 0; i < RIO_INB_ATMU_COUNT; i++) { + riwar = in_be32(&priv->inb_atmu_regs[i].riwar); + if ((riwar & RIWAR_ENABLE) == 0) + continue; + + riwtar = in_be32(&priv->inb_atmu_regs[i].riwtar); + win_start_shift = riwtar & RIWTAR_TRAD_MASK; + if (win_start_shift == base_start_shift) { + out_be32(&priv->inb_atmu_regs[i].riwar, riwar & ~RIWAR_ENABLE); + return; + } + } +} + void fsl_rio_port_error_handler(int offset) { /*XXX: Error recovery is not implemented, we just clear errors */ @@ -389,6 +485,8 @@ int fsl_rio_setup(struct platform_device *dev) ops->add_outb_message = fsl_add_outb_message; ops->add_inb_buffer = fsl_add_inb_buffer; ops->get_inb_message = fsl_get_inb_message; + ops->map_inb = fsl_map_inb_mem; + ops->unmap_inb = fsl_unmap_inb_mem; rmu_node = of_parse_phandle(dev->dev.of_node, "fsl,srio-rmu-handle", 0); if (!rmu_node) { @@ -602,6 +700,11 @@ int fsl_rio_setup(struct platform_device *dev) RIO_ATMU_REGS_PORT2_OFFSET)); priv->maint_atmu_regs = priv->atmu_regs + 1; + priv->inb_atmu_regs = (struct rio_inb_atmu_regs __iomem *) + (priv->regs_win + + ((i == 0) ? RIO_INB_ATMU_REGS_PORT1_OFFSET : + RIO_INB_ATMU_REGS_PORT2_OFFSET)); + /* Set to receive any dist ID for serial RapidIO controller. 
*/ if (port->phy_type == RIO_PHY_SERIAL) @@ -620,6 +723,7 @@ int fsl_rio_setup(struct platform_device *dev) rio_law_start = range_start; fsl_rio_setup_rmu(port, rmu_np[i]); + fsl_rio_inbound_mem_init(priv); dbell->mport[i] = port; @@ -673,7 +777,6 @@ static const struct of_device_id fsl_of_rio_rpn_ids[] = { static struct platform_driver fsl_of_rio_rpn_driver = { .driver = { .name = "fsl-of-rio", - .owner = THIS_MODULE, .of_match_table = fsl_of_rio_rpn_ids, }, .probe = fsl_of_rio_rpn_probe, diff --git a/arch/powerpc/sysdev/fsl_rio.h b/arch/powerpc/sysdev/fsl_rio.h index ae8e27405a0d..d53407a34f32 100644 --- a/arch/powerpc/sysdev/fsl_rio.h +++ b/arch/powerpc/sysdev/fsl_rio.h @@ -50,9 +50,12 @@ #define RIO_S_DBELL_REGS_OFFSET 0x13400 #define RIO_S_PW_REGS_OFFSET 0x134e0 #define RIO_ATMU_REGS_DBELL_OFFSET 0x10C40 +#define RIO_INB_ATMU_REGS_PORT1_OFFSET 0x10d60 +#define RIO_INB_ATMU_REGS_PORT2_OFFSET 0x10f60 #define MAX_MSG_UNIT_NUM 2 #define MAX_PORT_NUM 4 +#define RIO_INB_ATMU_COUNT 4 struct rio_atmu_regs { u32 rowtar; @@ -63,6 +66,15 @@ struct rio_atmu_regs { u32 pad2[3]; }; +struct rio_inb_atmu_regs { + u32 riwtar; + u32 pad1; + u32 riwbar; + u32 pad2; + u32 riwar; + u32 pad3[3]; +}; + struct rio_dbell_ring { void *virt; dma_addr_t phys; @@ -99,6 +111,7 @@ struct rio_priv { void __iomem *regs_win; struct rio_atmu_regs __iomem *atmu_regs; struct rio_atmu_regs __iomem *maint_atmu_regs; + struct rio_inb_atmu_regs __iomem *inb_atmu_regs; void __iomem *maint_win; void *rmm_handle; /* RapidIO message manager(unit) Handle */ }; diff --git a/arch/powerpc/sysdev/fsl_soc.c b/arch/powerpc/sysdev/fsl_soc.c index ffd1169ebaab..99269c041615 100644 --- a/arch/powerpc/sysdev/fsl_soc.c +++ b/arch/powerpc/sysdev/fsl_soc.c @@ -197,8 +197,7 @@ static int __init setup_rstcr(void) if (!rstcr && ppc_md.restart == fsl_rstcr_restart) printk(KERN_ERR "No RSTCR register, warm reboot won't work\n"); - if (np) - of_node_put(np); + of_node_put(np); return 0; } @@ -238,7 +237,7 @@ void fsl_hv_restart(char *cmd) /* * Halt the current partition * - * This function should be assigned to the ppc_md.power_off and ppc_md.halt + * This function should be assigned to the pm_power_off and ppc_md.halt * function pointers, to shut down the partition when we're running under * the Freescale hypervisor. */ diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c index b50f97811c25..b28733727ed3 100644 --- a/arch/powerpc/sysdev/ipic.c +++ b/arch/powerpc/sysdev/ipic.c @@ -20,7 +20,6 @@ #include <linux/signal.h> #include <linux/syscore_ops.h> #include <linux/device.h> -#include <linux/bootmem.h> #include <linux/spinlock.h> #include <linux/fsl_devices.h> #include <asm/irq.h> diff --git a/arch/powerpc/sysdev/mpc5xxx_clocks.c b/arch/powerpc/sysdev/mpc5xxx_clocks.c index 5492dc5f56f4..f4f0301b9a60 100644 --- a/arch/powerpc/sysdev/mpc5xxx_clocks.c +++ b/arch/powerpc/sysdev/mpc5xxx_clocks.c @@ -26,8 +26,7 @@ unsigned long mpc5xxx_get_bus_frequency(struct device_node *node) of_node_put(node); node = np; } - if (node) - of_node_put(node); + of_node_put(node); return p_bus_freq ? 
*p_bus_freq : 0; } diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c index 89cec0ed6a58..c4648ad5c1f3 100644 --- a/arch/powerpc/sysdev/mpic.c +++ b/arch/powerpc/sysdev/mpic.c @@ -24,7 +24,6 @@ #include <linux/irq.h> #include <linux/smp.h> #include <linux/interrupt.h> -#include <linux/bootmem.h> #include <linux/spinlock.h> #include <linux/pci.h> #include <linux/slab.h> diff --git a/arch/powerpc/sysdev/mpic_msgr.c b/arch/powerpc/sysdev/mpic_msgr.c index 7bdf3cc741e4..3f165d972a0e 100644 --- a/arch/powerpc/sysdev/mpic_msgr.c +++ b/arch/powerpc/sysdev/mpic_msgr.c @@ -270,7 +270,6 @@ static const struct of_device_id mpic_msgr_ids[] = { static struct platform_driver mpic_msgr_driver = { .driver = { .name = "mpic-msgr", - .owner = THIS_MODULE, .of_match_table = mpic_msgr_ids, }, .probe = mpic_msgr_probe, diff --git a/arch/powerpc/sysdev/mpic_pasemi_msi.c b/arch/powerpc/sysdev/mpic_pasemi_msi.c index 15dccd35fa11..a3f660eed6de 100644 --- a/arch/powerpc/sysdev/mpic_pasemi_msi.c +++ b/arch/powerpc/sysdev/mpic_pasemi_msi.c @@ -16,7 +16,6 @@ #undef DEBUG #include <linux/irq.h> -#include <linux/bootmem.h> #include <linux/msi.h> #include <asm/mpic.h> #include <asm/prom.h> @@ -42,7 +41,7 @@ static struct mpic *msi_mpic; static void mpic_pasemi_msi_mask_irq(struct irq_data *data) { pr_debug("mpic_pasemi_msi_mask_irq %d\n", data->irq); - mask_msi_irq(data); + pci_msi_mask_irq(data); mpic_mask_irq(data); } @@ -50,7 +49,7 @@ static void mpic_pasemi_msi_unmask_irq(struct irq_data *data) { pr_debug("mpic_pasemi_msi_unmask_irq %d\n", data->irq); mpic_unmask_irq(data); - unmask_msi_irq(data); + pci_msi_unmask_irq(data); } static struct irq_chip mpic_pasemi_msi_chip = { @@ -136,7 +135,7 @@ static int pasemi_msi_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) * register to generate MSI [512...1023] */ msg.data = hwirq-0x200; - write_msi_msg(virq, &msg); + pci_write_msi_msg(virq, &msg); } return 0; diff --git a/arch/powerpc/sysdev/mpic_u3msi.c b/arch/powerpc/sysdev/mpic_u3msi.c index 623d7fba15b4..b2cef1809389 100644 --- a/arch/powerpc/sysdev/mpic_u3msi.c +++ b/arch/powerpc/sysdev/mpic_u3msi.c @@ -10,7 +10,6 @@ */ #include <linux/irq.h> -#include <linux/bootmem.h> #include <linux/msi.h> #include <asm/mpic.h> #include <asm/prom.h> @@ -25,14 +24,14 @@ static struct mpic *msi_mpic; static void mpic_u3msi_mask_irq(struct irq_data *data) { - mask_msi_irq(data); + pci_msi_mask_irq(data); mpic_mask_irq(data); } static void mpic_u3msi_unmask_irq(struct irq_data *data) { mpic_unmask_irq(data); - unmask_msi_irq(data); + pci_msi_unmask_irq(data); } static struct irq_chip mpic_u3msi_chip = { @@ -171,7 +170,7 @@ static int u3msi_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) printk("u3msi: allocated virq 0x%x (hw 0x%x) addr 0x%lx\n", virq, hwirq, (unsigned long)addr); msg.data = hwirq; - write_msi_msg(virq, &msg); + pci_write_msi_msg(virq, &msg); hwirq++; } diff --git a/arch/powerpc/sysdev/pmi.c b/arch/powerpc/sysdev/pmi.c index 13e67d93a7c1..8a0b77a3ec0c 100644 --- a/arch/powerpc/sysdev/pmi.c +++ b/arch/powerpc/sysdev/pmi.c @@ -210,7 +210,6 @@ static struct platform_driver pmi_of_platform_driver = { .remove = pmi_of_remove, .driver = { .name = "pmi", - .owner = THIS_MODULE, .of_match_table = pmi_match, }, }; diff --git a/arch/powerpc/sysdev/ppc4xx_cpm.c b/arch/powerpc/sysdev/ppc4xx_cpm.c index 82e2cfe35c62..ba95adf81d8d 100644 --- a/arch/powerpc/sysdev/ppc4xx_cpm.c +++ b/arch/powerpc/sysdev/ppc4xx_cpm.c @@ -281,7 +281,7 @@ static int __init cpm_init(void) printk(KERN_ERR "cpm: could not 
parse dcr property for %s\n", np->full_name); ret = -EINVAL; - goto out; + goto node_put; } cpm.dcr_host = dcr_map(np, dcr_base, dcr_len); @@ -290,7 +290,7 @@ static int __init cpm_init(void) printk(KERN_ERR "cpm: failed to map dcr property for %s\n", np->full_name); ret = -EINVAL; - goto out; + goto node_put; } /* All 4xx SoCs with a CPM controller have one of two @@ -330,9 +330,9 @@ static int __init cpm_init(void) if (cpm.standby || cpm.suspend) suspend_set_ops(&cpm_suspend_ops); +node_put: + of_node_put(np); out: - if (np) - of_node_put(np); return ret; } diff --git a/arch/powerpc/sysdev/ppc4xx_hsta_msi.c b/arch/powerpc/sysdev/ppc4xx_hsta_msi.c index a6a4dbda9078..ed9970ff8d94 100644 --- a/arch/powerpc/sysdev/ppc4xx_hsta_msi.c +++ b/arch/powerpc/sysdev/ppc4xx_hsta_msi.c @@ -85,7 +85,7 @@ static int hsta_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) msi_bitmap_free_hwirqs(&ppc4xx_hsta_msi.bmp, irq, 1); return -EINVAL; } - write_msi_msg(hwirq, &msg); + pci_write_msi_msg(hwirq, &msg); } return 0; @@ -197,7 +197,6 @@ static struct platform_driver hsta_msi_driver = { .probe = hsta_msi_probe, .driver = { .name = "hsta-msi", - .owner = THIS_MODULE, .of_match_table = hsta_msi_ids, }, }; diff --git a/arch/powerpc/sysdev/ppc4xx_msi.c b/arch/powerpc/sysdev/ppc4xx_msi.c index 22b5200636e7..6e2e6aa378bb 100644 --- a/arch/powerpc/sysdev/ppc4xx_msi.c +++ b/arch/powerpc/sysdev/ppc4xx_msi.c @@ -22,7 +22,6 @@ */ #include <linux/irq.h> -#include <linux/bootmem.h> #include <linux/pci.h> #include <linux/msi.h> #include <linux/of_platform.h> @@ -116,7 +115,7 @@ static int ppc4xx_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) irq_set_msi_desc(virq, entry); msg.data = int_no; - write_msi_msg(virq, &msg); + pci_write_msi_msg(virq, &msg); } return 0; } @@ -270,7 +269,6 @@ static struct platform_driver ppc4xx_msi_driver = { .remove = ppc4xx_of_msi_remove, .driver = { .name = "ppc4xx-msi", - .owner = THIS_MODULE, .of_match_table = ppc4xx_msi_ids, }, diff --git a/arch/powerpc/sysdev/ppc4xx_pci.c b/arch/powerpc/sysdev/ppc4xx_pci.c index df6e2fc4ff92..086aca69ecae 100644 --- a/arch/powerpc/sysdev/ppc4xx_pci.c +++ b/arch/powerpc/sysdev/ppc4xx_pci.c @@ -22,7 +22,6 @@ #include <linux/pci.h> #include <linux/init.h> #include <linux/of.h> -#include <linux/bootmem.h> #include <linux/delay.h> #include <linux/slab.h> diff --git a/arch/powerpc/sysdev/qe_lib/qe.c b/arch/powerpc/sysdev/qe_lib/qe.c index 238a07b97f2c..1f29cee8da7b 100644 --- a/arch/powerpc/sysdev/qe_lib/qe.c +++ b/arch/powerpc/sysdev/qe_lib/qe.c @@ -22,7 +22,6 @@ #include <linux/spinlock.h> #include <linux/mm.h> #include <linux/interrupt.h> -#include <linux/bootmem.h> #include <linux/module.h> #include <linux/delay.h> #include <linux/ioport.h> @@ -693,7 +692,6 @@ static const struct of_device_id qe_ids[] = { static struct platform_driver qe_driver = { .driver = { .name = "fsl-qe", - .owner = THIS_MODULE, .of_match_table = qe_ids, }, .probe = qe_probe, diff --git a/arch/powerpc/sysdev/qe_lib/qe_ic.c b/arch/powerpc/sysdev/qe_lib/qe_ic.c index b2b87c30e266..543765e1ef14 100644 --- a/arch/powerpc/sysdev/qe_lib/qe_ic.c +++ b/arch/powerpc/sysdev/qe_lib/qe_ic.c @@ -23,7 +23,6 @@ #include <linux/sched.h> #include <linux/signal.h> #include <linux/device.h> -#include <linux/bootmem.h> #include <linux/spinlock.h> #include <asm/irq.h> #include <asm/io.h> diff --git a/arch/powerpc/sysdev/uic.c b/arch/powerpc/sysdev/uic.c index 92033936a8f7..7c37157d4c24 100644 --- a/arch/powerpc/sysdev/uic.c +++ b/arch/powerpc/sysdev/uic.c @@ -19,7 +19,6 @@ #include 
<linux/sched.h> #include <linux/signal.h> #include <linux/device.h> -#include <linux/bootmem.h> #include <linux/spinlock.h> #include <linux/irq.h> #include <linux/interrupt.h> diff --git a/arch/powerpc/sysdev/xics/ics-opal.c b/arch/powerpc/sysdev/xics/ics-opal.c index 3c6ee1b64e5d..4ba554ec8eaf 100644 --- a/arch/powerpc/sysdev/xics/ics-opal.c +++ b/arch/powerpc/sysdev/xics/ics-opal.c @@ -73,7 +73,7 @@ static unsigned int ics_opal_startup(struct irq_data *d) * at that level, so we do it here by hand. */ if (d->msi_desc) - unmask_msi_irq(d); + pci_msi_unmask_irq(d); #endif /* unmask it */ diff --git a/arch/powerpc/sysdev/xics/ics-rtas.c b/arch/powerpc/sysdev/xics/ics-rtas.c index 936575d99c5c..bc81335b2cbc 100644 --- a/arch/powerpc/sysdev/xics/ics-rtas.c +++ b/arch/powerpc/sysdev/xics/ics-rtas.c @@ -76,7 +76,7 @@ static unsigned int ics_rtas_startup(struct irq_data *d) * at that level, so we do it here by hand. */ if (d->msi_desc) - unmask_msi_irq(d); + pci_msi_unmask_irq(d); #endif /* unmask it */ ics_rtas_unmask_irq(d); diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c index fe0cca477164..365249cd346b 100644 --- a/arch/powerpc/sysdev/xics/xics-common.c +++ b/arch/powerpc/sysdev/xics/xics-common.c @@ -155,7 +155,7 @@ int __init xics_smp_probe(void) void xics_teardown_cpu(void) { - struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); + struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr); /* * we have to reset the cppr index to 0 because we're diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index c8efbb37d6e0..5b150f0c5df9 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -51,6 +51,12 @@ #include <asm/paca.h> #endif +#if defined(CONFIG_PPC_SPLPAR) +#include <asm/plpar_wrappers.h> +#else +static inline long plapr_set_ciabr(unsigned long ciabr) {return 0; }; +#endif + #include "nonstdio.h" #include "dis-asm.h" @@ -88,10 +94,9 @@ struct bpt { }; /* Bits in bpt.enabled */ -#define BP_IABR_TE 1 /* IABR translation enabled */ -#define BP_IABR 2 -#define BP_TRAP 8 -#define BP_DABR 0x10 +#define BP_CIABR 1 +#define BP_TRAP 2 +#define BP_DABR 4 #define NBPTS 256 static struct bpt bpts[NBPTS]; @@ -270,6 +275,45 @@ static inline void cinval(void *p) asm volatile ("dcbi 0,%0; icbi 0,%0" : : "r" (p)); } +/** + * write_ciabr() - write the CIABR SPR + * @ciabr: The value to write. + * + * This function writes a value to the CIARB register either directly + * through mtspr instruction if the kernel is in HV privilege mode or + * call a hypervisor function to achieve the same in case the kernel + * is in supervisor privilege mode. + */ +static void write_ciabr(unsigned long ciabr) +{ + if (!cpu_has_feature(CPU_FTR_ARCH_207S)) + return; + + if (cpu_has_feature(CPU_FTR_HVMODE)) { + mtspr(SPRN_CIABR, ciabr); + return; + } + plapr_set_ciabr(ciabr); +} + +/** + * set_ciabr() - set the CIABR + * @addr: The value to set. + * + * This function sets the correct privilege value into the the HW + * breakpoint address before writing it up in the CIABR register. + */ +static void set_ciabr(unsigned long addr) +{ + addr &= ~CIABR_PRIV; + + if (cpu_has_feature(CPU_FTR_HVMODE)) + addr |= CIABR_PRIV_HYPER; + else + addr |= CIABR_PRIV_SUPER; + write_ciabr(addr); +} + /* * Disable surveillance (the service processor watchdog function) * while we are in xmon. 
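
A minimal usage sketch for the new xmon CIABR helpers added just above (not part of the patch): the insert_cpu_bpts()/remove_cpu_bpts() hunks further down pair them exactly this way — set_ciabr() arms a hardware instruction breakpoint on ISA 2.07 (POWER8) CPUs, write_ciabr(0) disarms it. set_ciabr(), write_ciabr(), cpu_has_feature() and CPU_FTR_ARCH_207S come from the patch/kernel; the wrapper names below are hypothetical, for illustration only.

	/* Hypothetical wrappers, illustration only. */
	static int xmon_arm_instr_breakpoint(unsigned long addr)
	{
		if (!cpu_has_feature(CPU_FTR_ARCH_207S))
			return -ENODEV;	/* no CIABR on pre-POWER8 CPUs */
		set_ciabr(addr);	/* masks the low bits and ORs in CIABR_PRIV_HYPER/SUPER */
		return 0;
	}

	static void xmon_disarm_instr_breakpoint(void)
	{
		write_ciabr(0);		/* a CIABR value of 0 clears the breakpoint */
	}
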
@@ -727,7 +771,7 @@ static void insert_bpts(void) bp = bpts; for (i = 0; i < NBPTS; ++i, ++bp) { - if ((bp->enabled & (BP_TRAP|BP_IABR)) == 0) + if ((bp->enabled & (BP_TRAP|BP_CIABR)) == 0) continue; if (mread(bp->address, &bp->instr[0], 4) != 4) { printf("Couldn't read instruction at %lx, " @@ -742,7 +786,7 @@ static void insert_bpts(void) continue; } store_inst(&bp->instr[0]); - if (bp->enabled & BP_IABR) + if (bp->enabled & BP_CIABR) continue; if (mwrite(bp->address, &bpinstr, 4) != 4) { printf("Couldn't write instruction at %lx, " @@ -764,9 +808,9 @@ static void insert_cpu_bpts(void) brk.len = 8; __set_breakpoint(&brk); } - if (iabr && cpu_has_feature(CPU_FTR_IABR)) - mtspr(SPRN_IABR, iabr->address - | (iabr->enabled & (BP_IABR|BP_IABR_TE))); + + if (iabr) + set_ciabr(iabr->address); } static void remove_bpts(void) @@ -777,7 +821,7 @@ static void remove_bpts(void) bp = bpts; for (i = 0; i < NBPTS; ++i, ++bp) { - if ((bp->enabled & (BP_TRAP|BP_IABR)) != BP_TRAP) + if ((bp->enabled & (BP_TRAP|BP_CIABR)) != BP_TRAP) continue; if (mread(bp->address, &instr, 4) == 4 && instr == bpinstr @@ -792,8 +836,7 @@ static void remove_bpts(void) static void remove_cpu_bpts(void) { hw_breakpoint_disable(); - if (cpu_has_feature(CPU_FTR_IABR)) - mtspr(SPRN_IABR, 0); + write_ciabr(0); } /* Command interpreting routine */ @@ -907,7 +950,7 @@ cmds(struct pt_regs *excp) case 'u': dump_segments(); break; -#elif defined(CONFIG_4xx) +#elif defined(CONFIG_44x) case 'u': dump_tlb_44x(); break; @@ -981,7 +1024,8 @@ static void bootcmds(void) else if (cmd == 'h') ppc_md.halt(); else if (cmd == 'p') - ppc_md.power_off(); + if (pm_power_off) + pm_power_off(); } static int cpu_cmd(void) @@ -1127,7 +1171,7 @@ static char *breakpoint_help_string = "b <addr> [cnt] set breakpoint at given instr addr\n" "bc clear all breakpoints\n" "bc <n/addr> clear breakpoint number n or at addr\n" - "bi <addr> [cnt] set hardware instr breakpoint (POWER3/RS64 only)\n" + "bi <addr> [cnt] set hardware instr breakpoint (POWER8 only)\n" "bd <addr> [cnt] set hardware data breakpoint\n" ""; @@ -1166,13 +1210,13 @@ bpt_cmds(void) break; case 'i': /* bi - hardware instr breakpoint */ - if (!cpu_has_feature(CPU_FTR_IABR)) { + if (!cpu_has_feature(CPU_FTR_ARCH_207S)) { printf("Hardware instruction breakpoint " "not supported on this cpu\n"); break; } if (iabr) { - iabr->enabled &= ~(BP_IABR | BP_IABR_TE); + iabr->enabled &= ~BP_CIABR; iabr = NULL; } if (!scanhex(&a)) @@ -1181,7 +1225,7 @@ bpt_cmds(void) break; bp = new_breakpoint(a); if (bp != NULL) { - bp->enabled |= BP_IABR | BP_IABR_TE; + bp->enabled |= BP_CIABR; iabr = bp; } break; @@ -1238,7 +1282,7 @@ bpt_cmds(void) if (!bp->enabled) continue; printf("%2x %s ", BP_NUM(bp), - (bp->enabled & BP_IABR)? "inst": "trap"); + (bp->enabled & BP_CIABR) ? 
"inst": "trap"); xmon_print_symbol(bp->address, " ", "\n"); } break; diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index f2cf1f90295b..68b68d755fdf 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -65,6 +65,7 @@ config S390 def_bool y select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS + select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_HAVE_NMI_SAFE_CMPXCHG select ARCH_INLINE_READ_LOCK select ARCH_INLINE_READ_LOCK_BH diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c index 47c8630c93cd..15c94246b600 100644 --- a/arch/s390/appldata/appldata_base.c +++ b/arch/s390/appldata/appldata_base.c @@ -511,7 +511,6 @@ static const struct dev_pm_ops appldata_pm_ops = { static struct platform_driver appldata_pdrv = { .driver = { .name = "appldata", - .owner = THIS_MODULE, .pm = &appldata_pm_ops, }, }; diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c index 23223cd63e54..1f272b24fc0b 100644 --- a/arch/s390/crypto/aes_s390.c +++ b/arch/s390/crypto/aes_s390.c @@ -979,7 +979,7 @@ static void __exit aes_s390_fini(void) module_init(aes_s390_init); module_exit(aes_s390_fini); -MODULE_ALIAS("aes-all"); +MODULE_ALIAS_CRYPTO("aes-all"); MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm"); MODULE_LICENSE("GPL"); diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c index 7acb77f7ef1a..9e05cc453a40 100644 --- a/arch/s390/crypto/des_s390.c +++ b/arch/s390/crypto/des_s390.c @@ -619,8 +619,8 @@ static void __exit des_s390_exit(void) module_init(des_s390_init); module_exit(des_s390_exit); -MODULE_ALIAS("des"); -MODULE_ALIAS("des3_ede"); +MODULE_ALIAS_CRYPTO("des"); +MODULE_ALIAS_CRYPTO("des3_ede"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms"); diff --git a/arch/s390/crypto/ghash_s390.c b/arch/s390/crypto/ghash_s390.c index d43485d142e9..7940dc90e80b 100644 --- a/arch/s390/crypto/ghash_s390.c +++ b/arch/s390/crypto/ghash_s390.c @@ -160,7 +160,7 @@ static void __exit ghash_mod_exit(void) module_init(ghash_mod_init); module_exit(ghash_mod_exit); -MODULE_ALIAS("ghash"); +MODULE_ALIAS_CRYPTO("ghash"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("GHASH Message Digest Algorithm, s390 implementation"); diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c index a1b3a9dc9d8a..5b2bee323694 100644 --- a/arch/s390/crypto/sha1_s390.c +++ b/arch/s390/crypto/sha1_s390.c @@ -103,6 +103,6 @@ static void __exit sha1_s390_fini(void) module_init(sha1_s390_init); module_exit(sha1_s390_fini); -MODULE_ALIAS("sha1"); +MODULE_ALIAS_CRYPTO("sha1"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm"); diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c index 9b853809a492..b74ff158108c 100644 --- a/arch/s390/crypto/sha256_s390.c +++ b/arch/s390/crypto/sha256_s390.c @@ -143,7 +143,7 @@ static void __exit sha256_s390_fini(void) module_init(sha256_s390_init); module_exit(sha256_s390_fini); -MODULE_ALIAS("sha256"); -MODULE_ALIAS("sha224"); +MODULE_ALIAS_CRYPTO("sha256"); +MODULE_ALIAS_CRYPTO("sha224"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("SHA256 and SHA224 Secure Hash Algorithm"); diff --git a/arch/s390/crypto/sha512_s390.c b/arch/s390/crypto/sha512_s390.c index 32a81383b69c..0c36989ba182 100644 --- a/arch/s390/crypto/sha512_s390.c +++ b/arch/s390/crypto/sha512_s390.c @@ -86,7 +86,7 @@ static struct shash_alg sha512_alg = { } }; -MODULE_ALIAS("sha512"); +MODULE_ALIAS_CRYPTO("sha512"); static int sha384_init(struct shash_desc 
*desc) { @@ -126,7 +126,7 @@ static struct shash_alg sha384_alg = { } }; -MODULE_ALIAS("sha384"); +MODULE_ALIAS_CRYPTO("sha384"); static int __init init(void) { diff --git a/arch/s390/hypfs/hypfs_dbfs.c b/arch/s390/hypfs/hypfs_dbfs.c index 2badf2bf9cd7..47fe1055c714 100644 --- a/arch/s390/hypfs/hypfs_dbfs.c +++ b/arch/s390/hypfs/hypfs_dbfs.c @@ -83,10 +83,9 @@ static ssize_t dbfs_read(struct file *file, char __user *buf, static long dbfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { - struct hypfs_dbfs_file *df; + struct hypfs_dbfs_file *df = file_inode(file)->i_private; long rc; - df = file->f_path.dentry->d_inode->i_private; mutex_lock(&df->lock); if (df->unlocked_ioctl) rc = df->unlocked_ioctl(file, cmd, arg); diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild index 773f86676588..c631f98fd524 100644 --- a/arch/s390/include/asm/Kbuild +++ b/arch/s390/include/asm/Kbuild @@ -1,7 +1,6 @@ generic-y += clkdev.h -generic-y += hash.h generic-y += irq_work.h generic-y += mcs_spinlock.h generic-y += preempt.h diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h index b5dce6544d76..8d724718ec21 100644 --- a/arch/s390/include/asm/barrier.h +++ b/arch/s390/include/asm/barrier.h @@ -24,11 +24,14 @@ #define rmb() mb() #define wmb() mb() -#define read_barrier_depends() do { } while(0) +#define dma_rmb() rmb() +#define dma_wmb() wmb() #define smp_mb() mb() #define smp_rmb() rmb() #define smp_wmb() wmb() -#define smp_read_barrier_depends() read_barrier_depends() + +#define read_barrier_depends() do { } while (0) +#define smp_read_barrier_depends() do { } while (0) #define smp_mb__before_atomic() smp_mb() #define smp_mb__after_atomic() smp_mb() diff --git a/arch/s390/include/asm/cmpxchg.h b/arch/s390/include/asm/cmpxchg.h index 4236408070e5..6259895fcd97 100644 --- a/arch/s390/include/asm/cmpxchg.h +++ b/arch/s390/include/asm/cmpxchg.h @@ -11,200 +11,28 @@ #include <linux/types.h> #include <linux/bug.h> -extern void __xchg_called_with_bad_pointer(void); - -static inline unsigned long __xchg(unsigned long x, void *ptr, int size) -{ - unsigned long addr, old; - int shift; - - switch (size) { - case 1: - addr = (unsigned long) ptr; - shift = (3 ^ (addr & 3)) << 3; - addr ^= addr & 3; - asm volatile( - " l %0,%4\n" - "0: lr 0,%0\n" - " nr 0,%3\n" - " or 0,%2\n" - " cs %0,0,%4\n" - " jl 0b\n" - : "=&d" (old), "=Q" (*(int *) addr) - : "d" ((x & 0xff) << shift), "d" (~(0xff << shift)), - "Q" (*(int *) addr) : "memory", "cc", "0"); - return old >> shift; - case 2: - addr = (unsigned long) ptr; - shift = (2 ^ (addr & 2)) << 3; - addr ^= addr & 2; - asm volatile( - " l %0,%4\n" - "0: lr 0,%0\n" - " nr 0,%3\n" - " or 0,%2\n" - " cs %0,0,%4\n" - " jl 0b\n" - : "=&d" (old), "=Q" (*(int *) addr) - : "d" ((x & 0xffff) << shift), "d" (~(0xffff << shift)), - "Q" (*(int *) addr) : "memory", "cc", "0"); - return old >> shift; - case 4: - asm volatile( - " l %0,%3\n" - "0: cs %0,%2,%3\n" - " jl 0b\n" - : "=&d" (old), "=Q" (*(int *) ptr) - : "d" (x), "Q" (*(int *) ptr) - : "memory", "cc"); - return old; -#ifdef CONFIG_64BIT - case 8: - asm volatile( - " lg %0,%3\n" - "0: csg %0,%2,%3\n" - " jl 0b\n" - : "=&d" (old), "=m" (*(long *) ptr) - : "d" (x), "Q" (*(long *) ptr) - : "memory", "cc"); - return old; -#endif /* CONFIG_64BIT */ - } - __xchg_called_with_bad_pointer(); - return x; -} - -#define xchg(ptr, x) \ -({ \ - __typeof__(*(ptr)) __ret; \ - __ret = (__typeof__(*(ptr))) \ - __xchg((unsigned long)(x), (void *)(ptr), sizeof(*(ptr)));\ - __ret; \ +#define 
cmpxchg(ptr, o, n) \ +({ \ + __typeof__(*(ptr)) __o = (o); \ + __typeof__(*(ptr)) __n = (n); \ + (__typeof__(*(ptr))) __sync_val_compare_and_swap((ptr),__o,__n);\ }) -/* - * Atomic compare and exchange. Compare OLD with MEM, if identical, - * store NEW in MEM. Return the initial value in MEM. Success is - * indicated by comparing RETURN with OLD. - */ - -#define __HAVE_ARCH_CMPXCHG - -extern void __cmpxchg_called_with_bad_pointer(void); - -static inline unsigned long __cmpxchg(void *ptr, unsigned long old, - unsigned long new, int size) -{ - unsigned long addr, prev, tmp; - int shift; - - switch (size) { - case 1: - addr = (unsigned long) ptr; - shift = (3 ^ (addr & 3)) << 3; - addr ^= addr & 3; - asm volatile( - " l %0,%2\n" - "0: nr %0,%5\n" - " lr %1,%0\n" - " or %0,%3\n" - " or %1,%4\n" - " cs %0,%1,%2\n" - " jnl 1f\n" - " xr %1,%0\n" - " nr %1,%5\n" - " jnz 0b\n" - "1:" - : "=&d" (prev), "=&d" (tmp), "+Q" (*(int *) addr) - : "d" ((old & 0xff) << shift), - "d" ((new & 0xff) << shift), - "d" (~(0xff << shift)) - : "memory", "cc"); - return prev >> shift; - case 2: - addr = (unsigned long) ptr; - shift = (2 ^ (addr & 2)) << 3; - addr ^= addr & 2; - asm volatile( - " l %0,%2\n" - "0: nr %0,%5\n" - " lr %1,%0\n" - " or %0,%3\n" - " or %1,%4\n" - " cs %0,%1,%2\n" - " jnl 1f\n" - " xr %1,%0\n" - " nr %1,%5\n" - " jnz 0b\n" - "1:" - : "=&d" (prev), "=&d" (tmp), "+Q" (*(int *) addr) - : "d" ((old & 0xffff) << shift), - "d" ((new & 0xffff) << shift), - "d" (~(0xffff << shift)) - : "memory", "cc"); - return prev >> shift; - case 4: - asm volatile( - " cs %0,%3,%1\n" - : "=&d" (prev), "=Q" (*(int *) ptr) - : "0" (old), "d" (new), "Q" (*(int *) ptr) - : "memory", "cc"); - return prev; -#ifdef CONFIG_64BIT - case 8: - asm volatile( - " csg %0,%3,%1\n" - : "=&d" (prev), "=Q" (*(long *) ptr) - : "0" (old), "d" (new), "Q" (*(long *) ptr) - : "memory", "cc"); - return prev; -#endif /* CONFIG_64BIT */ - } - __cmpxchg_called_with_bad_pointer(); - return old; -} - -#define cmpxchg(ptr, o, n) \ -({ \ - __typeof__(*(ptr)) __ret; \ - __ret = (__typeof__(*(ptr))) \ - __cmpxchg((ptr), (unsigned long)(o), (unsigned long)(n), \ - sizeof(*(ptr))); \ - __ret; \ -}) +#define cmpxchg64 cmpxchg +#define cmpxchg_local cmpxchg +#define cmpxchg64_local cmpxchg -#ifdef CONFIG_64BIT -#define cmpxchg64(ptr, o, n) \ +#define xchg(ptr, x) \ ({ \ - cmpxchg((ptr), (o), (n)); \ + __typeof__(ptr) __ptr = (ptr); \ + __typeof__(*(ptr)) __old; \ + do { \ + __old = *__ptr; \ + } while (!__sync_bool_compare_and_swap(__ptr, __old, x)); \ + __old; \ }) -#else /* CONFIG_64BIT */ -static inline unsigned long long __cmpxchg64(void *ptr, - unsigned long long old, - unsigned long long new) -{ - register_pair rp_old = {.pair = old}; - register_pair rp_new = {.pair = new}; - unsigned long long *ullptr = ptr; - asm volatile( - " cds %0,%2,%1" - : "+d" (rp_old), "+Q" (*ullptr) - : "d" (rp_new) - : "memory", "cc"); - return rp_old.pair; -} - -#define cmpxchg64(ptr, o, n) \ -({ \ - __typeof__(*(ptr)) __ret; \ - __ret = (__typeof__(*(ptr))) \ - __cmpxchg64((ptr), \ - (unsigned long long)(o), \ - (unsigned long long)(n)); \ - __ret; \ -}) -#endif /* CONFIG_64BIT */ +#define __HAVE_ARCH_CMPXCHG #define __cmpxchg_double_op(p1, p2, o1, o2, n1, n2, insn) \ ({ \ @@ -265,40 +93,4 @@ extern void __cmpxchg_double_called_with_bad_pointer(void); #define system_has_cmpxchg_double() 1 -#include <asm-generic/cmpxchg-local.h> - -static inline unsigned long __cmpxchg_local(void *ptr, - unsigned long old, - unsigned long new, int size) -{ - switch (size) { - case 
1: - case 2: - case 4: -#ifdef CONFIG_64BIT - case 8: -#endif - return __cmpxchg(ptr, old, new, size); - default: - return __cmpxchg_local_generic(ptr, old, new, size); - } - - return old; -} - -/* - * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make - * them available. - */ -#define cmpxchg_local(ptr, o, n) \ -({ \ - __typeof__(*(ptr)) __ret; \ - __ret = (__typeof__(*(ptr))) \ - __cmpxchg_local((ptr), (unsigned long)(o), \ - (unsigned long)(n), sizeof(*(ptr))); \ - __ret; \ -}) - -#define cmpxchg64_local(ptr, o, n) cmpxchg64((ptr), (o), (n)) - #endif /* __ASM_CMPXCHG_H */ diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h index f8c196984853..b91e960e4045 100644 --- a/arch/s390/include/asm/cputime.h +++ b/arch/s390/include/asm/cputime.h @@ -10,6 +10,8 @@ #include <linux/types.h> #include <asm/div64.h> +#define CPUTIME_PER_USEC 4096ULL +#define CPUTIME_PER_SEC (CPUTIME_PER_USEC * USEC_PER_SEC) /* We want to use full resolution of the CPU timer: 2**-12 micro-seconds. */ @@ -38,24 +40,24 @@ static inline unsigned long __div(unsigned long long n, unsigned long base) */ static inline unsigned long cputime_to_jiffies(const cputime_t cputime) { - return __div((__force unsigned long long) cputime, 4096000000ULL / HZ); + return __div((__force unsigned long long) cputime, CPUTIME_PER_SEC / HZ); } static inline cputime_t jiffies_to_cputime(const unsigned int jif) { - return (__force cputime_t)(jif * (4096000000ULL / HZ)); + return (__force cputime_t)(jif * (CPUTIME_PER_SEC / HZ)); } static inline u64 cputime64_to_jiffies64(cputime64_t cputime) { unsigned long long jif = (__force unsigned long long) cputime; - do_div(jif, 4096000000ULL / HZ); + do_div(jif, CPUTIME_PER_SEC / HZ); return jif; } static inline cputime64_t jiffies64_to_cputime64(const u64 jif) { - return (__force cputime64_t)(jif * (4096000000ULL / HZ)); + return (__force cputime64_t)(jif * (CPUTIME_PER_SEC / HZ)); } /* @@ -68,7 +70,7 @@ static inline unsigned int cputime_to_usecs(const cputime_t cputime) static inline cputime_t usecs_to_cputime(const unsigned int m) { - return (__force cputime_t)(m * 4096ULL); + return (__force cputime_t)(m * CPUTIME_PER_USEC); } #define usecs_to_cputime64(m) usecs_to_cputime(m) @@ -78,12 +80,12 @@ static inline cputime_t usecs_to_cputime(const unsigned int m) */ static inline unsigned int cputime_to_secs(const cputime_t cputime) { - return __div((__force unsigned long long) cputime, 2048000000) >> 1; + return __div((__force unsigned long long) cputime, CPUTIME_PER_SEC / 2) >> 1; } static inline cputime_t secs_to_cputime(const unsigned int s) { - return (__force cputime_t)(s * 4096000000ULL); + return (__force cputime_t)(s * CPUTIME_PER_SEC); } /* @@ -91,8 +93,8 @@ static inline cputime_t secs_to_cputime(const unsigned int s) */ static inline cputime_t timespec_to_cputime(const struct timespec *value) { - unsigned long long ret = value->tv_sec * 4096000000ULL; - return (__force cputime_t)(ret + value->tv_nsec * 4096 / 1000); + unsigned long long ret = value->tv_sec * CPUTIME_PER_SEC; + return (__force cputime_t)(ret + __div(value->tv_nsec * CPUTIME_PER_USEC, NSEC_PER_USEC)); } static inline void cputime_to_timespec(const cputime_t cputime, @@ -103,12 +105,12 @@ static inline void cputime_to_timespec(const cputime_t cputime, register_pair rp; rp.pair = __cputime >> 1; - asm ("dr %0,%1" : "+d" (rp) : "d" (2048000000UL)); - value->tv_nsec = rp.subreg.even * 1000 / 4096; + asm ("dr %0,%1" : "+d" (rp) : "d" (CPUTIME_PER_SEC / 2)); + value->tv_nsec = 
rp.subreg.even * NSEC_PER_USEC / CPUTIME_PER_USEC; value->tv_sec = rp.subreg.odd; #else - value->tv_nsec = (__cputime % 4096000000ULL) * 1000 / 4096; - value->tv_sec = __cputime / 4096000000ULL; + value->tv_nsec = (__cputime % CPUTIME_PER_SEC) * NSEC_PER_USEC / CPUTIME_PER_USEC; + value->tv_sec = __cputime / CPUTIME_PER_SEC; #endif } @@ -119,8 +121,8 @@ static inline void cputime_to_timespec(const cputime_t cputime, */ static inline cputime_t timeval_to_cputime(const struct timeval *value) { - unsigned long long ret = value->tv_sec * 4096000000ULL; - return (__force cputime_t)(ret + value->tv_usec * 4096ULL); + unsigned long long ret = value->tv_sec * CPUTIME_PER_SEC; + return (__force cputime_t)(ret + value->tv_usec * CPUTIME_PER_USEC); } static inline void cputime_to_timeval(const cputime_t cputime, @@ -131,12 +133,12 @@ static inline void cputime_to_timeval(const cputime_t cputime, register_pair rp; rp.pair = __cputime >> 1; - asm ("dr %0,%1" : "+d" (rp) : "d" (2048000000UL)); - value->tv_usec = rp.subreg.even / 4096; + asm ("dr %0,%1" : "+d" (rp) : "d" (CPUTIME_PER_USEC / 2)); + value->tv_usec = rp.subreg.even / CPUTIME_PER_USEC; value->tv_sec = rp.subreg.odd; #else - value->tv_usec = (__cputime % 4096000000ULL) / 4096; - value->tv_sec = __cputime / 4096000000ULL; + value->tv_usec = (__cputime % CPUTIME_PER_SEC) / CPUTIME_PER_USEC; + value->tv_sec = __cputime / CPUTIME_PER_SEC; #endif } @@ -146,13 +148,13 @@ static inline void cputime_to_timeval(const cputime_t cputime, static inline clock_t cputime_to_clock_t(cputime_t cputime) { unsigned long long clock = (__force unsigned long long) cputime; - do_div(clock, 4096000000ULL / USER_HZ); + do_div(clock, CPUTIME_PER_SEC / USER_HZ); return clock; } static inline cputime_t clock_t_to_cputime(unsigned long x) { - return (__force cputime_t)(x * (4096000000ULL / USER_HZ)); + return (__force cputime_t)(x * (CPUTIME_PER_SEC / USER_HZ)); } /* @@ -161,7 +163,7 @@ static inline cputime_t clock_t_to_cputime(unsigned long x) static inline clock_t cputime64_to_clock_t(cputime64_t cputime) { unsigned long long clock = (__force unsigned long long) cputime; - do_div(clock, 4096000000ULL / USER_HZ); + do_div(clock, CPUTIME_PER_SEC / USER_HZ); return clock; } diff --git a/arch/s390/include/asm/debug.h b/arch/s390/include/asm/debug.h index 530c15eb01e9..0206c8052328 100644 --- a/arch/s390/include/asm/debug.h +++ b/arch/s390/include/asm/debug.h @@ -151,9 +151,21 @@ debug_text_event(debug_info_t* id, int level, const char* txt) * stored in the s390dbf. See Documentation/s390/s390dbf.txt for more details! */ extern debug_entry_t * -debug_sprintf_event(debug_info_t* id,int level,char *string,...) +__debug_sprintf_event(debug_info_t *id, int level, char *string, ...) __attribute__ ((format(printf, 3, 4))); +#define debug_sprintf_event(_id, _level, _fmt, ...) \ +({ \ + debug_entry_t *__ret; \ + debug_info_t *__id = _id; \ + int __level = _level; \ + if ((!__id) || (__level > __id->level)) \ + __ret = NULL; \ + else \ + __ret = __debug_sprintf_event(__id, __level, \ + _fmt, ## __VA_ARGS__); \ + __ret; \ +}) static inline debug_entry_t* debug_exception(debug_info_t* id, int level, void* data, int length) @@ -194,9 +206,22 @@ debug_text_exception(debug_info_t* id, int level, const char* txt) * stored in the s390dbf. See Documentation/s390/s390dbf.txt for more details! */ extern debug_entry_t * -debug_sprintf_exception(debug_info_t* id,int level,char *string,...) +__debug_sprintf_exception(debug_info_t *id, int level, char *string, ...) 
__attribute__ ((format(printf, 3, 4))); +#define debug_sprintf_exception(_id, _level, _fmt, ...) \ +({ \ + debug_entry_t *__ret; \ + debug_info_t *__id = _id; \ + int __level = _level; \ + if ((!__id) || (__level > __id->level)) \ + __ret = NULL; \ + else \ + __ret = __debug_sprintf_exception(__id, __level, \ + _fmt, ## __VA_ARGS__);\ + __ret; \ +}) + int debug_register_view(debug_info_t* id, struct debug_view* view); int debug_unregister_view(debug_info_t* id, struct debug_view* view); diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h index 3aef8afec336..abb618f1ead2 100644 --- a/arch/s390/include/asm/ftrace.h +++ b/arch/s390/include/asm/ftrace.h @@ -1,25 +1,69 @@ #ifndef _ASM_S390_FTRACE_H #define _ASM_S390_FTRACE_H +#define ARCH_SUPPORTS_FTRACE_OPS 1 + +#define MCOUNT_INSN_SIZE 24 +#define MCOUNT_RETURN_FIXUP 18 + #ifndef __ASSEMBLY__ -extern void _mcount(void); +#define ftrace_return_address(n) __builtin_return_address(n) + +void _mcount(void); +void ftrace_caller(void); + extern char ftrace_graph_caller_end; +extern unsigned long ftrace_plt; struct dyn_arch_ftrace { }; -#define MCOUNT_ADDR ((long)_mcount) +#define MCOUNT_ADDR ((unsigned long)_mcount) +#define FTRACE_ADDR ((unsigned long)ftrace_caller) +#define KPROBE_ON_FTRACE_NOP 0 +#define KPROBE_ON_FTRACE_CALL 1 static inline unsigned long ftrace_call_adjust(unsigned long addr) { return addr; } -#endif /* __ASSEMBLY__ */ +struct ftrace_insn { + u16 opc; + s32 disp; +} __packed; + +static inline void ftrace_generate_nop_insn(struct ftrace_insn *insn) +{ +#ifdef CONFIG_FUNCTION_TRACER + /* jg .+24 */ + insn->opc = 0xc0f4; + insn->disp = MCOUNT_INSN_SIZE / 2; +#endif +} -#define MCOUNT_INSN_SIZE 18 +static inline int is_ftrace_nop(struct ftrace_insn *insn) +{ +#ifdef CONFIG_FUNCTION_TRACER + if (insn->disp == MCOUNT_INSN_SIZE / 2) + return 1; +#endif + return 0; +} -#define ARCH_SUPPORTS_FTRACE_OPS 1 +static inline void ftrace_generate_call_insn(struct ftrace_insn *insn, + unsigned long ip) +{ +#ifdef CONFIG_FUNCTION_TRACER + unsigned long target; + + /* brasl r0,ftrace_caller */ + target = is_module_addr((void *) ip) ? ftrace_plt : FTRACE_ADDR; + insn->opc = 0xc005; + insn->disp = (target - ip) / 2; +#endif +} +#endif /* __ASSEMBLY__ */ #endif /* _ASM_S390_FTRACE_H */ diff --git a/arch/s390/include/asm/idle.h b/arch/s390/include/asm/idle.h index 6af037f574b8..113cd963dbbe 100644 --- a/arch/s390/include/asm/idle.h +++ b/arch/s390/include/asm/idle.h @@ -9,9 +9,10 @@ #include <linux/types.h> #include <linux/device.h> +#include <linux/seqlock.h> struct s390_idle_data { - unsigned int sequence; + seqcount_t seqcount; unsigned long long idle_count; unsigned long long idle_time; unsigned long long clock_idle_enter; diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h index 6ad9013c67e7..30fd5c84680e 100644 --- a/arch/s390/include/asm/io.h +++ b/arch/s390/include/asm/io.h @@ -39,6 +39,15 @@ static inline void iounmap(volatile void __iomem *addr) { } +static inline void __iomem *ioport_map(unsigned long port, unsigned int nr) +{ + return NULL; +} + +static inline void ioport_unmap(void __iomem *p) +{ +} + /* * s390 needs a private implementation of pci_iomap since ioremap with its * offset parameter isn't sufficient. 
That's because BAR spaces are not diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h index b0d5f0a97a01..343ea7c987aa 100644 --- a/arch/s390/include/asm/irq.h +++ b/arch/s390/include/asm/irq.h @@ -1,11 +1,11 @@ #ifndef _ASM_IRQ_H #define _ASM_IRQ_H -#define EXT_INTERRUPT 1 -#define IO_INTERRUPT 2 -#define THIN_INTERRUPT 3 +#define EXT_INTERRUPT 0 +#define IO_INTERRUPT 1 +#define THIN_INTERRUPT 2 -#define NR_IRQS_BASE 4 +#define NR_IRQS_BASE 3 #ifdef CONFIG_PCI_NR_MSI # define NR_IRQS (NR_IRQS_BASE + CONFIG_PCI_NR_MSI) @@ -13,9 +13,6 @@ # define NR_IRQS NR_IRQS_BASE #endif -/* This number is used when no interrupt has been assigned */ -#define NO_IRQ 0 - /* External interruption codes */ #define EXT_IRQ_INTERRUPT_KEY 0x0040 #define EXT_IRQ_CLK_COMP 0x1004 diff --git a/arch/s390/include/asm/kprobes.h b/arch/s390/include/asm/kprobes.h index 98629173ce3b..b47ad3b642cc 100644 --- a/arch/s390/include/asm/kprobes.h +++ b/arch/s390/include/asm/kprobes.h @@ -60,6 +60,7 @@ typedef u16 kprobe_opcode_t; struct arch_specific_insn { /* copy of original instruction */ kprobe_opcode_t *insn; + unsigned int is_ftrace_insn : 1; }; struct prev_kprobe { diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h index 6cc51fe84410..34fbcac61133 100644 --- a/arch/s390/include/asm/lowcore.h +++ b/arch/s390/include/asm/lowcore.h @@ -147,7 +147,7 @@ struct _lowcore { __u32 softirq_pending; /* 0x02ec */ __u32 percpu_offset; /* 0x02f0 */ __u32 machine_flags; /* 0x02f4 */ - __u32 ftrace_func; /* 0x02f8 */ + __u8 pad_0x02f8[0x02fc-0x02f8]; /* 0x02f8 */ __u32 spinlock_lockval; /* 0x02fc */ __u8 pad_0x0300[0x0e00-0x0300]; /* 0x0300 */ @@ -297,7 +297,7 @@ struct _lowcore { __u64 percpu_offset; /* 0x0378 */ __u64 vdso_per_cpu_data; /* 0x0380 */ __u64 machine_flags; /* 0x0388 */ - __u64 ftrace_func; /* 0x0390 */ + __u8 pad_0x0390[0x0398-0x0390]; /* 0x0390 */ __u64 gmap; /* 0x0398 */ __u32 spinlock_lockval; /* 0x03a0 */ __u8 pad_0x03a0[0x0400-0x03a4]; /* 0x03a4 */ diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h index 3815bfea1b2d..f49b71954654 100644 --- a/arch/s390/include/asm/mmu_context.h +++ b/arch/s390/include/asm/mmu_context.h @@ -120,4 +120,15 @@ static inline void arch_exit_mmap(struct mm_struct *mm) { } +static inline void arch_unmap(struct mm_struct *mm, + struct vm_area_struct *vma, + unsigned long start, unsigned long end) +{ +} + +static inline void arch_bprm_mm_init(struct mm_struct *mm, + struct vm_area_struct *vma) +{ +} + #endif /* __S390_MMU_CONTEXT_H */ diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h index c030900320e0..ef803c202d42 100644 --- a/arch/s390/include/asm/pci.h +++ b/arch/s390/include/asm/pci.h @@ -50,10 +50,6 @@ struct zpci_fmb { atomic64_t unmapped_pages; } __packed __aligned(16); -#define ZPCI_MSI_VEC_BITS 11 -#define ZPCI_MSI_VEC_MAX (1 << ZPCI_MSI_VEC_BITS) -#define ZPCI_MSI_VEC_MASK (ZPCI_MSI_VEC_MAX - 1) - enum zpci_state { ZPCI_FN_STATE_RESERVED, ZPCI_FN_STATE_STANDBY, @@ -90,6 +86,7 @@ struct zpci_dev { /* IRQ stuff */ u64 msi_addr; /* MSI address */ + unsigned int max_msi; /* maximum number of MSI's */ struct airq_iv *aibv; /* adapter interrupt bit vector */ unsigned int aisb; /* number of the summary bit */ diff --git a/arch/s390/include/asm/pci_io.h b/arch/s390/include/asm/pci_io.h index d194d544d694..f664e96f48c7 100644 --- a/arch/s390/include/asm/pci_io.h +++ b/arch/s390/include/asm/pci_io.h @@ -139,7 +139,8 @@ static inline int zpci_memcpy_fromio(void *dst, int size, rc = 0; 
while (n > 0) { - size = zpci_get_max_write_size((u64) src, (u64) dst, n, 8); + size = zpci_get_max_write_size((u64 __force) src, + (u64) dst, n, 8); req = ZPCI_CREATE_REQ(entry->fh, entry->bar, size); rc = zpci_read_single(req, dst, offset, size); if (rc) @@ -162,7 +163,8 @@ static inline int zpci_memcpy_toio(volatile void __iomem *dst, return -EINVAL; while (n > 0) { - size = zpci_get_max_write_size((u64) dst, (u64) src, n, 128); + size = zpci_get_max_write_size((u64 __force) dst, + (u64) src, n, 128); req = ZPCI_CREATE_REQ(entry->fh, entry->bar, size); if (size > 8) /* main path */ diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h index d39a31c3cdf2..e510b9460efa 100644 --- a/arch/s390/include/asm/pgalloc.h +++ b/arch/s390/include/asm/pgalloc.h @@ -22,8 +22,6 @@ unsigned long *page_table_alloc(struct mm_struct *); void page_table_free(struct mm_struct *, unsigned long *); void page_table_free_rcu(struct mmu_gather *, unsigned long *, unsigned long); -void page_table_reset_pgste(struct mm_struct *, unsigned long, unsigned long, - bool init_skey); int set_guest_storage_key(struct mm_struct *mm, unsigned long addr, unsigned long key, bool nq); diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 57c882761dea..5e102422c9ab 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -133,6 +133,18 @@ extern unsigned long MODULES_END; #define MODULES_LEN (1UL << 31) #endif +static inline int is_module_addr(void *addr) +{ +#ifdef CONFIG_64BIT + BUILD_BUG_ON(MODULES_LEN > (1UL << 31)); + if (addr < (void *)MODULES_VADDR) + return 0; + if (addr > (void *)MODULES_END) + return 0; +#endif + return 1; +} + /* * A 31 bit pagetable entry of S390 has following format: * | PFRA | | OS | @@ -479,6 +491,11 @@ static inline int mm_has_pgste(struct mm_struct *mm) return 0; } +/* + * In the case that a guest uses storage keys + * faults should no longer be backed by zero pages + */ +#define mm_forbids_zeropage mm_use_skey static inline int mm_use_skey(struct mm_struct *mm) { #ifdef CONFIG_PGSTE @@ -1634,6 +1651,19 @@ static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm, return pmd; } +#define __HAVE_ARCH_PMDP_GET_AND_CLEAR_FULL +static inline pmd_t pmdp_get_and_clear_full(struct mm_struct *mm, + unsigned long address, + pmd_t *pmdp, int full) +{ + pmd_t pmd = *pmdp; + + if (!full) + pmdp_flush_lazy(mm, address, pmdp); + pmd_clear(pmdp); + return pmd; +} + #define __HAVE_ARCH_PMDP_CLEAR_FLUSH static inline pmd_t pmdp_clear_flush(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp) @@ -1746,7 +1776,8 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset) extern int vmem_add_mapping(unsigned long start, unsigned long size); extern int vmem_remove_mapping(unsigned long start, unsigned long size); extern int s390_enable_sie(void); -extern void s390_enable_skey(void); +extern int s390_enable_skey(void); +extern void s390_reset_cmma(struct mm_struct *mm); /* * No page table caches to initialise diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h index d559bdb03d18..bed05ea7ec27 100644 --- a/arch/s390/include/asm/processor.h +++ b/arch/s390/include/asm/processor.h @@ -217,8 +217,6 @@ static inline unsigned short stap(void) */ static inline void cpu_relax(void) { - if (MACHINE_HAS_DIAG44) - asm volatile("diag 0,0,68"); barrier(); } diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h index d6bdf906caa5..0e37cd041241 100644 --- 
a/arch/s390/include/asm/spinlock.h +++ b/arch/s390/include/asm/spinlock.h @@ -18,14 +18,7 @@ extern int spin_retry; static inline int _raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new) { - unsigned int old_expected = old; - - asm volatile( - " cs %0,%3,%1" - : "=d" (old), "=Q" (*lock) - : "0" (old), "d" (new), "Q" (*lock) - : "cc", "memory" ); - return old == old_expected; + return __sync_bool_compare_and_swap(lock, old, new); } /* diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h index 572c59949004..06d8741ad6f4 100644 --- a/arch/s390/include/asm/tlb.h +++ b/arch/s390/include/asm/tlb.h @@ -121,6 +121,7 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, #ifdef CONFIG_64BIT if (tlb->mm->context.asce_limit <= (1UL << 31)) return; + pgtable_pmd_page_dtor(virt_to_page(pmd)); tlb_remove_table(tlb, pmd); #endif } diff --git a/arch/s390/include/uapi/asm/socket.h b/arch/s390/include/uapi/asm/socket.h index e031332096d7..296942d56e6a 100644 --- a/arch/s390/include/uapi/asm/socket.h +++ b/arch/s390/include/uapi/asm/socket.h @@ -86,4 +86,9 @@ #define SO_BPF_EXTENSIONS 48 +#define SO_INCOMING_CPU 49 + +#define SO_ATTACH_BPF 50 +#define SO_DETACH_BPF SO_DETACH_FILTER + #endif /* _ASM_SOCKET_H */ diff --git a/arch/s390/include/uapi/asm/unistd.h b/arch/s390/include/uapi/asm/unistd.h index 4197c89c52d4..2b446cf0cc65 100644 --- a/arch/s390/include/uapi/asm/unistd.h +++ b/arch/s390/include/uapi/asm/unistd.h @@ -287,7 +287,9 @@ #define __NR_getrandom 349 #define __NR_memfd_create 350 #define __NR_bpf 351 -#define NR_syscalls 352 +#define __NR_s390_pci_mmio_write 352 +#define __NR_s390_pci_mmio_read 353 +#define NR_syscalls 354 /* * There are some system calls that are not present on 64 bit, some diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c index ef279a136801..e07e91605353 100644 --- a/arch/s390/kernel/asm-offsets.c +++ b/arch/s390/kernel/asm-offsets.c @@ -17,8 +17,8 @@ * Make sure that the compiler is new enough. We want a compiler that * is known to work with the "Q" assembler constraint. */ -#if __GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 3) -#error Your compiler is too old; please use version 3.3.3 or newer +#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 3) +#error Your compiler is too old; please use version 4.3 or newer #endif int main(void) @@ -156,7 +156,6 @@ int main(void) DEFINE(__LC_INT_CLOCK, offsetof(struct _lowcore, int_clock)); DEFINE(__LC_MCCK_CLOCK, offsetof(struct _lowcore, mcck_clock)); DEFINE(__LC_MACHINE_FLAGS, offsetof(struct _lowcore, machine_flags)); - DEFINE(__LC_FTRACE_FUNC, offsetof(struct _lowcore, ftrace_func)); DEFINE(__LC_DUMP_REIPL, offsetof(struct _lowcore, ipib)); BLANK(); DEFINE(__LC_CPU_TIMER_SAVE_AREA, offsetof(struct _lowcore, cpu_timer_save_area)); diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c index 009f5eb11125..34d5fa7b01b5 100644 --- a/arch/s390/kernel/compat_signal.c +++ b/arch/s390/kernel/compat_signal.c @@ -434,7 +434,7 @@ static int setup_frame32(struct ksignal *ksig, sigset_t *set, ksig->ka.sa.sa_restorer | PSW32_ADDR_AMODE; } else { /* Signal frames without vectors registers are short ! 
*/ - __u16 __user *svc = (void *) frame + frame_size - 2; + __u16 __user *svc = (void __user *) frame + frame_size - 2; if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn, svc)) return -EFAULT; restorer = (unsigned long __force) svc | PSW32_ADDR_AMODE; diff --git a/arch/s390/kernel/compat_wrapper.c b/arch/s390/kernel/compat_wrapper.c index c4f7a3d655b8..d7fa2f0f1425 100644 --- a/arch/s390/kernel/compat_wrapper.c +++ b/arch/s390/kernel/compat_wrapper.c @@ -218,3 +218,5 @@ COMPAT_SYSCALL_WRAP3(seccomp, unsigned int, op, unsigned int, flags, const char COMPAT_SYSCALL_WRAP3(getrandom, char __user *, buf, size_t, count, unsigned int, flags) COMPAT_SYSCALL_WRAP2(memfd_create, const char __user *, uname, unsigned int, flags) COMPAT_SYSCALL_WRAP3(bpf, int, cmd, union bpf_attr *, attr, unsigned int, size); +COMPAT_SYSCALL_WRAP3(s390_pci_mmio_write, const unsigned long, mmio_addr, const void __user *, user_buffer, const size_t, length); +COMPAT_SYSCALL_WRAP3(s390_pci_mmio_read, const unsigned long, mmio_addr, void __user *, user_buffer, const size_t, length); diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c index ee8390da6ea7..c1f21aca76e7 100644 --- a/arch/s390/kernel/debug.c +++ b/arch/s390/kernel/debug.c @@ -1019,7 +1019,7 @@ debug_count_numargs(char *string) */ debug_entry_t* -debug_sprintf_event(debug_info_t* id, int level,char *string,...) +__debug_sprintf_event(debug_info_t *id, int level, char *string, ...) { va_list ap; int numargs,idx; @@ -1027,8 +1027,6 @@ debug_sprintf_event(debug_info_t* id, int level,char *string,...) debug_sprintf_entry_t *curr_event; debug_entry_t *active; - if((!id) || (level > id->level)) - return NULL; if (!debug_active || !id->areas) return NULL; numargs=debug_count_numargs(string); @@ -1050,14 +1048,14 @@ debug_sprintf_event(debug_info_t* id, int level,char *string,...) return active; } -EXPORT_SYMBOL(debug_sprintf_event); +EXPORT_SYMBOL(__debug_sprintf_event); /* * debug_sprintf_exception: */ debug_entry_t* -debug_sprintf_exception(debug_info_t* id, int level,char *string,...) +__debug_sprintf_exception(debug_info_t *id, int level, char *string, ...) { va_list ap; int numargs,idx; @@ -1065,8 +1063,6 @@ debug_sprintf_exception(debug_info_t* id, int level,char *string,...) debug_sprintf_entry_t *curr_event; debug_entry_t *active; - if((!id) || (level > id->level)) - return NULL; if (!debug_active || !id->areas) return NULL; @@ -1089,7 +1085,7 @@ debug_sprintf_exception(debug_info_t* id, int level,char *string,...) 
return active; } -EXPORT_SYMBOL(debug_sprintf_exception); +EXPORT_SYMBOL(__debug_sprintf_exception); /* * debug_register_view: diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c index acb412442e5e..a99852e96a77 100644 --- a/arch/s390/kernel/dumpstack.c +++ b/arch/s390/kernel/dumpstack.c @@ -191,7 +191,8 @@ void die(struct pt_regs *regs, const char *str) console_verbose(); spin_lock_irq(&die_lock); bust_spinlocks(1); - printk("%s: %04x [#%d] ", str, regs->int_code & 0xffff, ++die_counter); + printk("%s: %04x ilc:%d [#%d] ", str, regs->int_code & 0xffff, + regs->int_code >> 17, ++die_counter); #ifdef CONFIG_PREEMPT printk("PREEMPT "); #endif diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index cef2879edff3..302ac1f7f8e7 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -12,7 +12,6 @@ #include <linux/errno.h> #include <linux/string.h> #include <linux/ctype.h> -#include <linux/ftrace.h> #include <linux/lockdep.h> #include <linux/module.h> #include <linux/pfn.h> @@ -490,8 +489,5 @@ void __init startup_init(void) detect_machine_facilities(); setup_topology(); sclp_early_detect(); -#ifdef CONFIG_DYNAMIC_FTRACE - S390_lowcore.ftrace_func = (unsigned long)ftrace_caller; -#endif lockdep_on(); } diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 70203265196f..398329b2b518 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -53,7 +53,7 @@ _PIF_WORK = (_PIF_PER_TRAP) .macro TRACE_IRQS_ON #ifdef CONFIG_TRACE_IRQFLAGS basr %r2,%r0 - l %r1,BASED(.Lhardirqs_on) + l %r1,BASED(.Lc_hardirqs_on) basr %r14,%r1 # call trace_hardirqs_on_caller #endif .endm @@ -61,7 +61,7 @@ _PIF_WORK = (_PIF_PER_TRAP) .macro TRACE_IRQS_OFF #ifdef CONFIG_TRACE_IRQFLAGS basr %r2,%r0 - l %r1,BASED(.Lhardirqs_off) + l %r1,BASED(.Lc_hardirqs_off) basr %r14,%r1 # call trace_hardirqs_off_caller #endif .endm @@ -70,7 +70,7 @@ _PIF_WORK = (_PIF_PER_TRAP) #ifdef CONFIG_LOCKDEP tm __PT_PSW+1(%r11),0x01 # returning to user ? jz .+10 - l %r1,BASED(.Llockdep_sys_exit) + l %r1,BASED(.Lc_lockdep_sys_exit) basr %r14,%r1 # call lockdep_sys_exit #endif .endm @@ -87,8 +87,8 @@ _PIF_WORK = (_PIF_PER_TRAP) tmh %r8,0x0001 # interrupting from user ? jnz 1f lr %r14,%r9 - sl %r14,BASED(.Lcritical_start) - cl %r14,BASED(.Lcritical_length) + sl %r14,BASED(.Lc_critical_start) + cl %r14,BASED(.Lc_critical_length) jhe 0f la %r11,\savearea # inside critical section, do cleanup bras %r14,cleanup_critical @@ -162,7 +162,7 @@ ENTRY(__switch_to) lm %r6,%r15,__SF_GPRS(%r15) # load gprs of next task br %r14 -__critical_start: +.L__critical_start: /* * SVC interrupt handler routine. System calls are synchronous events and * are executed with interrupts enabled. 
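
The entry.S hunks below (and the matching entry64.S hunks further down) are almost entirely mechanical renames of branch targets such as sysc_do_svc, io_return and mcck_skip to .Lsysc_do_svc, .Lio_return and .Lmcck_skip. The .L prefix is the GNU assembler's local-label convention: such symbols are resolved at assembly time and never written to the object's symbol table, so these purely internal jump targets stop showing up as separate symbols in kallsyms, backtraces or objdump output between the real entry points. A minimal sketch of the effect, with made-up marker names and assuming GCC plus GNU as:

    /*
     * Sketch only: two labels emitted from inline asm.  The plain label is
     * kept as a local text symbol (visible to `nm`), while the ".L" label
     * is assembler-local and never reaches the object's symbol table.
     * Both marker names are invented for illustration.
     */
    void label_demo(void)
    {
            __asm__ volatile(
                    "kept_in_symtab_marker:\n\t"    /* listed by nm/objdump */
                    ".Ldiscarded_marker:\n\t"       /* discarded by the assembler */
                    "nop"
            );
    }

Compiling this and running nm on the object should show kept_in_symtab_marker but no trace of .Ldiscarded_marker, which is the property the renames below appear to be after.
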
@@ -170,145 +170,145 @@ __critical_start: ENTRY(system_call) stpt __LC_SYNC_ENTER_TIMER -sysc_stm: +.Lsysc_stm: stm %r8,%r15,__LC_SAVE_AREA_SYNC l %r12,__LC_THREAD_INFO l %r13,__LC_SVC_NEW_PSW+4 lhi %r14,_PIF_SYSCALL -sysc_per: +.Lsysc_per: l %r15,__LC_KERNEL_STACK la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs -sysc_vtime: +.Lsysc_vtime: UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER stm %r0,%r7,__PT_R0(%r11) mvc __PT_R8(32,%r11),__LC_SAVE_AREA_SYNC mvc __PT_PSW(8,%r11),__LC_SVC_OLD_PSW mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC st %r14,__PT_FLAGS(%r11) -sysc_do_svc: +.Lsysc_do_svc: l %r10,__TI_sysc_table(%r12) # 31 bit system call table lh %r8,__PT_INT_CODE+2(%r11) sla %r8,2 # shift and test for svc0 - jnz sysc_nr_ok + jnz .Lsysc_nr_ok # svc 0: system call number in %r1 cl %r1,BASED(.Lnr_syscalls) - jnl sysc_nr_ok + jnl .Lsysc_nr_ok sth %r1,__PT_INT_CODE+2(%r11) lr %r8,%r1 sla %r8,2 -sysc_nr_ok: +.Lsysc_nr_ok: xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) st %r2,__PT_ORIG_GPR2(%r11) st %r7,STACK_FRAME_OVERHEAD(%r15) l %r9,0(%r8,%r10) # get system call addr. tm __TI_flags+3(%r12),_TIF_TRACE - jnz sysc_tracesys + jnz .Lsysc_tracesys basr %r14,%r9 # call sys_xxxx st %r2,__PT_R2(%r11) # store return value -sysc_return: +.Lsysc_return: LOCKDEP_SYS_EXIT -sysc_tif: +.Lsysc_tif: tm __PT_PSW+1(%r11),0x01 # returning to user ? - jno sysc_restore + jno .Lsysc_restore tm __PT_FLAGS+3(%r11),_PIF_WORK - jnz sysc_work + jnz .Lsysc_work tm __TI_flags+3(%r12),_TIF_WORK - jnz sysc_work # check for thread work + jnz .Lsysc_work # check for thread work tm __LC_CPU_FLAGS+3,_CIF_WORK - jnz sysc_work -sysc_restore: + jnz .Lsysc_work +.Lsysc_restore: mvc __LC_RETURN_PSW(8),__PT_PSW(%r11) stpt __LC_EXIT_TIMER lm %r0,%r15,__PT_R0(%r11) lpsw __LC_RETURN_PSW -sysc_done: +.Lsysc_done: # # One of the work bits is on. Find out which one. 
# -sysc_work: +.Lsysc_work: tm __LC_CPU_FLAGS+3,_CIF_MCCK_PENDING - jo sysc_mcck_pending + jo .Lsysc_mcck_pending tm __TI_flags+3(%r12),_TIF_NEED_RESCHED - jo sysc_reschedule + jo .Lsysc_reschedule tm __PT_FLAGS+3(%r11),_PIF_PER_TRAP - jo sysc_singlestep + jo .Lsysc_singlestep tm __TI_flags+3(%r12),_TIF_SIGPENDING - jo sysc_sigpending + jo .Lsysc_sigpending tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME - jo sysc_notify_resume + jo .Lsysc_notify_resume tm __LC_CPU_FLAGS+3,_CIF_ASCE - jo sysc_uaccess - j sysc_return # beware of critical section cleanup + jo .Lsysc_uaccess + j .Lsysc_return # beware of critical section cleanup # # _TIF_NEED_RESCHED is set, call schedule # -sysc_reschedule: - l %r1,BASED(.Lschedule) - la %r14,BASED(sysc_return) +.Lsysc_reschedule: + l %r1,BASED(.Lc_schedule) + la %r14,BASED(.Lsysc_return) br %r1 # call schedule # # _CIF_MCCK_PENDING is set, call handler # -sysc_mcck_pending: - l %r1,BASED(.Lhandle_mcck) - la %r14,BASED(sysc_return) +.Lsysc_mcck_pending: + l %r1,BASED(.Lc_handle_mcck) + la %r14,BASED(.Lsysc_return) br %r1 # TIF bit will be cleared by handler # # _CIF_ASCE is set, load user space asce # -sysc_uaccess: +.Lsysc_uaccess: ni __LC_CPU_FLAGS+3,255-_CIF_ASCE lctl %c1,%c1,__LC_USER_ASCE # load primary asce - j sysc_return + j .Lsysc_return # # _TIF_SIGPENDING is set, call do_signal # -sysc_sigpending: +.Lsysc_sigpending: lr %r2,%r11 # pass pointer to pt_regs - l %r1,BASED(.Ldo_signal) + l %r1,BASED(.Lc_do_signal) basr %r14,%r1 # call do_signal tm __PT_FLAGS+3(%r11),_PIF_SYSCALL - jno sysc_return + jno .Lsysc_return lm %r2,%r7,__PT_R2(%r11) # load svc arguments l %r10,__TI_sysc_table(%r12) # 31 bit system call table xr %r8,%r8 # svc 0 returns -ENOSYS clc __PT_INT_CODE+2(2,%r11),BASED(.Lnr_syscalls+2) - jnl sysc_nr_ok # invalid svc number -> do svc 0 + jnl .Lsysc_nr_ok # invalid svc number -> do svc 0 lh %r8,__PT_INT_CODE+2(%r11) # load new svc number sla %r8,2 - j sysc_nr_ok # restart svc + j .Lsysc_nr_ok # restart svc # # _TIF_NOTIFY_RESUME is set, call do_notify_resume # -sysc_notify_resume: +.Lsysc_notify_resume: lr %r2,%r11 # pass pointer to pt_regs - l %r1,BASED(.Ldo_notify_resume) - la %r14,BASED(sysc_return) + l %r1,BASED(.Lc_do_notify_resume) + la %r14,BASED(.Lsysc_return) br %r1 # call do_notify_resume # # _PIF_PER_TRAP is set, call do_per_trap # -sysc_singlestep: +.Lsysc_singlestep: ni __PT_FLAGS+3(%r11),255-_PIF_PER_TRAP lr %r2,%r11 # pass pointer to pt_regs - l %r1,BASED(.Ldo_per_trap) - la %r14,BASED(sysc_return) + l %r1,BASED(.Lc_do_per_trap) + la %r14,BASED(.Lsysc_return) br %r1 # call do_per_trap # # call tracehook_report_syscall_entry/tracehook_report_syscall_exit before # and after the system call # -sysc_tracesys: - l %r1,BASED(.Ltrace_enter) +.Lsysc_tracesys: + l %r1,BASED(.Lc_trace_enter) lr %r2,%r11 # pass pointer to pt_regs la %r3,0 xr %r0,%r0 @@ -316,22 +316,22 @@ sysc_tracesys: st %r0,__PT_R2(%r11) basr %r14,%r1 # call do_syscall_trace_enter cl %r2,BASED(.Lnr_syscalls) - jnl sysc_tracenogo + jnl .Lsysc_tracenogo lr %r8,%r2 sll %r8,2 l %r9,0(%r8,%r10) -sysc_tracego: +.Lsysc_tracego: lm %r3,%r7,__PT_R3(%r11) st %r7,STACK_FRAME_OVERHEAD(%r15) l %r2,__PT_ORIG_GPR2(%r11) basr %r14,%r9 # call sys_xxx st %r2,__PT_R2(%r11) # store return value -sysc_tracenogo: +.Lsysc_tracenogo: tm __TI_flags+3(%r12),_TIF_TRACE - jz sysc_return - l %r1,BASED(.Ltrace_exit) + jz .Lsysc_return + l %r1,BASED(.Lc_trace_exit) lr %r2,%r11 # pass pointer to pt_regs - la %r14,BASED(sysc_return) + la %r14,BASED(.Lsysc_return) br %r1 # call do_syscall_trace_exit # @@ 
-341,18 +341,18 @@ ENTRY(ret_from_fork) la %r11,STACK_FRAME_OVERHEAD(%r15) l %r12,__LC_THREAD_INFO l %r13,__LC_SVC_NEW_PSW+4 - l %r1,BASED(.Lschedule_tail) + l %r1,BASED(.Lc_schedule_tail) basr %r14,%r1 # call schedule_tail TRACE_IRQS_ON ssm __LC_SVC_NEW_PSW # reenable interrupts tm __PT_PSW+1(%r11),0x01 # forking a kernel thread ? - jne sysc_tracenogo + jne .Lsysc_tracenogo # it's a kernel thread lm %r9,%r10,__PT_R9(%r11) # load gprs ENTRY(kernel_thread_starter) la %r2,0(%r10) basr %r14,%r9 - j sysc_tracenogo + j .Lsysc_tracenogo /* * Program check handler routine @@ -369,7 +369,7 @@ ENTRY(pgm_check_handler) tmh %r8,0x4000 # PER bit set in old PSW ? jnz 0f # -> enabled, can't be a double fault tm __LC_PGM_ILC+3,0x80 # check for per exception - jnz pgm_svcper # -> single stepped svc + jnz .Lpgm_svcper # -> single stepped svc 0: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) j 2f @@ -386,42 +386,42 @@ ENTRY(pgm_check_handler) jz 0f l %r1,__TI_task(%r12) tmh %r8,0x0001 # kernel per event ? - jz pgm_kprobe + jz .Lpgm_kprobe oi __PT_FLAGS+3(%r11),_PIF_PER_TRAP mvc __THREAD_per_address(4,%r1),__LC_PER_ADDRESS mvc __THREAD_per_cause(2,%r1),__LC_PER_CODE mvc __THREAD_per_paid(1,%r1),__LC_PER_ACCESS_ID 0: REENABLE_IRQS xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) - l %r1,BASED(.Ljump_table) + l %r1,BASED(.Lc_jump_table) la %r10,0x7f n %r10,__PT_INT_CODE(%r11) - je sysc_return + je .Lsysc_return sll %r10,2 l %r1,0(%r10,%r1) # load address of handler routine lr %r2,%r11 # pass pointer to pt_regs basr %r14,%r1 # branch to interrupt-handler - j sysc_return + j .Lsysc_return # # PER event in supervisor state, must be kprobes # -pgm_kprobe: +.Lpgm_kprobe: REENABLE_IRQS xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) - l %r1,BASED(.Ldo_per_trap) + l %r1,BASED(.Lc_do_per_trap) lr %r2,%r11 # pass pointer to pt_regs basr %r14,%r1 # call do_per_trap - j sysc_return + j .Lsysc_return # # single stepped system call # -pgm_svcper: +.Lpgm_svcper: mvc __LC_RETURN_PSW(4),__LC_SVC_NEW_PSW - mvc __LC_RETURN_PSW+4(4),BASED(.Lsysc_per) + mvc __LC_RETURN_PSW+4(4),BASED(.Lc_sysc_per) lhi %r14,_PIF_SYSCALL | _PIF_PER_TRAP - lpsw __LC_RETURN_PSW # branch to sysc_per and enable irqs + lpsw __LC_RETURN_PSW # branch to .Lsysc_per and enable irqs /* * IO interrupt handler routine @@ -435,9 +435,9 @@ ENTRY(io_int_handler) l %r13,__LC_SVC_NEW_PSW+4 lm %r8,%r9,__LC_IO_OLD_PSW tmh %r8,0x0001 # interrupting from user ? - jz io_skip + jz .Lio_skip UPDATE_VTIME %r14,%r15,__LC_ASYNC_ENTER_TIMER -io_skip: +.Lio_skip: SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT stm %r0,%r7,__PT_R0(%r11) mvc __PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC @@ -446,35 +446,35 @@ io_skip: xc __PT_FLAGS(4,%r11),__PT_FLAGS(%r11) TRACE_IRQS_OFF xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) -io_loop: - l %r1,BASED(.Ldo_IRQ) +.Lio_loop: + l %r1,BASED(.Lc_do_IRQ) lr %r2,%r11 # pass pointer to pt_regs lhi %r3,IO_INTERRUPT tm __PT_INT_CODE+8(%r11),0x80 # adapter interrupt ? - jz io_call + jz .Lio_call lhi %r3,THIN_INTERRUPT -io_call: +.Lio_call: basr %r14,%r1 # call do_IRQ tm __LC_MACHINE_FLAGS+2,0x10 # MACHINE_FLAG_LPAR - jz io_return + jz .Lio_return tpi 0 - jz io_return + jz .Lio_return mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID - j io_loop -io_return: + j .Lio_loop +.Lio_return: LOCKDEP_SYS_EXIT TRACE_IRQS_ON -io_tif: +.Lio_tif: tm __TI_flags+3(%r12),_TIF_WORK - jnz io_work # there is work to do (signals etc.) + jnz .Lio_work # there is work to do (signals etc.) 
tm __LC_CPU_FLAGS+3,_CIF_WORK - jnz io_work -io_restore: + jnz .Lio_work +.Lio_restore: mvc __LC_RETURN_PSW(8),__PT_PSW(%r11) stpt __LC_EXIT_TIMER lm %r0,%r15,__PT_R0(%r11) lpsw __LC_RETURN_PSW -io_done: +.Lio_done: # # There is work todo, find out in which context we have been interrupted: @@ -483,15 +483,15 @@ io_done: # the preemption counter and if it is zero call preempt_schedule_irq # Before any work can be done, a switch to the kernel stack is required. # -io_work: +.Lio_work: tm __PT_PSW+1(%r11),0x01 # returning to user ? - jo io_work_user # yes -> do resched & signal + jo .Lio_work_user # yes -> do resched & signal #ifdef CONFIG_PREEMPT # check for preemptive scheduling icm %r0,15,__TI_precount(%r12) - jnz io_restore # preemption disabled + jnz .Lio_restore # preemption disabled tm __TI_flags+3(%r12),_TIF_NEED_RESCHED - jno io_restore + jno .Lio_restore # switch to kernel stack l %r1,__PT_R15(%r11) ahi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE) @@ -499,20 +499,20 @@ io_work: xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) la %r11,STACK_FRAME_OVERHEAD(%r1) lr %r15,%r1 - # TRACE_IRQS_ON already done at io_return, call + # TRACE_IRQS_ON already done at .Lio_return, call # TRACE_IRQS_OFF to keep things symmetrical TRACE_IRQS_OFF - l %r1,BASED(.Lpreempt_irq) + l %r1,BASED(.Lc_preempt_irq) basr %r14,%r1 # call preempt_schedule_irq - j io_return + j .Lio_return #else - j io_restore + j .Lio_restore #endif # # Need to do work before returning to userspace, switch to kernel stack # -io_work_user: +.Lio_work_user: l %r1,__LC_KERNEL_STACK mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) @@ -522,74 +522,74 @@ io_work_user: # # One of the work bits is on. Find out which one. # -io_work_tif: +.Lio_work_tif: tm __LC_CPU_FLAGS+3(%r12),_CIF_MCCK_PENDING - jo io_mcck_pending + jo .Lio_mcck_pending tm __TI_flags+3(%r12),_TIF_NEED_RESCHED - jo io_reschedule + jo .Lio_reschedule tm __TI_flags+3(%r12),_TIF_SIGPENDING - jo io_sigpending + jo .Lio_sigpending tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME - jo io_notify_resume + jo .Lio_notify_resume tm __LC_CPU_FLAGS+3,_CIF_ASCE - jo io_uaccess - j io_return # beware of critical section cleanup + jo .Lio_uaccess + j .Lio_return # beware of critical section cleanup # # _CIF_MCCK_PENDING is set, call handler # -io_mcck_pending: - # TRACE_IRQS_ON already done at io_return - l %r1,BASED(.Lhandle_mcck) +.Lio_mcck_pending: + # TRACE_IRQS_ON already done at .Lio_return + l %r1,BASED(.Lc_handle_mcck) basr %r14,%r1 # TIF bit will be cleared by handler TRACE_IRQS_OFF - j io_return + j .Lio_return # # _CIF_ASCE is set, load user space asce # -io_uaccess: +.Lio_uaccess: ni __LC_CPU_FLAGS+3,255-_CIF_ASCE lctl %c1,%c1,__LC_USER_ASCE # load primary asce - j io_return + j .Lio_return # # _TIF_NEED_RESCHED is set, call schedule # -io_reschedule: - # TRACE_IRQS_ON already done at io_return - l %r1,BASED(.Lschedule) +.Lio_reschedule: + # TRACE_IRQS_ON already done at .Lio_return + l %r1,BASED(.Lc_schedule) ssm __LC_SVC_NEW_PSW # reenable interrupts basr %r14,%r1 # call scheduler ssm __LC_PGM_NEW_PSW # disable I/O and ext. 
interrupts TRACE_IRQS_OFF - j io_return + j .Lio_return # # _TIF_SIGPENDING is set, call do_signal # -io_sigpending: - # TRACE_IRQS_ON already done at io_return - l %r1,BASED(.Ldo_signal) +.Lio_sigpending: + # TRACE_IRQS_ON already done at .Lio_return + l %r1,BASED(.Lc_do_signal) ssm __LC_SVC_NEW_PSW # reenable interrupts lr %r2,%r11 # pass pointer to pt_regs basr %r14,%r1 # call do_signal ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts TRACE_IRQS_OFF - j io_return + j .Lio_return # # _TIF_SIGPENDING is set, call do_signal # -io_notify_resume: - # TRACE_IRQS_ON already done at io_return - l %r1,BASED(.Ldo_notify_resume) +.Lio_notify_resume: + # TRACE_IRQS_ON already done at .Lio_return + l %r1,BASED(.Lc_do_notify_resume) ssm __LC_SVC_NEW_PSW # reenable interrupts lr %r2,%r11 # pass pointer to pt_regs basr %r14,%r1 # call do_notify_resume ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts TRACE_IRQS_OFF - j io_return + j .Lio_return /* * External interrupt handler routine @@ -603,9 +603,9 @@ ENTRY(ext_int_handler) l %r13,__LC_SVC_NEW_PSW+4 lm %r8,%r9,__LC_EXT_OLD_PSW tmh %r8,0x0001 # interrupting from user ? - jz ext_skip + jz .Lext_skip UPDATE_VTIME %r14,%r15,__LC_ASYNC_ENTER_TIMER -ext_skip: +.Lext_skip: SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT stm %r0,%r7,__PT_R0(%r11) mvc __PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC @@ -614,29 +614,29 @@ ext_skip: mvc __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS xc __PT_FLAGS(4,%r11),__PT_FLAGS(%r11) TRACE_IRQS_OFF - l %r1,BASED(.Ldo_IRQ) + l %r1,BASED(.Lc_do_IRQ) lr %r2,%r11 # pass pointer to pt_regs lhi %r3,EXT_INTERRUPT basr %r14,%r1 # call do_IRQ - j io_return + j .Lio_return /* - * Load idle PSW. The second "half" of this function is in cleanup_idle. + * Load idle PSW. The second "half" of this function is in .Lcleanup_idle. */ ENTRY(psw_idle) st %r3,__SF_EMPTY(%r15) basr %r1,0 - la %r1,psw_idle_lpsw+4-.(%r1) + la %r1,.Lpsw_idle_lpsw+4-.(%r1) st %r1,__SF_EMPTY+4(%r15) oi __SF_EMPTY+4(%r15),0x80 stck __CLOCK_IDLE_ENTER(%r2) stpt __TIMER_IDLE_ENTER(%r2) -psw_idle_lpsw: +.Lpsw_idle_lpsw: lpsw __SF_EMPTY(%r15) br %r14 -psw_idle_end: +.Lpsw_idle_end: -__critical_end: +.L__critical_end: /* * Machine check handler routines @@ -650,7 +650,7 @@ ENTRY(mcck_int_handler) l %r13,__LC_SVC_NEW_PSW+4 lm %r8,%r9,__LC_MCK_OLD_PSW tm __LC_MCCK_CODE,0x80 # system damage? - jo mcck_panic # yes -> rest of mcck code invalid + jo .Lmcck_panic # yes -> rest of mcck code invalid la %r14,__LC_CPU_TIMER_SAVE_AREA mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid? @@ -668,22 +668,22 @@ ENTRY(mcck_int_handler) 2: spt 0(%r14) mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) 3: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid? - jno mcck_panic # no -> skip cleanup critical + jno .Lmcck_panic # no -> skip cleanup critical tm %r8,0x0001 # interrupting from user ? - jz mcck_skip + jz .Lmcck_skip UPDATE_VTIME %r14,%r15,__LC_MCCK_ENTER_TIMER -mcck_skip: +.Lmcck_skip: SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+32,__LC_PANIC_STACK,PAGE_SHIFT stm %r0,%r7,__PT_R0(%r11) mvc __PT_R8(32,%r11),__LC_GPREGS_SAVE_AREA+32 stm %r8,%r9,__PT_PSW(%r11) xc __PT_FLAGS(4,%r11),__PT_FLAGS(%r11) xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) - l %r1,BASED(.Ldo_machine_check) + l %r1,BASED(.Lc_do_machine_check) lr %r2,%r11 # pass pointer to pt_regs basr %r14,%r1 # call s390_do_machine_check tm __PT_PSW+1(%r11),0x01 # returning to user ? 
- jno mcck_return + jno .Lmcck_return l %r1,__LC_KERNEL_STACK # switch to kernel stack mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) @@ -691,12 +691,12 @@ mcck_skip: lr %r15,%r1 ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off tm __LC_CPU_FLAGS+3,_CIF_MCCK_PENDING - jno mcck_return + jno .Lmcck_return TRACE_IRQS_OFF - l %r1,BASED(.Lhandle_mcck) + l %r1,BASED(.Lc_handle_mcck) basr %r14,%r1 # call s390_handle_mcck TRACE_IRQS_ON -mcck_return: +.Lmcck_return: mvc __LC_RETURN_MCCK_PSW(8),__PT_PSW(%r11) # move return PSW tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ? jno 0f @@ -706,15 +706,15 @@ mcck_return: 0: lm %r0,%r15,__PT_R0(%r11) lpsw __LC_RETURN_MCCK_PSW -mcck_panic: +.Lmcck_panic: l %r14,__LC_PANIC_STACK slr %r14,%r15 sra %r14,PAGE_SHIFT jz 0f l %r15,__LC_PANIC_STACK - j mcck_skip + j .Lmcck_skip 0: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) - j mcck_skip + j .Lmcck_skip # # PSW restart interrupt handler @@ -764,58 +764,58 @@ stack_overflow: 1: .long kernel_stack_overflow #endif -cleanup_table: +.Lcleanup_table: .long system_call + 0x80000000 - .long sysc_do_svc + 0x80000000 - .long sysc_tif + 0x80000000 - .long sysc_restore + 0x80000000 - .long sysc_done + 0x80000000 - .long io_tif + 0x80000000 - .long io_restore + 0x80000000 - .long io_done + 0x80000000 + .long .Lsysc_do_svc + 0x80000000 + .long .Lsysc_tif + 0x80000000 + .long .Lsysc_restore + 0x80000000 + .long .Lsysc_done + 0x80000000 + .long .Lio_tif + 0x80000000 + .long .Lio_restore + 0x80000000 + .long .Lio_done + 0x80000000 .long psw_idle + 0x80000000 - .long psw_idle_end + 0x80000000 + .long .Lpsw_idle_end + 0x80000000 cleanup_critical: - cl %r9,BASED(cleanup_table) # system_call + cl %r9,BASED(.Lcleanup_table) # system_call jl 0f - cl %r9,BASED(cleanup_table+4) # sysc_do_svc - jl cleanup_system_call - cl %r9,BASED(cleanup_table+8) # sysc_tif + cl %r9,BASED(.Lcleanup_table+4) # .Lsysc_do_svc + jl .Lcleanup_system_call + cl %r9,BASED(.Lcleanup_table+8) # .Lsysc_tif jl 0f - cl %r9,BASED(cleanup_table+12) # sysc_restore - jl cleanup_sysc_tif - cl %r9,BASED(cleanup_table+16) # sysc_done - jl cleanup_sysc_restore - cl %r9,BASED(cleanup_table+20) # io_tif + cl %r9,BASED(.Lcleanup_table+12) # .Lsysc_restore + jl .Lcleanup_sysc_tif + cl %r9,BASED(.Lcleanup_table+16) # .Lsysc_done + jl .Lcleanup_sysc_restore + cl %r9,BASED(.Lcleanup_table+20) # .Lio_tif jl 0f - cl %r9,BASED(cleanup_table+24) # io_restore - jl cleanup_io_tif - cl %r9,BASED(cleanup_table+28) # io_done - jl cleanup_io_restore - cl %r9,BASED(cleanup_table+32) # psw_idle + cl %r9,BASED(.Lcleanup_table+24) # .Lio_restore + jl .Lcleanup_io_tif + cl %r9,BASED(.Lcleanup_table+28) # .Lio_done + jl .Lcleanup_io_restore + cl %r9,BASED(.Lcleanup_table+32) # psw_idle jl 0f - cl %r9,BASED(cleanup_table+36) # psw_idle_end - jl cleanup_idle + cl %r9,BASED(.Lcleanup_table+36) # .Lpsw_idle_end + jl .Lcleanup_idle 0: br %r14 -cleanup_system_call: +.Lcleanup_system_call: # check if stpt has been executed - cl %r9,BASED(cleanup_system_call_insn) + cl %r9,BASED(.Lcleanup_system_call_insn) jh 0f mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER chi %r11,__LC_SAVE_AREA_ASYNC je 0f mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER 0: # check if stm has been executed - cl %r9,BASED(cleanup_system_call_insn+4) + cl %r9,BASED(.Lcleanup_system_call_insn+4) jh 0f mvc __LC_SAVE_AREA_SYNC(32),0(%r11) 0: # set up saved registers r12, and r13 st %r12,16(%r11) # r12 thread-info pointer st %r13,20(%r11) # r13 literal-pool pointer # check if the 
user time calculation has been done - cl %r9,BASED(cleanup_system_call_insn+8) + cl %r9,BASED(.Lcleanup_system_call_insn+8) jh 0f l %r10,__LC_EXIT_TIMER l %r15,__LC_EXIT_TIMER+4 @@ -824,7 +824,7 @@ cleanup_system_call: st %r10,__LC_USER_TIMER st %r15,__LC_USER_TIMER+4 0: # check if the system time calculation has been done - cl %r9,BASED(cleanup_system_call_insn+12) + cl %r9,BASED(.Lcleanup_system_call_insn+12) jh 0f l %r10,__LC_LAST_UPDATE_TIMER l %r15,__LC_LAST_UPDATE_TIMER+4 @@ -848,20 +848,20 @@ cleanup_system_call: # setup saved register 15 st %r15,28(%r11) # r15 stack pointer # set new psw address and exit - l %r9,BASED(cleanup_table+4) # sysc_do_svc + 0x80000000 + l %r9,BASED(.Lcleanup_table+4) # .Lsysc_do_svc + 0x80000000 br %r14 -cleanup_system_call_insn: +.Lcleanup_system_call_insn: .long system_call + 0x80000000 - .long sysc_stm + 0x80000000 - .long sysc_vtime + 0x80000000 + 36 - .long sysc_vtime + 0x80000000 + 76 + .long .Lsysc_stm + 0x80000000 + .long .Lsysc_vtime + 0x80000000 + 36 + .long .Lsysc_vtime + 0x80000000 + 76 -cleanup_sysc_tif: - l %r9,BASED(cleanup_table+8) # sysc_tif + 0x80000000 +.Lcleanup_sysc_tif: + l %r9,BASED(.Lcleanup_table+8) # .Lsysc_tif + 0x80000000 br %r14 -cleanup_sysc_restore: - cl %r9,BASED(cleanup_sysc_restore_insn) +.Lcleanup_sysc_restore: + cl %r9,BASED(.Lcleanup_sysc_restore_insn) jhe 0f l %r9,12(%r11) # get saved pointer to pt_regs mvc __LC_RETURN_PSW(8),__PT_PSW(%r9) @@ -869,15 +869,15 @@ cleanup_sysc_restore: lm %r0,%r7,__PT_R0(%r9) 0: lm %r8,%r9,__LC_RETURN_PSW br %r14 -cleanup_sysc_restore_insn: - .long sysc_done - 4 + 0x80000000 +.Lcleanup_sysc_restore_insn: + .long .Lsysc_done - 4 + 0x80000000 -cleanup_io_tif: - l %r9,BASED(cleanup_table+20) # io_tif + 0x80000000 +.Lcleanup_io_tif: + l %r9,BASED(.Lcleanup_table+20) # .Lio_tif + 0x80000000 br %r14 -cleanup_io_restore: - cl %r9,BASED(cleanup_io_restore_insn) +.Lcleanup_io_restore: + cl %r9,BASED(.Lcleanup_io_restore_insn) jhe 0f l %r9,12(%r11) # get saved r11 pointer to pt_regs mvc __LC_RETURN_PSW(8),__PT_PSW(%r9) @@ -885,10 +885,10 @@ cleanup_io_restore: lm %r0,%r7,__PT_R0(%r9) 0: lm %r8,%r9,__LC_RETURN_PSW br %r14 -cleanup_io_restore_insn: - .long io_done - 4 + 0x80000000 +.Lcleanup_io_restore_insn: + .long .Lio_done - 4 + 0x80000000 -cleanup_idle: +.Lcleanup_idle: # copy interrupt clock & cpu timer mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK mvc __TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER @@ -897,7 +897,7 @@ cleanup_idle: mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK mvc __TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER 0: # check if stck has been executed - cl %r9,BASED(cleanup_idle_insn) + cl %r9,BASED(.Lcleanup_idle_insn) jhe 1f mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2) mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r3) @@ -913,12 +913,12 @@ cleanup_idle: stm %r9,%r10,__LC_SYSTEM_TIMER mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2) # prepare return psw - n %r8,BASED(cleanup_idle_wait) # clear irq & wait state bits + n %r8,BASED(.Lcleanup_idle_wait) # clear irq & wait state bits l %r9,24(%r11) # return from psw_idle br %r14 -cleanup_idle_insn: - .long psw_idle_lpsw + 0x80000000 -cleanup_idle_wait: +.Lcleanup_idle_insn: + .long .Lpsw_idle_lpsw + 0x80000000 +.Lcleanup_idle_wait: .long 0xfcfdffff /* @@ -933,30 +933,30 @@ cleanup_idle_wait: /* * Symbol constants */ -.Ldo_machine_check: .long s390_do_machine_check -.Lhandle_mcck: .long s390_handle_mcck -.Ldo_IRQ: .long do_IRQ -.Ldo_signal: .long do_signal -.Ldo_notify_resume: .long do_notify_resume -.Ldo_per_trap: .long 
do_per_trap -.Ljump_table: .long pgm_check_table -.Lschedule: .long schedule +.Lc_do_machine_check: .long s390_do_machine_check +.Lc_handle_mcck: .long s390_handle_mcck +.Lc_do_IRQ: .long do_IRQ +.Lc_do_signal: .long do_signal +.Lc_do_notify_resume: .long do_notify_resume +.Lc_do_per_trap: .long do_per_trap +.Lc_jump_table: .long pgm_check_table +.Lc_schedule: .long schedule #ifdef CONFIG_PREEMPT -.Lpreempt_irq: .long preempt_schedule_irq +.Lc_preempt_irq: .long preempt_schedule_irq #endif -.Ltrace_enter: .long do_syscall_trace_enter -.Ltrace_exit: .long do_syscall_trace_exit -.Lschedule_tail: .long schedule_tail -.Lsysc_per: .long sysc_per + 0x80000000 +.Lc_trace_enter: .long do_syscall_trace_enter +.Lc_trace_exit: .long do_syscall_trace_exit +.Lc_schedule_tail: .long schedule_tail +.Lc_sysc_per: .long .Lsysc_per + 0x80000000 #ifdef CONFIG_TRACE_IRQFLAGS -.Lhardirqs_on: .long trace_hardirqs_on_caller -.Lhardirqs_off: .long trace_hardirqs_off_caller +.Lc_hardirqs_on: .long trace_hardirqs_on_caller +.Lc_hardirqs_off: .long trace_hardirqs_off_caller #endif #ifdef CONFIG_LOCKDEP -.Llockdep_sys_exit: .long lockdep_sys_exit +.Lc_lockdep_sys_exit: .long lockdep_sys_exit #endif -.Lcritical_start: .long __critical_start + 0x80000000 -.Lcritical_length: .long __critical_end - __critical_start +.Lc_critical_start: .long .L__critical_start + 0x80000000 +.Lc_critical_length: .long .L__critical_end - .L__critical_start .section .rodata, "a" #define SYSCALL(esa,esame,emu) .long esa diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h index 0554b9771c9f..8e61393c8275 100644 --- a/arch/s390/kernel/entry.h +++ b/arch/s390/kernel/entry.h @@ -74,4 +74,6 @@ struct old_sigaction; long sys_s390_personality(unsigned int personality); long sys_s390_runtime_instr(int command, int signum); +long sys_s390_pci_mmio_write(unsigned long, const void __user *, size_t); +long sys_s390_pci_mmio_read(unsigned long, void __user *, size_t); #endif /* _ENTRY_H */ diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S index 7b2e03afd017..c329446a951d 100644 --- a/arch/s390/kernel/entry64.S +++ b/arch/s390/kernel/entry64.S @@ -91,7 +91,7 @@ _PIF_WORK = (_PIF_PER_TRAP) .if \reason==1 # Some program interrupts are suppressing (e.g. protection). # We must also check the instruction after SIE in that case. - # do_protection_exception will rewind to rewind_pad + # do_protection_exception will rewind to .Lrewind_pad jh .+42 .else jhe .+42 @@ -192,7 +192,7 @@ ENTRY(__switch_to) lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task br %r14 -__critical_start: +.L__critical_start: /* * SVC interrupt handler routine. System calls are synchronous events and * are executed with interrupts enabled. 
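
Just above, entry.h gains prototypes for sys_s390_pci_mmio_write() and sys_s390_pci_mmio_read(); together with the __NR_s390_pci_mmio_write/__NR_s390_pci_mmio_read numbers (352 and 353) added to uapi/asm/unistd.h earlier in this diff, these form the new user-space interface for PCI MMIO access on s390. A hedged sketch of how the calls could be issued from user space via syscall(2); the wrapper names are invented here, and how a valid mmio_addr is obtained (for example by mapping a PCI BAR resource) is an assumption:

    /*
     * Sketch only: thin user-space wrappers for the new s390 PCI MMIO
     * syscalls.  Numbers and argument order follow the unistd.h and
     * entry.h hunks in this diff; everything else is illustrative.
     */
    #include <stddef.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    #ifndef __NR_s390_pci_mmio_write
    #define __NR_s390_pci_mmio_write 352
    #define __NR_s390_pci_mmio_read  353
    #endif

    static long s390_pci_mmio_write(unsigned long mmio_addr,
                                    const void *buf, size_t len)
    {
            return syscall(__NR_s390_pci_mmio_write, mmio_addr, buf, len);
    }

    static long s390_pci_mmio_read(unsigned long mmio_addr,
                                   void *buf, size_t len)
    {
            return syscall(__NR_s390_pci_mmio_read, mmio_addr, buf, len);
    }

As with any raw syscall(2) invocation, a return value of -1 with errno set indicates failure, so callers would check the result the same way they would for read() or write().
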
@@ -200,15 +200,15 @@ __critical_start: ENTRY(system_call) stpt __LC_SYNC_ENTER_TIMER -sysc_stmg: +.Lsysc_stmg: stmg %r8,%r15,__LC_SAVE_AREA_SYNC lg %r10,__LC_LAST_BREAK lg %r12,__LC_THREAD_INFO lghi %r14,_PIF_SYSCALL -sysc_per: +.Lsysc_per: lg %r15,__LC_KERNEL_STACK la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs -sysc_vtime: +.Lsysc_vtime: UPDATE_VTIME %r13,__LC_SYNC_ENTER_TIMER LAST_BREAK %r13 stmg %r0,%r7,__PT_R0(%r11) @@ -216,39 +216,39 @@ sysc_vtime: mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC stg %r14,__PT_FLAGS(%r11) -sysc_do_svc: +.Lsysc_do_svc: lg %r10,__TI_sysc_table(%r12) # address of system call table llgh %r8,__PT_INT_CODE+2(%r11) slag %r8,%r8,2 # shift and test for svc 0 - jnz sysc_nr_ok + jnz .Lsysc_nr_ok # svc 0: system call number in %r1 llgfr %r1,%r1 # clear high word in r1 cghi %r1,NR_syscalls - jnl sysc_nr_ok + jnl .Lsysc_nr_ok sth %r1,__PT_INT_CODE+2(%r11) slag %r8,%r1,2 -sysc_nr_ok: +.Lsysc_nr_ok: xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) stg %r2,__PT_ORIG_GPR2(%r11) stg %r7,STACK_FRAME_OVERHEAD(%r15) lgf %r9,0(%r8,%r10) # get system call add. tm __TI_flags+7(%r12),_TIF_TRACE - jnz sysc_tracesys + jnz .Lsysc_tracesys basr %r14,%r9 # call sys_xxxx stg %r2,__PT_R2(%r11) # store return value -sysc_return: +.Lsysc_return: LOCKDEP_SYS_EXIT -sysc_tif: +.Lsysc_tif: tm __PT_PSW+1(%r11),0x01 # returning to user ? - jno sysc_restore + jno .Lsysc_restore tm __PT_FLAGS+7(%r11),_PIF_WORK - jnz sysc_work + jnz .Lsysc_work tm __TI_flags+7(%r12),_TIF_WORK - jnz sysc_work # check for work + jnz .Lsysc_work # check for work tm __LC_CPU_FLAGS+7,_CIF_WORK - jnz sysc_work -sysc_restore: + jnz .Lsysc_work +.Lsysc_restore: lg %r14,__LC_VDSO_PER_CPU lmg %r0,%r10,__PT_R0(%r11) mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) @@ -256,101 +256,101 @@ sysc_restore: mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER lmg %r11,%r15,__PT_R11(%r11) lpswe __LC_RETURN_PSW -sysc_done: +.Lsysc_done: # # One of the work bits is on. Find out which one. 
# -sysc_work: +.Lsysc_work: tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING - jo sysc_mcck_pending + jo .Lsysc_mcck_pending tm __TI_flags+7(%r12),_TIF_NEED_RESCHED - jo sysc_reschedule + jo .Lsysc_reschedule #ifdef CONFIG_UPROBES tm __TI_flags+7(%r12),_TIF_UPROBE - jo sysc_uprobe_notify + jo .Lsysc_uprobe_notify #endif tm __PT_FLAGS+7(%r11),_PIF_PER_TRAP - jo sysc_singlestep + jo .Lsysc_singlestep tm __TI_flags+7(%r12),_TIF_SIGPENDING - jo sysc_sigpending + jo .Lsysc_sigpending tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME - jo sysc_notify_resume + jo .Lsysc_notify_resume tm __LC_CPU_FLAGS+7,_CIF_ASCE - jo sysc_uaccess - j sysc_return # beware of critical section cleanup + jo .Lsysc_uaccess + j .Lsysc_return # beware of critical section cleanup # # _TIF_NEED_RESCHED is set, call schedule # -sysc_reschedule: - larl %r14,sysc_return +.Lsysc_reschedule: + larl %r14,.Lsysc_return jg schedule # # _CIF_MCCK_PENDING is set, call handler # -sysc_mcck_pending: - larl %r14,sysc_return +.Lsysc_mcck_pending: + larl %r14,.Lsysc_return jg s390_handle_mcck # TIF bit will be cleared by handler # # _CIF_ASCE is set, load user space asce # -sysc_uaccess: +.Lsysc_uaccess: ni __LC_CPU_FLAGS+7,255-_CIF_ASCE lctlg %c1,%c1,__LC_USER_ASCE # load primary asce - j sysc_return + j .Lsysc_return # # _TIF_SIGPENDING is set, call do_signal # -sysc_sigpending: +.Lsysc_sigpending: lgr %r2,%r11 # pass pointer to pt_regs brasl %r14,do_signal tm __PT_FLAGS+7(%r11),_PIF_SYSCALL - jno sysc_return + jno .Lsysc_return lmg %r2,%r7,__PT_R2(%r11) # load svc arguments lg %r10,__TI_sysc_table(%r12) # address of system call table lghi %r8,0 # svc 0 returns -ENOSYS llgh %r1,__PT_INT_CODE+2(%r11) # load new svc number cghi %r1,NR_syscalls - jnl sysc_nr_ok # invalid svc number -> do svc 0 + jnl .Lsysc_nr_ok # invalid svc number -> do svc 0 slag %r8,%r1,2 - j sysc_nr_ok # restart svc + j .Lsysc_nr_ok # restart svc # # _TIF_NOTIFY_RESUME is set, call do_notify_resume # -sysc_notify_resume: +.Lsysc_notify_resume: lgr %r2,%r11 # pass pointer to pt_regs - larl %r14,sysc_return + larl %r14,.Lsysc_return jg do_notify_resume # # _TIF_UPROBE is set, call uprobe_notify_resume # #ifdef CONFIG_UPROBES -sysc_uprobe_notify: +.Lsysc_uprobe_notify: lgr %r2,%r11 # pass pointer to pt_regs - larl %r14,sysc_return + larl %r14,.Lsysc_return jg uprobe_notify_resume #endif # # _PIF_PER_TRAP is set, call do_per_trap # -sysc_singlestep: +.Lsysc_singlestep: ni __PT_FLAGS+7(%r11),255-_PIF_PER_TRAP lgr %r2,%r11 # pass pointer to pt_regs - larl %r14,sysc_return + larl %r14,.Lsysc_return jg do_per_trap # # call tracehook_report_syscall_entry/tracehook_report_syscall_exit before # and after the system call # -sysc_tracesys: +.Lsysc_tracesys: lgr %r2,%r11 # pass pointer to pt_regs la %r3,0 llgh %r0,__PT_INT_CODE+2(%r11) @@ -358,20 +358,20 @@ sysc_tracesys: brasl %r14,do_syscall_trace_enter lghi %r0,NR_syscalls clgr %r0,%r2 - jnh sysc_tracenogo + jnh .Lsysc_tracenogo sllg %r8,%r2,2 lgf %r9,0(%r8,%r10) -sysc_tracego: +.Lsysc_tracego: lmg %r3,%r7,__PT_R3(%r11) stg %r7,STACK_FRAME_OVERHEAD(%r15) lg %r2,__PT_ORIG_GPR2(%r11) basr %r14,%r9 # call sys_xxx stg %r2,__PT_R2(%r11) # store return value -sysc_tracenogo: +.Lsysc_tracenogo: tm __TI_flags+7(%r12),_TIF_TRACE - jz sysc_return + jz .Lsysc_return lgr %r2,%r11 # pass pointer to pt_regs - larl %r14,sysc_return + larl %r14,.Lsysc_return jg do_syscall_trace_exit # @@ -384,13 +384,13 @@ ENTRY(ret_from_fork) TRACE_IRQS_ON ssm __LC_SVC_NEW_PSW # reenable interrupts tm __PT_PSW+1(%r11),0x01 # forking a kernel thread ? 
- jne sysc_tracenogo + jne .Lsysc_tracenogo # it's a kernel thread lmg %r9,%r10,__PT_R9(%r11) # load gprs ENTRY(kernel_thread_starter) la %r2,0(%r10) basr %r14,%r9 - j sysc_tracenogo + j .Lsysc_tracenogo /* * Program check handler routine @@ -409,7 +409,7 @@ ENTRY(pgm_check_handler) tmhh %r8,0x4000 # PER bit set in old PSW ? jnz 0f # -> enabled, can't be a double fault tm __LC_PGM_ILC+3,0x80 # check for per exception - jnz pgm_svcper # -> single stepped svc + jnz .Lpgm_svcper # -> single stepped svc 0: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) j 2f @@ -432,7 +432,7 @@ ENTRY(pgm_check_handler) tm __LC_PGM_ILC+3,0x80 # check for per exception jz 0f tmhh %r8,0x0001 # kernel per event ? - jz pgm_kprobe + jz .Lpgm_kprobe oi __PT_FLAGS+7(%r11),_PIF_PER_TRAP mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS mvc __THREAD_per_cause(2,%r14),__LC_PER_CODE @@ -443,31 +443,31 @@ ENTRY(pgm_check_handler) llgh %r10,__PT_INT_CODE+2(%r11) nill %r10,0x007f sll %r10,2 - je sysc_return + je .Lsysc_return lgf %r1,0(%r10,%r1) # load address of handler routine lgr %r2,%r11 # pass pointer to pt_regs basr %r14,%r1 # branch to interrupt-handler - j sysc_return + j .Lsysc_return # # PER event in supervisor state, must be kprobes # -pgm_kprobe: +.Lpgm_kprobe: REENABLE_IRQS xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) lgr %r2,%r11 # pass pointer to pt_regs brasl %r14,do_per_trap - j sysc_return + j .Lsysc_return # # single stepped system call # -pgm_svcper: +.Lpgm_svcper: mvc __LC_RETURN_PSW(8),__LC_SVC_NEW_PSW - larl %r14,sysc_per + larl %r14,.Lsysc_per stg %r14,__LC_RETURN_PSW+8 lghi %r14,_PIF_SYSCALL | _PIF_PER_TRAP - lpswe __LC_RETURN_PSW # branch to sysc_per and enable irqs + lpswe __LC_RETURN_PSW # branch to .Lsysc_per and enable irqs /* * IO interrupt handler routine @@ -483,10 +483,10 @@ ENTRY(io_int_handler) HANDLE_SIE_INTERCEPT %r14,2 SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT tmhh %r8,0x0001 # interrupting from user? - jz io_skip + jz .Lio_skip UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER LAST_BREAK %r14 -io_skip: +.Lio_skip: stmg %r0,%r7,__PT_R0(%r11) mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC stmg %r8,%r9,__PT_PSW(%r11) @@ -494,29 +494,29 @@ io_skip: xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) TRACE_IRQS_OFF xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) -io_loop: +.Lio_loop: lgr %r2,%r11 # pass pointer to pt_regs lghi %r3,IO_INTERRUPT tm __PT_INT_CODE+8(%r11),0x80 # adapter interrupt ? - jz io_call + jz .Lio_call lghi %r3,THIN_INTERRUPT -io_call: +.Lio_call: brasl %r14,do_IRQ tm __LC_MACHINE_FLAGS+6,0x10 # MACHINE_FLAG_LPAR - jz io_return + jz .Lio_return tpi 0 - jz io_return + jz .Lio_return mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID - j io_loop -io_return: + j .Lio_loop +.Lio_return: LOCKDEP_SYS_EXIT TRACE_IRQS_ON -io_tif: +.Lio_tif: tm __TI_flags+7(%r12),_TIF_WORK - jnz io_work # there is work to do (signals etc.) + jnz .Lio_work # there is work to do (signals etc.) tm __LC_CPU_FLAGS+7,_CIF_WORK - jnz io_work -io_restore: + jnz .Lio_work +.Lio_restore: lg %r14,__LC_VDSO_PER_CPU lmg %r0,%r10,__PT_R0(%r11) mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) @@ -524,7 +524,7 @@ io_restore: mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER lmg %r11,%r15,__PT_R11(%r11) lpswe __LC_RETURN_PSW -io_done: +.Lio_done: # # There is work todo, find out in which context we have been interrupted: @@ -535,15 +535,15 @@ io_done: # the preemption counter and if it is zero call preempt_schedule_irq # Before any work can be done, a switch to the kernel stack is required. 
# -io_work: +.Lio_work: tm __PT_PSW+1(%r11),0x01 # returning to user ? - jo io_work_user # yes -> do resched & signal + jo .Lio_work_user # yes -> do resched & signal #ifdef CONFIG_PREEMPT # check for preemptive scheduling icm %r0,15,__TI_precount(%r12) - jnz io_restore # preemption is disabled + jnz .Lio_restore # preemption is disabled tm __TI_flags+7(%r12),_TIF_NEED_RESCHED - jno io_restore + jno .Lio_restore # switch to kernel stack lg %r1,__PT_R15(%r11) aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE) @@ -551,19 +551,19 @@ io_work: xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) la %r11,STACK_FRAME_OVERHEAD(%r1) lgr %r15,%r1 - # TRACE_IRQS_ON already done at io_return, call + # TRACE_IRQS_ON already done at .Lio_return, call # TRACE_IRQS_OFF to keep things symmetrical TRACE_IRQS_OFF brasl %r14,preempt_schedule_irq - j io_return + j .Lio_return #else - j io_restore + j .Lio_restore #endif # # Need to do work before returning to userspace, switch to kernel stack # -io_work_user: +.Lio_work_user: lg %r1,__LC_KERNEL_STACK mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) @@ -573,70 +573,70 @@ io_work_user: # # One of the work bits is on. Find out which one. # -io_work_tif: +.Lio_work_tif: tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING - jo io_mcck_pending + jo .Lio_mcck_pending tm __TI_flags+7(%r12),_TIF_NEED_RESCHED - jo io_reschedule + jo .Lio_reschedule tm __TI_flags+7(%r12),_TIF_SIGPENDING - jo io_sigpending + jo .Lio_sigpending tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME - jo io_notify_resume + jo .Lio_notify_resume tm __LC_CPU_FLAGS+7,_CIF_ASCE - jo io_uaccess - j io_return # beware of critical section cleanup + jo .Lio_uaccess + j .Lio_return # beware of critical section cleanup # # _CIF_MCCK_PENDING is set, call handler # -io_mcck_pending: - # TRACE_IRQS_ON already done at io_return +.Lio_mcck_pending: + # TRACE_IRQS_ON already done at .Lio_return brasl %r14,s390_handle_mcck # TIF bit will be cleared by handler TRACE_IRQS_OFF - j io_return + j .Lio_return # # _CIF_ASCE is set, load user space asce # -io_uaccess: +.Lio_uaccess: ni __LC_CPU_FLAGS+7,255-_CIF_ASCE lctlg %c1,%c1,__LC_USER_ASCE # load primary asce - j io_return + j .Lio_return # # _TIF_NEED_RESCHED is set, call schedule # -io_reschedule: - # TRACE_IRQS_ON already done at io_return +.Lio_reschedule: + # TRACE_IRQS_ON already done at .Lio_return ssm __LC_SVC_NEW_PSW # reenable interrupts brasl %r14,schedule # call scheduler ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts TRACE_IRQS_OFF - j io_return + j .Lio_return # # _TIF_SIGPENDING or is set, call do_signal # -io_sigpending: - # TRACE_IRQS_ON already done at io_return +.Lio_sigpending: + # TRACE_IRQS_ON already done at .Lio_return ssm __LC_SVC_NEW_PSW # reenable interrupts lgr %r2,%r11 # pass pointer to pt_regs brasl %r14,do_signal ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts TRACE_IRQS_OFF - j io_return + j .Lio_return # # _TIF_NOTIFY_RESUME or is set, call do_notify_resume # -io_notify_resume: - # TRACE_IRQS_ON already done at io_return +.Lio_notify_resume: + # TRACE_IRQS_ON already done at .Lio_return ssm __LC_SVC_NEW_PSW # reenable interrupts lgr %r2,%r11 # pass pointer to pt_regs brasl %r14,do_notify_resume ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts TRACE_IRQS_OFF - j io_return + j .Lio_return /* * External interrupt handler routine @@ -652,10 +652,10 @@ ENTRY(ext_int_handler) HANDLE_SIE_INTERCEPT %r14,3 SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT tmhh %r8,0x0001 # interrupting from user ? 
- jz ext_skip + jz .Lext_skip UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER LAST_BREAK %r14 -ext_skip: +.Lext_skip: stmg %r0,%r7,__PT_R0(%r11) mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC stmg %r8,%r9,__PT_PSW(%r11) @@ -669,23 +669,23 @@ ext_skip: lgr %r2,%r11 # pass pointer to pt_regs lghi %r3,EXT_INTERRUPT brasl %r14,do_IRQ - j io_return + j .Lio_return /* - * Load idle PSW. The second "half" of this function is in cleanup_idle. + * Load idle PSW. The second "half" of this function is in .Lcleanup_idle. */ ENTRY(psw_idle) stg %r3,__SF_EMPTY(%r15) - larl %r1,psw_idle_lpsw+4 + larl %r1,.Lpsw_idle_lpsw+4 stg %r1,__SF_EMPTY+8(%r15) STCK __CLOCK_IDLE_ENTER(%r2) stpt __TIMER_IDLE_ENTER(%r2) -psw_idle_lpsw: +.Lpsw_idle_lpsw: lpswe __SF_EMPTY(%r15) br %r14 -psw_idle_end: +.Lpsw_idle_end: -__critical_end: +.L__critical_end: /* * Machine check handler routines @@ -701,7 +701,7 @@ ENTRY(mcck_int_handler) lmg %r8,%r9,__LC_MCK_OLD_PSW HANDLE_SIE_INTERCEPT %r14,4 tm __LC_MCCK_CODE,0x80 # system damage? - jo mcck_panic # yes -> rest of mcck code invalid + jo .Lmcck_panic # yes -> rest of mcck code invalid lghi %r14,__LC_CPU_TIMER_SAVE_AREA mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid? @@ -719,13 +719,13 @@ ENTRY(mcck_int_handler) 2: spt 0(%r14) mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) 3: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid? - jno mcck_panic # no -> skip cleanup critical + jno .Lmcck_panic # no -> skip cleanup critical SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_PANIC_STACK,PAGE_SHIFT tm %r8,0x0001 # interrupting from user ? - jz mcck_skip + jz .Lmcck_skip UPDATE_VTIME %r14,__LC_MCCK_ENTER_TIMER LAST_BREAK %r14 -mcck_skip: +.Lmcck_skip: lghi %r14,__LC_GPREGS_SAVE_AREA+64 stmg %r0,%r7,__PT_R0(%r11) mvc __PT_R8(64,%r11),0(%r14) @@ -735,7 +735,7 @@ mcck_skip: lgr %r2,%r11 # pass pointer to pt_regs brasl %r14,s390_do_machine_check tm __PT_PSW+1(%r11),0x01 # returning to user ? 
- jno mcck_return + jno .Lmcck_return lg %r1,__LC_KERNEL_STACK # switch to kernel stack mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) @@ -743,11 +743,11 @@ mcck_skip: lgr %r15,%r1 ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING - jno mcck_return + jno .Lmcck_return TRACE_IRQS_OFF brasl %r14,s390_handle_mcck TRACE_IRQS_ON -mcck_return: +.Lmcck_return: lg %r14,__LC_VDSO_PER_CPU lmg %r0,%r10,__PT_R0(%r11) mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW @@ -758,14 +758,14 @@ mcck_return: 0: lmg %r11,%r15,__PT_R11(%r11) lpswe __LC_RETURN_MCCK_PSW -mcck_panic: +.Lmcck_panic: lg %r14,__LC_PANIC_STACK slgr %r14,%r15 srag %r14,%r14,PAGE_SHIFT jz 0f lg %r15,__LC_PANIC_STACK 0: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) - j mcck_skip + j .Lmcck_skip # # PSW restart interrupt handler @@ -815,69 +815,69 @@ stack_overflow: #endif .align 8 -cleanup_table: +.Lcleanup_table: .quad system_call - .quad sysc_do_svc - .quad sysc_tif - .quad sysc_restore - .quad sysc_done - .quad io_tif - .quad io_restore - .quad io_done + .quad .Lsysc_do_svc + .quad .Lsysc_tif + .quad .Lsysc_restore + .quad .Lsysc_done + .quad .Lio_tif + .quad .Lio_restore + .quad .Lio_done .quad psw_idle - .quad psw_idle_end + .quad .Lpsw_idle_end cleanup_critical: - clg %r9,BASED(cleanup_table) # system_call + clg %r9,BASED(.Lcleanup_table) # system_call jl 0f - clg %r9,BASED(cleanup_table+8) # sysc_do_svc - jl cleanup_system_call - clg %r9,BASED(cleanup_table+16) # sysc_tif + clg %r9,BASED(.Lcleanup_table+8) # .Lsysc_do_svc + jl .Lcleanup_system_call + clg %r9,BASED(.Lcleanup_table+16) # .Lsysc_tif jl 0f - clg %r9,BASED(cleanup_table+24) # sysc_restore - jl cleanup_sysc_tif - clg %r9,BASED(cleanup_table+32) # sysc_done - jl cleanup_sysc_restore - clg %r9,BASED(cleanup_table+40) # io_tif + clg %r9,BASED(.Lcleanup_table+24) # .Lsysc_restore + jl .Lcleanup_sysc_tif + clg %r9,BASED(.Lcleanup_table+32) # .Lsysc_done + jl .Lcleanup_sysc_restore + clg %r9,BASED(.Lcleanup_table+40) # .Lio_tif jl 0f - clg %r9,BASED(cleanup_table+48) # io_restore - jl cleanup_io_tif - clg %r9,BASED(cleanup_table+56) # io_done - jl cleanup_io_restore - clg %r9,BASED(cleanup_table+64) # psw_idle + clg %r9,BASED(.Lcleanup_table+48) # .Lio_restore + jl .Lcleanup_io_tif + clg %r9,BASED(.Lcleanup_table+56) # .Lio_done + jl .Lcleanup_io_restore + clg %r9,BASED(.Lcleanup_table+64) # psw_idle jl 0f - clg %r9,BASED(cleanup_table+72) # psw_idle_end - jl cleanup_idle + clg %r9,BASED(.Lcleanup_table+72) # .Lpsw_idle_end + jl .Lcleanup_idle 0: br %r14 -cleanup_system_call: +.Lcleanup_system_call: # check if stpt has been executed - clg %r9,BASED(cleanup_system_call_insn) + clg %r9,BASED(.Lcleanup_system_call_insn) jh 0f mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER cghi %r11,__LC_SAVE_AREA_ASYNC je 0f mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER 0: # check if stmg has been executed - clg %r9,BASED(cleanup_system_call_insn+8) + clg %r9,BASED(.Lcleanup_system_call_insn+8) jh 0f mvc __LC_SAVE_AREA_SYNC(64),0(%r11) 0: # check if base register setup + TIF bit load has been done - clg %r9,BASED(cleanup_system_call_insn+16) + clg %r9,BASED(.Lcleanup_system_call_insn+16) jhe 0f # set up saved registers r10 and r12 stg %r10,16(%r11) # r10 last break stg %r12,32(%r11) # r12 thread-info pointer 0: # check if the user time update has been done - clg %r9,BASED(cleanup_system_call_insn+24) + clg %r9,BASED(.Lcleanup_system_call_insn+24) jh 0f lg %r15,__LC_EXIT_TIMER slg 
%r15,__LC_SYNC_ENTER_TIMER alg %r15,__LC_USER_TIMER stg %r15,__LC_USER_TIMER 0: # check if the system time update has been done - clg %r9,BASED(cleanup_system_call_insn+32) + clg %r9,BASED(.Lcleanup_system_call_insn+32) jh 0f lg %r15,__LC_LAST_UPDATE_TIMER slg %r15,__LC_EXIT_TIMER @@ -904,21 +904,21 @@ cleanup_system_call: # setup saved register r15 stg %r15,56(%r11) # r15 stack pointer # set new psw address and exit - larl %r9,sysc_do_svc + larl %r9,.Lsysc_do_svc br %r14 -cleanup_system_call_insn: +.Lcleanup_system_call_insn: .quad system_call - .quad sysc_stmg - .quad sysc_per - .quad sysc_vtime+18 - .quad sysc_vtime+42 + .quad .Lsysc_stmg + .quad .Lsysc_per + .quad .Lsysc_vtime+18 + .quad .Lsysc_vtime+42 -cleanup_sysc_tif: - larl %r9,sysc_tif +.Lcleanup_sysc_tif: + larl %r9,.Lsysc_tif br %r14 -cleanup_sysc_restore: - clg %r9,BASED(cleanup_sysc_restore_insn) +.Lcleanup_sysc_restore: + clg %r9,BASED(.Lcleanup_sysc_restore_insn) je 0f lg %r9,24(%r11) # get saved pointer to pt_regs mvc __LC_RETURN_PSW(16),__PT_PSW(%r9) @@ -926,15 +926,15 @@ cleanup_sysc_restore: lmg %r0,%r7,__PT_R0(%r9) 0: lmg %r8,%r9,__LC_RETURN_PSW br %r14 -cleanup_sysc_restore_insn: - .quad sysc_done - 4 +.Lcleanup_sysc_restore_insn: + .quad .Lsysc_done - 4 -cleanup_io_tif: - larl %r9,io_tif +.Lcleanup_io_tif: + larl %r9,.Lio_tif br %r14 -cleanup_io_restore: - clg %r9,BASED(cleanup_io_restore_insn) +.Lcleanup_io_restore: + clg %r9,BASED(.Lcleanup_io_restore_insn) je 0f lg %r9,24(%r11) # get saved r11 pointer to pt_regs mvc __LC_RETURN_PSW(16),__PT_PSW(%r9) @@ -942,10 +942,10 @@ cleanup_io_restore: lmg %r0,%r7,__PT_R0(%r9) 0: lmg %r8,%r9,__LC_RETURN_PSW br %r14 -cleanup_io_restore_insn: - .quad io_done - 4 +.Lcleanup_io_restore_insn: + .quad .Lio_done - 4 -cleanup_idle: +.Lcleanup_idle: # copy interrupt clock & cpu timer mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK mvc __TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER @@ -954,7 +954,7 @@ cleanup_idle: mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK mvc __TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER 0: # check if stck & stpt have been executed - clg %r9,BASED(cleanup_idle_insn) + clg %r9,BASED(.Lcleanup_idle_insn) jhe 1f mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2) mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2) @@ -973,17 +973,17 @@ cleanup_idle: nihh %r8,0xfcfd # clear irq & wait state bits lg %r9,48(%r11) # return from psw_idle br %r14 -cleanup_idle_insn: - .quad psw_idle_lpsw +.Lcleanup_idle_insn: + .quad .Lpsw_idle_lpsw /* * Integer constants */ .align 8 .Lcritical_start: - .quad __critical_start + .quad .L__critical_start .Lcritical_length: - .quad __critical_end - __critical_start + .quad .L__critical_end - .L__critical_start #if IS_ENABLED(CONFIG_KVM) @@ -1000,25 +1000,25 @@ ENTRY(sie64a) lmg %r0,%r13,0(%r3) # load guest gprs 0-13 lg %r14,__LC_GMAP # get gmap pointer ltgr %r14,%r14 - jz sie_gmap + jz .Lsie_gmap lctlg %c1,%c1,__GMAP_ASCE(%r14) # load primary asce -sie_gmap: +.Lsie_gmap: lg %r14,__SF_EMPTY(%r15) # get control block pointer oi __SIE_PROG0C+3(%r14),1 # we are going into SIE now tm __SIE_PROG20+3(%r14),1 # last exit... - jnz sie_done + jnz .Lsie_done LPP __SF_EMPTY(%r15) # set guest id sie 0(%r14) -sie_done: +.Lsie_done: LPP __SF_EMPTY+16(%r15) # set host id ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE lctlg %c1,%c1,__LC_USER_ASCE # load primary asce # some program checks are suppressing. C code (e.g. do_protection_exception) # will rewind the PSW by the ILC, which is 4 bytes in case of SIE. 
Other -# instructions between sie64a and sie_done should not cause program +# instructions between sie64a and .Lsie_done should not cause program # interrupts. So lets use a nop (47 00 00 00) as a landing pad. # See also HANDLE_SIE_INTERCEPT -rewind_pad: +.Lrewind_pad: nop 0 .globl sie_exit sie_exit: @@ -1027,19 +1027,19 @@ sie_exit: lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers lg %r2,__SF_EMPTY+24(%r15) # return exit reason code br %r14 -sie_fault: +.Lsie_fault: lghi %r14,-EFAULT stg %r14,__SF_EMPTY+24(%r15) # set exit reason code j sie_exit .align 8 .Lsie_critical: - .quad sie_gmap + .quad .Lsie_gmap .Lsie_critical_length: - .quad sie_done - sie_gmap + .quad .Lsie_done - .Lsie_gmap - EX_TABLE(rewind_pad,sie_fault) - EX_TABLE(sie_exit,sie_fault) + EX_TABLE(.Lrewind_pad,.Lsie_fault) + EX_TABLE(sie_exit,.Lsie_fault) #endif .section .rodata, "a" diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c index ca1cabb3a96c..b86bb8823f15 100644 --- a/arch/s390/kernel/ftrace.c +++ b/arch/s390/kernel/ftrace.c @@ -7,6 +7,7 @@ * Martin Schwidefsky <schwidefsky@de.ibm.com> */ +#include <linux/moduleloader.h> #include <linux/hardirq.h> #include <linux/uaccess.h> #include <linux/ftrace.h> @@ -15,60 +16,39 @@ #include <linux/kprobes.h> #include <trace/syscall.h> #include <asm/asm-offsets.h> +#include <asm/cacheflush.h> #include "entry.h" -void mcount_replace_code(void); -void ftrace_disable_code(void); -void ftrace_enable_insn(void); - /* * The mcount code looks like this: * stg %r14,8(%r15) # offset 0 * larl %r1,<&counter> # offset 6 * brasl %r14,_mcount # offset 12 * lg %r14,8(%r15) # offset 18 - * Total length is 24 bytes. The complete mcount block initially gets replaced - * by ftrace_make_nop. Subsequent calls to ftrace_make_call / ftrace_make_nop - * only patch the jg/lg instruction within the block. - * Note: we do not patch the first instruction to an unconditional branch, - * since that would break kprobes/jprobes. It is easier to leave the larl - * instruction in and only modify the second instruction. + * Total length is 24 bytes. Only the first instruction will be patched + * by ftrace_make_call / ftrace_make_nop. * The enabled ftrace code block looks like this: - * larl %r0,.+24 # offset 0 - * > lg %r1,__LC_FTRACE_FUNC # offset 6 - * br %r1 # offset 12 - * brcl 0,0 # offset 14 - * brc 0,0 # offset 20 + * > brasl %r0,ftrace_caller # offset 0 + * larl %r1,<&counter> # offset 6 + * brasl %r14,_mcount # offset 12 + * lg %r14,8(%r15) # offset 18 * The ftrace function gets called with a non-standard C function call ABI * where r0 contains the return address. It is also expected that the called * function only clobbers r0 and r1, but restores r2-r15. + * For module code we can't directly jump to ftrace caller, but need a + * trampoline (ftrace_plt), which clobbers also r1. * The return point of the ftrace function has offset 24, so execution * continues behind the mcount block. - * larl %r0,.+24 # offset 0 - * > jg .+18 # offset 6 - * br %r1 # offset 12 - * brcl 0,0 # offset 14 - * brc 0,0 # offset 20 + * The disabled ftrace code block looks like this: + * > jg .+24 # offset 0 + * larl %r1,<&counter> # offset 6 + * brasl %r14,_mcount # offset 12 + * lg %r14,8(%r15) # offset 18 * The jg instruction branches to offset 24 to skip as many instructions * as possible. 
*/ -asm( - " .align 4\n" - "mcount_replace_code:\n" - " larl %r0,0f\n" - "ftrace_disable_code:\n" - " jg 0f\n" - " br %r1\n" - " brcl 0,0\n" - " brc 0,0\n" - "0:\n" - " .align 4\n" - "ftrace_enable_insn:\n" - " lg %r1,"__stringify(__LC_FTRACE_FUNC)"\n"); - -#define MCOUNT_BLOCK_SIZE 24 -#define MCOUNT_INSN_OFFSET 6 -#define FTRACE_INSN_SIZE 6 + +unsigned long ftrace_plt; int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, unsigned long addr) @@ -79,24 +59,62 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr) { - /* Initial replacement of the whole mcount block */ - if (addr == MCOUNT_ADDR) { - if (probe_kernel_write((void *) rec->ip - MCOUNT_INSN_OFFSET, - mcount_replace_code, - MCOUNT_BLOCK_SIZE)) - return -EPERM; - return 0; + struct ftrace_insn insn; + unsigned short op; + void *from, *to; + size_t size; + + ftrace_generate_nop_insn(&insn); + size = sizeof(insn); + from = &insn; + to = (void *) rec->ip; + if (probe_kernel_read(&op, (void *) rec->ip, sizeof(op))) + return -EFAULT; + /* + * If we find a breakpoint instruction, a kprobe has been placed + * at the beginning of the function. We write the constant + * KPROBE_ON_FTRACE_NOP into the remaining four bytes of the original + * instruction so that the kprobes handler can execute a nop, if it + * reaches this breakpoint. + */ + if (op == BREAKPOINT_INSTRUCTION) { + size -= 2; + from += 2; + to += 2; + insn.disp = KPROBE_ON_FTRACE_NOP; } - if (probe_kernel_write((void *) rec->ip, ftrace_disable_code, - MCOUNT_INSN_SIZE)) + if (probe_kernel_write(to, from, size)) return -EPERM; return 0; } int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) { - if (probe_kernel_write((void *) rec->ip, ftrace_enable_insn, - FTRACE_INSN_SIZE)) + struct ftrace_insn insn; + unsigned short op; + void *from, *to; + size_t size; + + ftrace_generate_call_insn(&insn, rec->ip); + size = sizeof(insn); + from = &insn; + to = (void *) rec->ip; + if (probe_kernel_read(&op, (void *) rec->ip, sizeof(op))) + return -EFAULT; + /* + * If we find a breakpoint instruction, a kprobe has been placed + * at the beginning of the function. We write the constant + * KPROBE_ON_FTRACE_CALL into the remaining four bytes of the original + * instruction so that the kprobes handler can execute a brasl if it + * reaches this breakpoint. + */ + if (op == BREAKPOINT_INSTRUCTION) { + size -= 2; + from += 2; + to += 2; + insn.disp = KPROBE_ON_FTRACE_CALL; + } + if (probe_kernel_write(to, from, size)) return -EPERM; return 0; } @@ -111,13 +129,30 @@ int __init ftrace_dyn_arch_init(void) return 0; } +static int __init ftrace_plt_init(void) +{ + unsigned int *ip; + + ftrace_plt = (unsigned long) module_alloc(PAGE_SIZE); + if (!ftrace_plt) + panic("cannot allocate ftrace plt\n"); + ip = (unsigned int *) ftrace_plt; + ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */ + ip[1] = 0x100a0004; + ip[2] = 0x07f10000; + ip[3] = FTRACE_ADDR >> 32; + ip[4] = FTRACE_ADDR & 0xffffffff; + set_memory_ro(ftrace_plt, 1); + return 0; +} +device_initcall(ftrace_plt_init); + #ifdef CONFIG_FUNCTION_GRAPH_TRACER /* * Hook the return address and push it in the stack of return addresses * in current thread info. 
*/ -unsigned long __kprobes prepare_ftrace_return(unsigned long parent, - unsigned long ip) +unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip) { struct ftrace_graph_ent trace; @@ -137,6 +172,7 @@ unsigned long __kprobes prepare_ftrace_return(unsigned long parent, out: return parent; } +NOKPROBE_SYMBOL(prepare_ftrace_return); /* * Patch the kernel code at ftrace_graph_caller location. The instruction diff --git a/arch/s390/kernel/idle.c b/arch/s390/kernel/idle.c index 7559f1beab29..7a55c29b0b33 100644 --- a/arch/s390/kernel/idle.c +++ b/arch/s390/kernel/idle.c @@ -19,7 +19,7 @@ static DEFINE_PER_CPU(struct s390_idle_data, s390_idle); -void __kprobes enabled_wait(void) +void enabled_wait(void) { struct s390_idle_data *idle = this_cpu_ptr(&s390_idle); unsigned long long idle_time; @@ -35,31 +35,32 @@ void __kprobes enabled_wait(void) /* Call the assembler magic in entry.S */ psw_idle(idle, psw_mask); + trace_hardirqs_off(); + /* Account time spent with enabled wait psw loaded as idle time. */ - idle->sequence++; - smp_wmb(); + write_seqcount_begin(&idle->seqcount); idle_time = idle->clock_idle_exit - idle->clock_idle_enter; idle->clock_idle_enter = idle->clock_idle_exit = 0ULL; idle->idle_time += idle_time; idle->idle_count++; account_idle_time(idle_time); - smp_wmb(); - idle->sequence++; + write_seqcount_end(&idle->seqcount); } +NOKPROBE_SYMBOL(enabled_wait); static ssize_t show_idle_count(struct device *dev, struct device_attribute *attr, char *buf) { struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id); unsigned long long idle_count; - unsigned int sequence; + unsigned int seq; do { - sequence = ACCESS_ONCE(idle->sequence); + seq = read_seqcount_begin(&idle->seqcount); idle_count = ACCESS_ONCE(idle->idle_count); if (ACCESS_ONCE(idle->clock_idle_enter)) idle_count++; - } while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence)); + } while (read_seqcount_retry(&idle->seqcount, seq)); return sprintf(buf, "%llu\n", idle_count); } DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL); @@ -69,15 +70,15 @@ static ssize_t show_idle_time(struct device *dev, { struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id); unsigned long long now, idle_time, idle_enter, idle_exit; - unsigned int sequence; + unsigned int seq; do { now = get_tod_clock(); - sequence = ACCESS_ONCE(idle->sequence); + seq = read_seqcount_begin(&idle->seqcount); idle_time = ACCESS_ONCE(idle->idle_time); idle_enter = ACCESS_ONCE(idle->clock_idle_enter); idle_exit = ACCESS_ONCE(idle->clock_idle_exit); - } while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence)); + } while (read_seqcount_retry(&idle->seqcount, seq)); idle_time += idle_enter ? ((idle_exit ? : now) - idle_enter) : 0; return sprintf(buf, "%llu\n", idle_time >> 12); } @@ -87,14 +88,14 @@ cputime64_t arch_cpu_idle_time(int cpu) { struct s390_idle_data *idle = &per_cpu(s390_idle, cpu); unsigned long long now, idle_enter, idle_exit; - unsigned int sequence; + unsigned int seq; do { now = get_tod_clock(); - sequence = ACCESS_ONCE(idle->sequence); + seq = read_seqcount_begin(&idle->seqcount); idle_enter = ACCESS_ONCE(idle->clock_idle_enter); idle_exit = ACCESS_ONCE(idle->clock_idle_exit); - } while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence)); + } while (read_seqcount_retry(&idle->seqcount, seq)); return idle_enter ? 
((idle_exit ?: now) - idle_enter) : 0; } diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c index 1b8a38ab7861..f238720690f3 100644 --- a/arch/s390/kernel/irq.c +++ b/arch/s390/kernel/irq.c @@ -127,13 +127,10 @@ int show_interrupts(struct seq_file *p, void *v) for_each_online_cpu(cpu) seq_printf(p, "CPU%d ", cpu); seq_putc(p, '\n'); - goto out; } if (index < NR_IRQS) { if (index >= NR_IRQS_BASE) goto out; - /* Adjust index to process irqclass_main_desc array entries */ - index--; seq_printf(p, "%s: ", irqclass_main_desc[index].name); irq = irqclass_main_desc[index].irq; for_each_online_cpu(cpu) @@ -158,7 +155,7 @@ out: unsigned int arch_dynirq_lower_bound(unsigned int from) { - return from < THIN_INTERRUPT ? THIN_INTERRUPT : from; + return from < NR_IRQS_BASE ? NR_IRQS_BASE : from; } /* diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c index 014d4729b134..1e4c710dfb92 100644 --- a/arch/s390/kernel/kprobes.c +++ b/arch/s390/kernel/kprobes.c @@ -29,6 +29,7 @@ #include <linux/module.h> #include <linux/slab.h> #include <linux/hardirq.h> +#include <linux/ftrace.h> #include <asm/cacheflush.h> #include <asm/sections.h> #include <asm/dis.h> @@ -58,12 +59,23 @@ struct kprobe_insn_cache kprobe_dmainsn_slots = { .insn_size = MAX_INSN_SIZE, }; -static void __kprobes copy_instruction(struct kprobe *p) +static void copy_instruction(struct kprobe *p) { + unsigned long ip = (unsigned long) p->addr; s64 disp, new_disp; u64 addr, new_addr; - memcpy(p->ainsn.insn, p->addr, insn_length(p->opcode >> 8)); + if (ftrace_location(ip) == ip) { + /* + * If kprobes patches the instruction that is morphed by + * ftrace make sure that kprobes always sees the branch + * "jg .+24" that skips the mcount block + */ + ftrace_generate_nop_insn((struct ftrace_insn *)p->ainsn.insn); + p->ainsn.is_ftrace_insn = 1; + } else + memcpy(p->ainsn.insn, p->addr, insn_length(*p->addr >> 8)); + p->opcode = p->ainsn.insn[0]; if (!probe_is_insn_relative_long(p->ainsn.insn)) return; /* @@ -79,25 +91,14 @@ static void __kprobes copy_instruction(struct kprobe *p) new_disp = ((addr + (disp * 2)) - new_addr) / 2; *(s32 *)&p->ainsn.insn[1] = new_disp; } +NOKPROBE_SYMBOL(copy_instruction); static inline int is_kernel_addr(void *addr) { return addr < (void *)_end; } -static inline int is_module_addr(void *addr) -{ -#ifdef CONFIG_64BIT - BUILD_BUG_ON(MODULES_LEN > (1UL << 31)); - if (addr < (void *)MODULES_VADDR) - return 0; - if (addr > (void *)MODULES_END) - return 0; -#endif - return 1; -} - -static int __kprobes s390_get_insn_slot(struct kprobe *p) +static int s390_get_insn_slot(struct kprobe *p) { /* * Get an insn slot that is within the same 2GB area like the original @@ -111,8 +112,9 @@ static int __kprobes s390_get_insn_slot(struct kprobe *p) p->ainsn.insn = get_insn_slot(); return p->ainsn.insn ? 
0 : -ENOMEM; } +NOKPROBE_SYMBOL(s390_get_insn_slot); -static void __kprobes s390_free_insn_slot(struct kprobe *p) +static void s390_free_insn_slot(struct kprobe *p) { if (!p->ainsn.insn) return; @@ -122,8 +124,9 @@ static void __kprobes s390_free_insn_slot(struct kprobe *p) free_insn_slot(p->ainsn.insn, 0); p->ainsn.insn = NULL; } +NOKPROBE_SYMBOL(s390_free_insn_slot); -int __kprobes arch_prepare_kprobe(struct kprobe *p) +int arch_prepare_kprobe(struct kprobe *p) { if ((unsigned long) p->addr & 0x01) return -EINVAL; @@ -132,54 +135,79 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p) return -EINVAL; if (s390_get_insn_slot(p)) return -ENOMEM; - p->opcode = *p->addr; copy_instruction(p); return 0; } +NOKPROBE_SYMBOL(arch_prepare_kprobe); -struct ins_replace_args { - kprobe_opcode_t *ptr; - kprobe_opcode_t opcode; +int arch_check_ftrace_location(struct kprobe *p) +{ + return 0; +} + +struct swap_insn_args { + struct kprobe *p; + unsigned int arm_kprobe : 1; }; -static int __kprobes swap_instruction(void *aref) +static int swap_instruction(void *data) { struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); unsigned long status = kcb->kprobe_status; - struct ins_replace_args *args = aref; - + struct swap_insn_args *args = data; + struct ftrace_insn new_insn, *insn; + struct kprobe *p = args->p; + size_t len; + + new_insn.opc = args->arm_kprobe ? BREAKPOINT_INSTRUCTION : p->opcode; + len = sizeof(new_insn.opc); + if (!p->ainsn.is_ftrace_insn) + goto skip_ftrace; + len = sizeof(new_insn); + insn = (struct ftrace_insn *) p->addr; + if (args->arm_kprobe) { + if (is_ftrace_nop(insn)) + new_insn.disp = KPROBE_ON_FTRACE_NOP; + else + new_insn.disp = KPROBE_ON_FTRACE_CALL; + } else { + ftrace_generate_call_insn(&new_insn, (unsigned long)p->addr); + if (insn->disp == KPROBE_ON_FTRACE_NOP) + ftrace_generate_nop_insn(&new_insn); + } +skip_ftrace: kcb->kprobe_status = KPROBE_SWAP_INST; - probe_kernel_write(args->ptr, &args->opcode, sizeof(args->opcode)); + probe_kernel_write(p->addr, &new_insn, len); kcb->kprobe_status = status; return 0; } +NOKPROBE_SYMBOL(swap_instruction); -void __kprobes arch_arm_kprobe(struct kprobe *p) +void arch_arm_kprobe(struct kprobe *p) { - struct ins_replace_args args; + struct swap_insn_args args = {.p = p, .arm_kprobe = 1}; - args.ptr = p->addr; - args.opcode = BREAKPOINT_INSTRUCTION; stop_machine(swap_instruction, &args, NULL); } +NOKPROBE_SYMBOL(arch_arm_kprobe); -void __kprobes arch_disarm_kprobe(struct kprobe *p) +void arch_disarm_kprobe(struct kprobe *p) { - struct ins_replace_args args; + struct swap_insn_args args = {.p = p, .arm_kprobe = 0}; - args.ptr = p->addr; - args.opcode = p->opcode; stop_machine(swap_instruction, &args, NULL); } +NOKPROBE_SYMBOL(arch_disarm_kprobe); -void __kprobes arch_remove_kprobe(struct kprobe *p) +void arch_remove_kprobe(struct kprobe *p) { s390_free_insn_slot(p); } +NOKPROBE_SYMBOL(arch_remove_kprobe); -static void __kprobes enable_singlestep(struct kprobe_ctlblk *kcb, - struct pt_regs *regs, - unsigned long ip) +static void enable_singlestep(struct kprobe_ctlblk *kcb, + struct pt_regs *regs, + unsigned long ip) { struct per_regs per_kprobe; @@ -199,10 +227,11 @@ static void __kprobes enable_singlestep(struct kprobe_ctlblk *kcb, regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT); regs->psw.addr = ip | PSW_ADDR_AMODE; } +NOKPROBE_SYMBOL(enable_singlestep); -static void __kprobes disable_singlestep(struct kprobe_ctlblk *kcb, - struct pt_regs *regs, - unsigned long ip) +static void disable_singlestep(struct kprobe_ctlblk *kcb, + struct pt_regs 
*regs, + unsigned long ip) { /* Restore control regs and psw mask, set new psw address */ __ctl_load(kcb->kprobe_saved_ctl, 9, 11); @@ -210,41 +239,43 @@ static void __kprobes disable_singlestep(struct kprobe_ctlblk *kcb, regs->psw.mask |= kcb->kprobe_saved_imask; regs->psw.addr = ip | PSW_ADDR_AMODE; } +NOKPROBE_SYMBOL(disable_singlestep); /* * Activate a kprobe by storing its pointer to current_kprobe. The * previous kprobe is stored in kcb->prev_kprobe. A stack of up to * two kprobes can be active, see KPROBE_REENTER. */ -static void __kprobes push_kprobe(struct kprobe_ctlblk *kcb, struct kprobe *p) +static void push_kprobe(struct kprobe_ctlblk *kcb, struct kprobe *p) { kcb->prev_kprobe.kp = __this_cpu_read(current_kprobe); kcb->prev_kprobe.status = kcb->kprobe_status; __this_cpu_write(current_kprobe, p); } +NOKPROBE_SYMBOL(push_kprobe); /* * Deactivate a kprobe by backing up to the previous state. If the * current state is KPROBE_REENTER prev_kprobe.kp will be non-NULL, * for any other state prev_kprobe.kp will be NULL. */ -static void __kprobes pop_kprobe(struct kprobe_ctlblk *kcb) +static void pop_kprobe(struct kprobe_ctlblk *kcb) { __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp); kcb->kprobe_status = kcb->prev_kprobe.status; } +NOKPROBE_SYMBOL(pop_kprobe); -void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, - struct pt_regs *regs) +void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) { ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14]; /* Replace the return addr with trampoline addr */ regs->gprs[14] = (unsigned long) &kretprobe_trampoline; } +NOKPROBE_SYMBOL(arch_prepare_kretprobe); -static void __kprobes kprobe_reenter_check(struct kprobe_ctlblk *kcb, - struct kprobe *p) +static void kprobe_reenter_check(struct kprobe_ctlblk *kcb, struct kprobe *p) { switch (kcb->kprobe_status) { case KPROBE_HIT_SSDONE: @@ -264,8 +295,9 @@ static void __kprobes kprobe_reenter_check(struct kprobe_ctlblk *kcb, BUG(); } } +NOKPROBE_SYMBOL(kprobe_reenter_check); -static int __kprobes kprobe_handler(struct pt_regs *regs) +static int kprobe_handler(struct pt_regs *regs) { struct kprobe_ctlblk *kcb; struct kprobe *p; @@ -339,6 +371,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) preempt_enable_no_resched(); return 0; } +NOKPROBE_SYMBOL(kprobe_handler); /* * Function return probe trampoline: @@ -355,8 +388,7 @@ static void __used kretprobe_trampoline_holder(void) /* * Called when the probe at kretprobe trampoline is hit */ -static int __kprobes trampoline_probe_handler(struct kprobe *p, - struct pt_regs *regs) +static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) { struct kretprobe_instance *ri; struct hlist_head *head, empty_rp; @@ -444,6 +476,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p, */ return 1; } +NOKPROBE_SYMBOL(trampoline_probe_handler); /* * Called after single-stepping. p->addr is the address of the @@ -453,12 +486,30 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p, * single-stepped a copy of the instruction. The address of this * copy is p->ainsn.insn. 
*/ -static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs) +static void resume_execution(struct kprobe *p, struct pt_regs *regs) { struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); unsigned long ip = regs->psw.addr & PSW_ADDR_INSN; int fixup = probe_get_fixup_type(p->ainsn.insn); + /* Check if the kprobes location is an enabled ftrace caller */ + if (p->ainsn.is_ftrace_insn) { + struct ftrace_insn *insn = (struct ftrace_insn *) p->addr; + struct ftrace_insn call_insn; + + ftrace_generate_call_insn(&call_insn, (unsigned long) p->addr); + /* + * A kprobe on an enabled ftrace call site actually single + * stepped an unconditional branch (ftrace nop equivalent). + * Now we need to fixup things and pretend that a brasl r0,... + * was executed instead. + */ + if (insn->disp == KPROBE_ON_FTRACE_CALL) { + ip += call_insn.disp * 2 - MCOUNT_INSN_SIZE; + regs->gprs[0] = (unsigned long)p->addr + sizeof(*insn); + } + } + if (fixup & FIXUP_PSW_NORMAL) ip += (unsigned long) p->addr - (unsigned long) p->ainsn.insn; @@ -476,8 +527,9 @@ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs) disable_singlestep(kcb, regs, ip); } +NOKPROBE_SYMBOL(resume_execution); -static int __kprobes post_kprobe_handler(struct pt_regs *regs) +static int post_kprobe_handler(struct pt_regs *regs) { struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); struct kprobe *p = kprobe_running(); @@ -504,8 +556,9 @@ static int __kprobes post_kprobe_handler(struct pt_regs *regs) return 1; } +NOKPROBE_SYMBOL(post_kprobe_handler); -static int __kprobes kprobe_trap_handler(struct pt_regs *regs, int trapnr) +static int kprobe_trap_handler(struct pt_regs *regs, int trapnr) { struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); struct kprobe *p = kprobe_running(); @@ -567,8 +620,9 @@ static int __kprobes kprobe_trap_handler(struct pt_regs *regs, int trapnr) } return 0; } +NOKPROBE_SYMBOL(kprobe_trap_handler); -int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr) +int kprobe_fault_handler(struct pt_regs *regs, int trapnr) { int ret; @@ -579,12 +633,13 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr) local_irq_restore(regs->psw.mask & ~PSW_MASK_PER); return ret; } +NOKPROBE_SYMBOL(kprobe_fault_handler); /* * Wrapper routine to for handling exceptions. 
*/ -int __kprobes kprobe_exceptions_notify(struct notifier_block *self, - unsigned long val, void *data) +int kprobe_exceptions_notify(struct notifier_block *self, + unsigned long val, void *data) { struct die_args *args = (struct die_args *) data; struct pt_regs *regs = args->regs; @@ -616,8 +671,9 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self, return ret; } +NOKPROBE_SYMBOL(kprobe_exceptions_notify); -int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) +int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) { struct jprobe *jp = container_of(p, struct jprobe, kp); struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); @@ -635,13 +691,15 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) memcpy(kcb->jprobes_stack, (void *) stack, MIN_STACK_SIZE(stack)); return 1; } +NOKPROBE_SYMBOL(setjmp_pre_handler); -void __kprobes jprobe_return(void) +void jprobe_return(void) { asm volatile(".word 0x0002"); } +NOKPROBE_SYMBOL(jprobe_return); -int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) +int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) { struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); unsigned long stack; @@ -655,6 +713,7 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) preempt_enable_no_resched(); return 1; } +NOKPROBE_SYMBOL(longjmp_break_handler); static struct kprobe trampoline = { .addr = (kprobe_opcode_t *) &kretprobe_trampoline, @@ -666,7 +725,8 @@ int __init arch_init_kprobes(void) return register_kprobe(&trampoline); } -int __kprobes arch_trampoline_kprobe(struct kprobe *p) +int arch_trampoline_kprobe(struct kprobe *p) { return p->addr == (kprobe_opcode_t *) &kretprobe_trampoline; } +NOKPROBE_SYMBOL(arch_trampoline_kprobe); diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S index 4300ea374826..b6dfc5bfcb89 100644 --- a/arch/s390/kernel/mcount.S +++ b/arch/s390/kernel/mcount.S @@ -27,6 +27,7 @@ ENTRY(ftrace_caller) .globl ftrace_regs_caller .set ftrace_regs_caller,ftrace_caller lgr %r1,%r15 + aghi %r0,MCOUNT_RETURN_FIXUP aghi %r15,-STACK_FRAME_SIZE stg %r1,__SF_BACKCHAIN(%r15) stg %r1,(STACK_PTREGS_GPRS+15*8)(%r15) diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c index b878f12a9597..c3f8d157cb0d 100644 --- a/arch/s390/kernel/perf_cpum_sf.c +++ b/arch/s390/kernel/perf_cpum_sf.c @@ -1383,7 +1383,6 @@ static int cpumsf_pmu_add(struct perf_event *event, int flags) cpuhw->lsctl.ed = 1; /* Set in_use flag and store event */ - event->hw.idx = 0; /* only one sampling event per CPU supported */ cpuhw->event = event; cpuhw->flags |= PMU_F_IN_USE; diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index ed84cc224899..aa7a83948c7b 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c @@ -61,7 +61,7 @@ unsigned long thread_saved_pc(struct task_struct *tsk) return sf->gprs[8]; } -extern void __kprobes kernel_thread_starter(void); +extern void kernel_thread_starter(void); /* * Free current thread data structures etc.. @@ -153,6 +153,7 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp, save_fp_ctl(&p->thread.fp_regs.fpc); save_fp_regs(p->thread.fp_regs.fprs); p->thread.fp_regs.pad = 0; + p->thread.vxrs = NULL; /* Set a new TLS ? 
*/ if (clone_flags & CLONE_SETTLS) { unsigned long tls = frame->childregs.gprs[6]; diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index 99a567b70d16..eabfb4594517 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c @@ -248,14 +248,27 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr) */ tmp = 0; + } else if (addr == (addr_t) &dummy->regs.fp_regs.fpc) { + /* + * floating point control reg. is in the thread structure + */ + tmp = child->thread.fp_regs.fpc; + tmp <<= BITS_PER_LONG - 32; + } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) { - /* - * floating point regs. are stored in the thread structure + /* + * floating point regs. are either in child->thread.fp_regs + * or the child->thread.vxrs array */ - offset = addr - (addr_t) &dummy->regs.fp_regs; - tmp = *(addr_t *)((addr_t) &child->thread.fp_regs + offset); - if (addr == (addr_t) &dummy->regs.fp_regs.fpc) - tmp <<= BITS_PER_LONG - 32; + offset = addr - (addr_t) &dummy->regs.fp_regs.fprs; +#ifdef CONFIG_64BIT + if (child->thread.vxrs) + tmp = *(addr_t *) + ((addr_t) child->thread.vxrs + 2*offset); + else +#endif + tmp = *(addr_t *) + ((addr_t) &child->thread.fp_regs.fprs + offset); } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) { /* @@ -383,16 +396,29 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data) */ return 0; + } else if (addr == (addr_t) &dummy->regs.fp_regs.fpc) { + /* + * floating point control reg. is in the thread structure + */ + if ((unsigned int) data != 0 || + test_fp_ctl(data >> (BITS_PER_LONG - 32))) + return -EINVAL; + child->thread.fp_regs.fpc = data >> (BITS_PER_LONG - 32); + } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) { /* - * floating point regs. are stored in the thread structure + * floating point regs. are either in child->thread.fp_regs + * or the child->thread.vxrs array */ - if (addr == (addr_t) &dummy->regs.fp_regs.fpc) - if ((unsigned int) data != 0 || - test_fp_ctl(data >> (BITS_PER_LONG - 32))) - return -EINVAL; - offset = addr - (addr_t) &dummy->regs.fp_regs; - *(addr_t *)((addr_t) &child->thread.fp_regs + offset) = data; + offset = addr - (addr_t) &dummy->regs.fp_regs.fprs; +#ifdef CONFIG_64BIT + if (child->thread.vxrs) + *(addr_t *)((addr_t) + child->thread.vxrs + 2*offset) = data; + else +#endif + *(addr_t *)((addr_t) + &child->thread.fp_regs.fprs + offset) = data; } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) { /* @@ -611,12 +637,26 @@ static u32 __peek_user_compat(struct task_struct *child, addr_t addr) */ tmp = 0; + } else if (addr == (addr_t) &dummy32->regs.fp_regs.fpc) { + /* + * floating point control reg. is in the thread structure + */ + tmp = child->thread.fp_regs.fpc; + } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) { /* - * floating point regs. are stored in the thread structure + * floating point regs. 
are either in child->thread.fp_regs + * or the child->thread.vxrs array */ - offset = addr - (addr_t) &dummy32->regs.fp_regs; - tmp = *(__u32 *)((addr_t) &child->thread.fp_regs + offset); + offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs; +#ifdef CONFIG_64BIT + if (child->thread.vxrs) + tmp = *(__u32 *) + ((addr_t) child->thread.vxrs + 2*offset); + else +#endif + tmp = *(__u32 *) + ((addr_t) &child->thread.fp_regs.fprs + offset); } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) { /* @@ -722,15 +762,28 @@ static int __poke_user_compat(struct task_struct *child, */ return 0; - } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) { + } else if (addr == (addr_t) &dummy32->regs.fp_regs.fpc) { /* - * floating point regs. are stored in the thread structure + * floating point control reg. is in the thread structure */ - if (addr == (addr_t) &dummy32->regs.fp_regs.fpc && - test_fp_ctl(tmp)) + if (test_fp_ctl(tmp)) return -EINVAL; - offset = addr - (addr_t) &dummy32->regs.fp_regs; - *(__u32 *)((addr_t) &child->thread.fp_regs + offset) = tmp; + child->thread.fp_regs.fpc = data; + + } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) { + /* + * floating point regs. are either in child->thread.fp_regs + * or the child->thread.vxrs array + */ + offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs; +#ifdef CONFIG_64BIT + if (child->thread.vxrs) + *(__u32 *)((addr_t) + child->thread.vxrs + 2*offset) = tmp; + else +#endif + *(__u32 *)((addr_t) + &child->thread.fp_regs.fprs + offset) = tmp; } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) { /* @@ -1038,12 +1091,6 @@ static int s390_tdb_set(struct task_struct *target, return 0; } -static int s390_vxrs_active(struct task_struct *target, - const struct user_regset *regset) -{ - return !!target->thread.vxrs; -} - static int s390_vxrs_low_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, @@ -1052,6 +1099,8 @@ static int s390_vxrs_low_get(struct task_struct *target, __u64 vxrs[__NUM_VXRS_LOW]; int i; + if (!MACHINE_HAS_VX) + return -ENODEV; if (target->thread.vxrs) { if (target == current) save_vx_regs(target->thread.vxrs); @@ -1070,6 +1119,8 @@ static int s390_vxrs_low_set(struct task_struct *target, __u64 vxrs[__NUM_VXRS_LOW]; int i, rc; + if (!MACHINE_HAS_VX) + return -ENODEV; if (!target->thread.vxrs) { rc = alloc_vector_registers(target); if (rc) @@ -1095,6 +1146,8 @@ static int s390_vxrs_high_get(struct task_struct *target, { __vector128 vxrs[__NUM_VXRS_HIGH]; + if (!MACHINE_HAS_VX) + return -ENODEV; if (target->thread.vxrs) { if (target == current) save_vx_regs(target->thread.vxrs); @@ -1112,6 +1165,8 @@ static int s390_vxrs_high_set(struct task_struct *target, { int rc; + if (!MACHINE_HAS_VX) + return -ENODEV; if (!target->thread.vxrs) { rc = alloc_vector_registers(target); if (rc) @@ -1196,7 +1251,6 @@ static const struct user_regset s390_regsets[] = { .n = __NUM_VXRS_LOW, .size = sizeof(__u64), .align = sizeof(__u64), - .active = s390_vxrs_active, .get = s390_vxrs_low_get, .set = s390_vxrs_low_set, }, @@ -1205,7 +1259,6 @@ static const struct user_regset s390_regsets[] = { .n = __NUM_VXRS_HIGH, .size = sizeof(__vector128), .align = sizeof(__vector128), - .active = s390_vxrs_active, .get = s390_vxrs_high_get, .set = s390_vxrs_high_set, }, @@ -1419,7 +1472,6 @@ static const struct user_regset s390_compat_regsets[] = { .n = __NUM_VXRS_LOW, .size = sizeof(__u64), .align = sizeof(__u64), - .active = s390_vxrs_active, .get = s390_vxrs_low_get, .set = s390_vxrs_low_set, }, @@ 
-1428,7 +1480,6 @@ static const struct user_regset s390_compat_regsets[] = { .n = __NUM_VXRS_HIGH, .size = sizeof(__vector128), .align = sizeof(__vector128), - .active = s390_vxrs_active, .get = s390_vxrs_high_get, .set = s390_vxrs_high_set, }, diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index e80d9ff9a56d..4e532c67832f 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -41,7 +41,6 @@ #include <linux/ctype.h> #include <linux/reboot.h> #include <linux/topology.h> -#include <linux/ftrace.h> #include <linux/kexec.h> #include <linux/crash_dump.h> #include <linux/memory.h> @@ -356,7 +355,6 @@ static void __init setup_lowcore(void) lc->steal_timer = S390_lowcore.steal_timer; lc->last_update_timer = S390_lowcore.last_update_timer; lc->last_update_clock = S390_lowcore.last_update_clock; - lc->ftrace_func = S390_lowcore.ftrace_func; restart_stack = __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0); restart_stack += ASYNC_SIZE; diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index 0c1a0ff0a558..6a2ac257d98f 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c @@ -371,7 +371,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, restorer = (unsigned long) ka->sa.sa_restorer | PSW_ADDR_AMODE; } else { /* Signal frame without vector registers are short ! */ - __u16 __user *svc = (void *) frame + frame_size - 2; + __u16 __user *svc = (void __user *) frame + frame_size - 2; if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn, svc)) return -EFAULT; restorer = (unsigned long) svc | PSW_ADDR_AMODE; diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 6fd9e60101f1..0b499f5cbe19 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -236,7 +236,6 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu) lc->percpu_offset = __per_cpu_offset[cpu]; lc->kernel_asce = S390_lowcore.kernel_asce; lc->machine_flags = S390_lowcore.machine_flags; - lc->ftrace_func = S390_lowcore.ftrace_func; lc->user_timer = lc->system_timer = lc->steal_timer = 0; __ctl_store(lc->cregs_save_area, 0, 15); save_access_regs((unsigned int *) lc->access_regs_save_area); diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S index 9f7087fd58de..a2987243bc76 100644 --- a/arch/s390/kernel/syscalls.S +++ b/arch/s390/kernel/syscalls.S @@ -360,3 +360,5 @@ SYSCALL(sys_seccomp,sys_seccomp,compat_sys_seccomp) SYSCALL(sys_getrandom,sys_getrandom,compat_sys_getrandom) SYSCALL(sys_memfd_create,sys_memfd_create,compat_sys_memfd_create) /* 350 */ SYSCALL(sys_bpf,sys_bpf,compat_sys_bpf) +SYSCALL(sys_ni_syscall,sys_s390_pci_mmio_write,compat_sys_s390_pci_mmio_write) +SYSCALL(sys_ni_syscall,sys_s390_pci_mmio_read,compat_sys_s390_pci_mmio_read) diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index 005d665fe4a5..20660dddb2d6 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -61,10 +61,11 @@ static DEFINE_PER_CPU(struct clock_event_device, comparators); /* * Scheduler clock - returns current time in nanosec units. 
*/ -unsigned long long notrace __kprobes sched_clock(void) +unsigned long long notrace sched_clock(void) { return tod_to_ns(get_tod_clock_monotonic()); } +NOKPROBE_SYMBOL(sched_clock); /* * Monotonic_clock - returns # of nanoseconds passed since time_init() diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index 9ff5ecba26ab..f081cf1157c3 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c @@ -49,7 +49,8 @@ static inline void report_user_fault(struct pt_regs *regs, int signr) return; if (!printk_ratelimit()) return; - printk("User process fault: interruption code 0x%X ", regs->int_code); + printk("User process fault: interruption code %04x ilc:%d ", + regs->int_code & 0xffff, regs->int_code >> 17); print_vma_addr("in ", regs->psw.addr & PSW_ADDR_INSN); printk("\n"); show_regs(regs); @@ -87,16 +88,16 @@ void do_report_trap(struct pt_regs *regs, int si_signo, int si_code, char *str) } } -static void __kprobes do_trap(struct pt_regs *regs, int si_signo, int si_code, - char *str) +static void do_trap(struct pt_regs *regs, int si_signo, int si_code, char *str) { if (notify_die(DIE_TRAP, str, regs, 0, regs->int_code, si_signo) == NOTIFY_STOP) return; do_report_trap(regs, si_signo, si_code, str); } +NOKPROBE_SYMBOL(do_trap); -void __kprobes do_per_trap(struct pt_regs *regs) +void do_per_trap(struct pt_regs *regs) { siginfo_t info; @@ -111,6 +112,7 @@ void __kprobes do_per_trap(struct pt_regs *regs) (void __force __user *) current->thread.per_event.address; force_sig_info(SIGTRAP, &info, current); } +NOKPROBE_SYMBOL(do_per_trap); void default_trap_handler(struct pt_regs *regs) { @@ -151,8 +153,6 @@ DO_ERROR_INFO(privileged_op, SIGILL, ILL_PRVOPC, "privileged operation") DO_ERROR_INFO(special_op_exception, SIGILL, ILL_ILLOPN, "special operation exception") -DO_ERROR_INFO(translation_exception, SIGILL, ILL_ILLOPN, - "translation exception") #ifdef CONFIG_64BIT DO_ERROR_INFO(transaction_exception, SIGILL, ILL_ILLOPN, @@ -179,7 +179,13 @@ static inline void do_fp_trap(struct pt_regs *regs, int fpc) do_trap(regs, SIGFPE, si_code, "floating point exception"); } -void __kprobes illegal_op(struct pt_regs *regs) +void translation_exception(struct pt_regs *regs) +{ + /* May never happen. 
*/ + die(regs, "Translation exception"); +} + +void illegal_op(struct pt_regs *regs) { siginfo_t info; __u8 opcode[6]; @@ -252,7 +258,7 @@ void __kprobes illegal_op(struct pt_regs *regs) if (signal) do_trap(regs, signal, ILL_ILLOPC, "illegal operation"); } - +NOKPROBE_SYMBOL(illegal_op); #ifdef CONFIG_MATHEMU void specification_exception(struct pt_regs *regs) @@ -469,7 +475,7 @@ void space_switch_exception(struct pt_regs *regs) do_trap(regs, SIGILL, ILL_PRVOPC, "space switch event"); } -void __kprobes kernel_stack_overflow(struct pt_regs * regs) +void kernel_stack_overflow(struct pt_regs *regs) { bust_spinlocks(1); printk("Kernel stack overflow.\n"); @@ -477,6 +483,7 @@ void __kprobes kernel_stack_overflow(struct pt_regs * regs) bust_spinlocks(0); panic("Corrupt kernel stack, can't continue."); } +NOKPROBE_SYMBOL(kernel_stack_overflow); void __init trap_init(void) { diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 55aade49b6d1..6b049ee75a56 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -271,7 +271,7 @@ static int kvm_s390_mem_control(struct kvm *kvm, struct kvm_device_attr *attr) case KVM_S390_VM_MEM_CLR_CMMA: mutex_lock(&kvm->lock); idx = srcu_read_lock(&kvm->srcu); - page_table_reset_pgste(kvm->arch.gmap->mm, 0, TASK_SIZE, false); + s390_reset_cmma(kvm->arch.gmap->mm); srcu_read_unlock(&kvm->srcu, idx); mutex_unlock(&kvm->lock); ret = 0; diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 72bb2dd8b9cd..f47cb0c6d906 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -156,21 +156,25 @@ static int handle_store_cpu_address(struct kvm_vcpu *vcpu) return 0; } -static void __skey_check_enable(struct kvm_vcpu *vcpu) +static int __skey_check_enable(struct kvm_vcpu *vcpu) { + int rc = 0; if (!(vcpu->arch.sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE))) - return; + return rc; - s390_enable_skey(); + rc = s390_enable_skey(); trace_kvm_s390_skey_related_inst(vcpu); vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE); + return rc; } static int handle_skey(struct kvm_vcpu *vcpu) { - __skey_check_enable(vcpu); + int rc = __skey_check_enable(vcpu); + if (rc) + return rc; vcpu->stat.instruction_storage_key++; if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) @@ -683,7 +687,10 @@ static int handle_pfmf(struct kvm_vcpu *vcpu) } if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) { - __skey_check_enable(vcpu); + int rc = __skey_check_enable(vcpu); + + if (rc) + return rc; if (set_guest_storage_key(current->mm, useraddr, vcpu->run->s.regs.gprs[reg1] & PFMF_KEY, vcpu->run->s.regs.gprs[reg1] & PFMF_NQ)) diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index a2b81d6ce8a5..811937bb90be 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -261,8 +261,8 @@ static inline void report_user_fault(struct pt_regs *regs, long signr) return; if (!printk_ratelimit()) return; - printk(KERN_ALERT "User process fault: interruption code 0x%X ", - regs->int_code); + printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d", + regs->int_code & 0xffff, regs->int_code >> 17); print_vma_addr(KERN_CONT "in ", regs->psw.addr & PSW_ADDR_INSN); printk(KERN_CONT "\n"); printk(KERN_ALERT "failing address: %016lx TEID: %016lx\n", @@ -548,7 +548,7 @@ out: return fault; } -void __kprobes do_protection_exception(struct pt_regs *regs) +void do_protection_exception(struct pt_regs *regs) { unsigned long trans_exc_code; int fault; @@ -574,8 +574,9 @@ void __kprobes do_protection_exception(struct pt_regs *regs) if 
(unlikely(fault)) do_fault_error(regs, fault); } +NOKPROBE_SYMBOL(do_protection_exception); -void __kprobes do_dat_exception(struct pt_regs *regs) +void do_dat_exception(struct pt_regs *regs) { int access, fault; @@ -584,6 +585,7 @@ void __kprobes do_dat_exception(struct pt_regs *regs) if (unlikely(fault)) do_fault_error(regs, fault); } +NOKPROBE_SYMBOL(do_dat_exception); #ifdef CONFIG_PFAULT /* diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c index 3fef3b299665..426c9d462d1c 100644 --- a/arch/s390/mm/pageattr.c +++ b/arch/s390/mm/pageattr.c @@ -120,7 +120,7 @@ static void ipte_range(pte_t *pte, unsigned long address, int nr) } } -void kernel_map_pages(struct page *page, int numpages, int enable) +void __kernel_map_pages(struct page *page, int numpages, int enable) { unsigned long address; int nr, i, j; diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index 1b79ca67392f..71c7eff2c89f 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -18,6 +18,8 @@ #include <linux/rcupdate.h> #include <linux/slab.h> #include <linux/swapops.h> +#include <linux/ksm.h> +#include <linux/mman.h> #include <asm/pgtable.h> #include <asm/pgalloc.h> @@ -750,8 +752,7 @@ int gmap_ipte_notify(struct gmap *gmap, unsigned long gaddr, unsigned long len) break; /* Walk the process page table, lock and get pte pointer */ ptep = get_locked_pte(gmap->mm, addr, &ptl); - if (unlikely(!ptep)) - continue; + VM_BUG_ON(!ptep); /* Set notification bit in the pgste of the pte */ entry = *ptep; if ((pte_val(entry) & (_PAGE_INVALID | _PAGE_PROTECT)) == 0) { @@ -761,7 +762,7 @@ int gmap_ipte_notify(struct gmap *gmap, unsigned long gaddr, unsigned long len) gaddr += PAGE_SIZE; len -= PAGE_SIZE; } - spin_unlock(ptl); + pte_unmap_unlock(ptep, ptl); } up_read(&gmap->mm->mmap_sem); return rc; @@ -834,99 +835,6 @@ static inline void page_table_free_pgste(unsigned long *table) __free_page(page); } -static inline unsigned long page_table_reset_pte(struct mm_struct *mm, pmd_t *pmd, - unsigned long addr, unsigned long end, bool init_skey) -{ - pte_t *start_pte, *pte; - spinlock_t *ptl; - pgste_t pgste; - - start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl); - pte = start_pte; - do { - pgste = pgste_get_lock(pte); - pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK; - if (init_skey) { - unsigned long address; - - pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT | - PGSTE_GR_BIT | PGSTE_GC_BIT); - - /* skip invalid and not writable pages */ - if (pte_val(*pte) & _PAGE_INVALID || - !(pte_val(*pte) & _PAGE_WRITE)) { - pgste_set_unlock(pte, pgste); - continue; - } - - address = pte_val(*pte) & PAGE_MASK; - page_set_storage_key(address, PAGE_DEFAULT_KEY, 1); - } - pgste_set_unlock(pte, pgste); - } while (pte++, addr += PAGE_SIZE, addr != end); - pte_unmap_unlock(start_pte, ptl); - - return addr; -} - -static inline unsigned long page_table_reset_pmd(struct mm_struct *mm, pud_t *pud, - unsigned long addr, unsigned long end, bool init_skey) -{ - unsigned long next; - pmd_t *pmd; - - pmd = pmd_offset(pud, addr); - do { - next = pmd_addr_end(addr, end); - if (pmd_none_or_clear_bad(pmd)) - continue; - next = page_table_reset_pte(mm, pmd, addr, next, init_skey); - } while (pmd++, addr = next, addr != end); - - return addr; -} - -static inline unsigned long page_table_reset_pud(struct mm_struct *mm, pgd_t *pgd, - unsigned long addr, unsigned long end, bool init_skey) -{ - unsigned long next; - pud_t *pud; - - pud = pud_offset(pgd, addr); - do { - next = pud_addr_end(addr, end); - if (pud_none_or_clear_bad(pud)) - 
continue; - next = page_table_reset_pmd(mm, pud, addr, next, init_skey); - } while (pud++, addr = next, addr != end); - - return addr; -} - -void page_table_reset_pgste(struct mm_struct *mm, unsigned long start, - unsigned long end, bool init_skey) -{ - unsigned long addr, next; - pgd_t *pgd; - - down_write(&mm->mmap_sem); - if (init_skey && mm_use_skey(mm)) - goto out_up; - addr = start; - pgd = pgd_offset(mm, addr); - do { - next = pgd_addr_end(addr, end); - if (pgd_none_or_clear_bad(pgd)) - continue; - next = page_table_reset_pud(mm, pgd, addr, next, init_skey); - } while (pgd++, addr = next, addr != end); - if (init_skey) - current->mm->context.use_skey = 1; -out_up: - up_write(&mm->mmap_sem); -} -EXPORT_SYMBOL(page_table_reset_pgste); - int set_guest_storage_key(struct mm_struct *mm, unsigned long addr, unsigned long key, bool nq) { @@ -992,11 +900,6 @@ static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm) return NULL; } -void page_table_reset_pgste(struct mm_struct *mm, unsigned long start, - unsigned long end, bool init_skey) -{ -} - static inline void page_table_free_pgste(unsigned long *table) { } @@ -1347,13 +1250,89 @@ EXPORT_SYMBOL_GPL(s390_enable_sie); * Enable storage key handling from now on and initialize the storage * keys with the default key. */ -void s390_enable_skey(void) +static int __s390_enable_skey(pte_t *pte, unsigned long addr, + unsigned long next, struct mm_walk *walk) { - page_table_reset_pgste(current->mm, 0, TASK_SIZE, true); + unsigned long ptev; + pgste_t pgste; + + pgste = pgste_get_lock(pte); + /* + * Remove all zero page mappings, + * after establishing a policy to forbid zero page mappings + * following faults for that page will get fresh anonymous pages + */ + if (is_zero_pfn(pte_pfn(*pte))) { + ptep_flush_direct(walk->mm, addr, pte); + pte_val(*pte) = _PAGE_INVALID; + } + /* Clear storage key */ + pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT | + PGSTE_GR_BIT | PGSTE_GC_BIT); + ptev = pte_val(*pte); + if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE)) + page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1); + pgste_set_unlock(pte, pgste); + return 0; +} + +int s390_enable_skey(void) +{ + struct mm_walk walk = { .pte_entry = __s390_enable_skey }; + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + int rc = 0; + + down_write(&mm->mmap_sem); + if (mm_use_skey(mm)) + goto out_up; + + mm->context.use_skey = 1; + for (vma = mm->mmap; vma; vma = vma->vm_next) { + if (ksm_madvise(vma, vma->vm_start, vma->vm_end, + MADV_UNMERGEABLE, &vma->vm_flags)) { + mm->context.use_skey = 0; + rc = -ENOMEM; + goto out_up; + } + } + mm->def_flags &= ~VM_MERGEABLE; + + walk.mm = mm; + walk_page_range(0, TASK_SIZE, &walk); + +out_up: + up_write(&mm->mmap_sem); + return rc; } EXPORT_SYMBOL_GPL(s390_enable_skey); /* + * Reset CMMA state, make all pages stable again. 
+ */ +static int __s390_reset_cmma(pte_t *pte, unsigned long addr, + unsigned long next, struct mm_walk *walk) +{ + pgste_t pgste; + + pgste = pgste_get_lock(pte); + pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK; + pgste_set_unlock(pte, pgste); + return 0; +} + +void s390_reset_cmma(struct mm_struct *mm) +{ + struct mm_walk walk = { .pte_entry = __s390_reset_cmma }; + + down_write(&mm->mmap_sem); + walk.mm = mm; + walk_page_range(0, TASK_SIZE, &walk); + up_write(&mm->mmap_sem); +} +EXPORT_SYMBOL_GPL(s390_reset_cmma); + +/* * Test and reset if a guest page is dirty */ bool gmap_test_and_clear_dirty(unsigned long address, struct gmap *gmap) diff --git a/arch/s390/pci/Makefile b/arch/s390/pci/Makefile index a9e1dc4ae442..805d8b29193a 100644 --- a/arch/s390/pci/Makefile +++ b/arch/s390/pci/Makefile @@ -3,4 +3,4 @@ # obj-$(CONFIG_PCI) += pci.o pci_dma.o pci_clp.o pci_sysfs.o \ - pci_event.o pci_debug.o pci_insn.o + pci_event.o pci_debug.o pci_insn.o pci_mmio.o diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index 2fa7b14b9c08..3290f11ae1d9 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c @@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(zpci_list_lock); static struct irq_chip zpci_irq_chip = { .name = "zPCI", - .irq_unmask = unmask_msi_irq, - .irq_mask = mask_msi_irq, + .irq_unmask = pci_msi_unmask_irq, + .irq_mask = pci_msi_mask_irq, }; static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES); @@ -369,8 +369,7 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) if (type == PCI_CAP_ID_MSI && nvec > 1) return 1; - msi_vecs = min(nvec, ZPCI_MSI_VEC_MAX); - msi_vecs = min_t(unsigned int, msi_vecs, CONFIG_PCI_NR_MSI); + msi_vecs = min_t(unsigned int, nvec, zdev->max_msi); /* Allocate adapter summary indicator bit */ rc = -EIO; @@ -403,7 +402,7 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) msg.data = hwirq; msg.address_lo = zdev->msi_addr & 0xffffffff; msg.address_hi = zdev->msi_addr >> 32; - write_msi_msg(irq, &msg); + pci_write_msi_msg(irq, &msg); airq_iv_set_data(zdev->aibv, hwirq, irq); hwirq++; } @@ -448,9 +447,9 @@ void arch_teardown_msi_irqs(struct pci_dev *pdev) /* Release MSI interrupts */ list_for_each_entry(msi, &pdev->msi_list, list) { if (msi->msi_attrib.is_msix) - default_msix_mask_irq(msi, 1); + __pci_msix_desc_mask_irq(msi, 1); else - default_msi_mask_irq(msi, 1, 1); + __pci_msi_desc_mask_irq(msi, 1, 1); irq_set_msi_desc(msi->irq, NULL); irq_free_desc(msi->irq); msi->msg.address_lo = 0; @@ -474,7 +473,8 @@ static void zpci_map_resources(struct zpci_dev *zdev) len = pci_resource_len(pdev, i); if (!len) continue; - pdev->resource[i].start = (resource_size_t) pci_iomap(pdev, i, 0); + pdev->resource[i].start = + (resource_size_t __force) pci_iomap(pdev, i, 0); pdev->resource[i].end = pdev->resource[i].start + len - 1; } } @@ -489,7 +489,8 @@ static void zpci_unmap_resources(struct zpci_dev *zdev) len = pci_resource_len(pdev, i); if (!len) continue; - pci_iounmap(pdev, (void *) pdev->resource[i].start); + pci_iounmap(pdev, (void __iomem __force *) + pdev->resource[i].start); } } diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c index 6e22a247de9b..d6e411ed8b1f 100644 --- a/arch/s390/pci/pci_clp.c +++ b/arch/s390/pci/pci_clp.c @@ -62,6 +62,7 @@ static void clp_store_query_pci_fngrp(struct zpci_dev *zdev, zdev->tlb_refresh = response->refresh; zdev->dma_mask = response->dasm; zdev->msi_addr = response->msia; + zdev->max_msi = response->noi; zdev->fmb_update = response->mui; switch (response->version) { diff --git 
a/arch/s390/pci/pci_debug.c b/arch/s390/pci/pci_debug.c index eec598c5939f..3229a2e570df 100644 --- a/arch/s390/pci/pci_debug.c +++ b/arch/s390/pci/pci_debug.c @@ -158,10 +158,7 @@ int __init zpci_debug_init(void) void zpci_debug_exit(void) { - if (pci_debug_msg_id) - debug_unregister(pci_debug_msg_id); - if (pci_debug_err_id) - debug_unregister(pci_debug_err_id); - + debug_unregister(pci_debug_msg_id); + debug_unregister(pci_debug_err_id); debugfs_remove(debugfs_root); } diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c new file mode 100644 index 000000000000..62c5ea6d8682 --- /dev/null +++ b/arch/s390/pci/pci_mmio.c @@ -0,0 +1,115 @@ +/* + * Access to PCI I/O memory from user space programs. + * + * Copyright IBM Corp. 2014 + * Author(s): Alexey Ishchuk <aishchuk@linux.vnet.ibm.com> + */ +#include <linux/kernel.h> +#include <linux/syscalls.h> +#include <linux/init.h> +#include <linux/mm.h> +#include <linux/errno.h> +#include <linux/pci.h> + +static long get_pfn(unsigned long user_addr, unsigned long access, + unsigned long *pfn) +{ + struct vm_area_struct *vma; + long ret; + + down_read(&current->mm->mmap_sem); + ret = -EINVAL; + vma = find_vma(current->mm, user_addr); + if (!vma) + goto out; + ret = -EACCES; + if (!(vma->vm_flags & access)) + goto out; + ret = follow_pfn(vma, user_addr, pfn); +out: + up_read(&current->mm->mmap_sem); + return ret; +} + +SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr, + const void __user *, user_buffer, size_t, length) +{ + u8 local_buf[64]; + void __iomem *io_addr; + void *buf; + unsigned long pfn; + long ret; + + if (!zpci_is_enabled()) + return -ENODEV; + + if (length <= 0 || PAGE_SIZE - (mmio_addr & ~PAGE_MASK) < length) + return -EINVAL; + if (length > 64) { + buf = kmalloc(length, GFP_KERNEL); + if (!buf) + return -ENOMEM; + } else + buf = local_buf; + + ret = get_pfn(mmio_addr, VM_WRITE, &pfn); + if (ret) + goto out; + io_addr = (void *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK)); + + ret = -EFAULT; + if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE) + goto out; + + if (copy_from_user(buf, user_buffer, length)) + goto out; + + memcpy_toio(io_addr, buf, length); + ret = 0; +out: + if (buf != local_buf) + kfree(buf); + return ret; +} + +SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr, + void __user *, user_buffer, size_t, length) +{ + u8 local_buf[64]; + void __iomem *io_addr; + void *buf; + unsigned long pfn; + long ret; + + if (!zpci_is_enabled()) + return -ENODEV; + + if (length <= 0 || PAGE_SIZE - (mmio_addr & ~PAGE_MASK) < length) + return -EINVAL; + if (length > 64) { + buf = kmalloc(length, GFP_KERNEL); + if (!buf) + return -ENOMEM; + } else + buf = local_buf; + + ret = get_pfn(mmio_addr, VM_READ, &pfn); + if (ret) + goto out; + io_addr = (void *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK)); + + ret = -EFAULT; + if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE) + goto out; + + memcpy_fromio(buf, io_addr, length); + + if (copy_to_user(user_buffer, buf, length)) + goto out; + + ret = 0; +out: + if (buf != local_buf) + kfree(buf); + return ret; +} diff --git a/arch/score/include/asm/Kbuild b/arch/score/include/asm/Kbuild index 46461c19f284..83ed116d414c 100644 --- a/arch/score/include/asm/Kbuild +++ b/arch/score/include/asm/Kbuild @@ -5,7 +5,6 @@ header-y += generic-y += barrier.h generic-y += clkdev.h generic-y += cputime.h -generic-y += hash.h generic-y += irq_work.h generic-y += mcs_spinlock.h generic-y += preempt.h diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 244fb4c81e25..c6b6ee5f38b2
100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -16,6 +16,7 @@ config SUPERH select HAVE_DEBUG_BUGVERBOSE select ARCH_HAVE_CUSTOM_GPIO_H select ARCH_HAVE_NMI_SAFE_CMPXCHG if (GUSA_RB || CPU_SH4A) + select ARCH_HAS_GCOV_PROFILE_ALL select PERF_USE_VMALLOC select HAVE_DEBUG_KMEMLEAK select HAVE_KERNEL_GZIP @@ -222,7 +223,6 @@ config CPU_SHX3 config ARCH_SHMOBILE bool select ARCH_SUSPEND_POSSIBLE - select PM select PM_RUNTIME config CPU_HAS_PMU diff --git a/arch/sh/boards/mach-ap325rxa/setup.c b/arch/sh/boards/mach-ap325rxa/setup.c index 5620e33c18a0..d4b01d4cc102 100644 --- a/arch/sh/boards/mach-ap325rxa/setup.c +++ b/arch/sh/boards/mach-ap325rxa/setup.c @@ -338,7 +338,7 @@ static struct soc_camera_platform_info camera_info = { .format_name = "UYVY", .format_depth = 16, .format = { - .code = V4L2_MBUS_FMT_UYVY8_2X8, + .code = MEDIA_BUS_FMT_UYVY8_2X8, .colorspace = V4L2_COLORSPACE_SMPTE170M, .field = V4L2_FIELD_NONE, .width = 640, diff --git a/arch/sh/include/asm/Kbuild b/arch/sh/include/asm/Kbuild index 5a6c9acff0d2..654ebb6bd5d8 100644 --- a/arch/sh/include/asm/Kbuild +++ b/arch/sh/include/asm/Kbuild @@ -8,7 +8,6 @@ generic-y += emergency-restart.h generic-y += errno.h generic-y += exec.h generic-y += fcntl.h -generic-y += hash.h generic-y += ioctl.h generic-y += ipcbuf.h generic-y += irq_regs.h diff --git a/arch/sh/kernel/cpu/shmobile/cpuidle.c b/arch/sh/kernel/cpu/shmobile/cpuidle.c index e3abfd4277e2..53b8eeb1db20 100644 --- a/arch/sh/kernel/cpu/shmobile/cpuidle.c +++ b/arch/sh/kernel/cpu/shmobile/cpuidle.c @@ -59,7 +59,6 @@ static struct cpuidle_driver cpuidle_driver = { .exit_latency = 1, .target_residency = 1 * 2, .power_usage = 3, - .flags = CPUIDLE_FLAG_TIME_VALID, .enter = cpuidle_sleep_enter, .name = "C1", .desc = "SuperH Sleep Mode", @@ -68,7 +67,6 @@ static struct cpuidle_driver cpuidle_driver = { .exit_latency = 100, .target_residency = 1 * 2, .power_usage = 1, - .flags = CPUIDLE_FLAG_TIME_VALID, .enter = cpuidle_sleep_enter, .name = "C2", .desc = "SuperH Sleep Mode [SF]", @@ -78,7 +76,6 @@ static struct cpuidle_driver cpuidle_driver = { .exit_latency = 2300, .target_residency = 1 * 2, .power_usage = 1, - .flags = CPUIDLE_FLAG_TIME_VALID, .enter = cpuidle_sleep_enter, .name = "C3", .desc = "SuperH Mobile Standby Mode [SF]", diff --git a/arch/sh/mm/numa.c b/arch/sh/mm/numa.c index 3d85225b9e95..bce52ba66206 100644 --- a/arch/sh/mm/numa.c +++ b/arch/sh/mm/numa.c @@ -31,7 +31,7 @@ void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end) unsigned long bootmem_paddr; /* Don't allow bogus node assignment */ - BUG_ON(nid > MAX_NUMNODES || nid <= 0); + BUG_ON(nid >= MAX_NUMNODES || nid <= 0); start_pfn = start >> PAGE_SHIFT; end_pfn = end >> PAGE_SHIFT; diff --git a/arch/sparc/crypto/aes_glue.c b/arch/sparc/crypto/aes_glue.c index df922f52d76d..705408766ab0 100644 --- a/arch/sparc/crypto/aes_glue.c +++ b/arch/sparc/crypto/aes_glue.c @@ -499,6 +499,6 @@ module_exit(aes_sparc64_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("AES Secure Hash Algorithm, sparc64 aes opcode accelerated"); -MODULE_ALIAS("aes"); +MODULE_ALIAS_CRYPTO("aes"); #include "crop_devid.c" diff --git a/arch/sparc/crypto/camellia_glue.c b/arch/sparc/crypto/camellia_glue.c index 888f6260b4ec..641f55cb61c3 100644 --- a/arch/sparc/crypto/camellia_glue.c +++ b/arch/sparc/crypto/camellia_glue.c @@ -322,6 +322,6 @@ module_exit(camellia_sparc64_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Camellia Cipher Algorithm, sparc64 camellia opcode accelerated"); -MODULE_ALIAS("aes"); 
+MODULE_ALIAS_CRYPTO("aes"); #include "crop_devid.c" diff --git a/arch/sparc/crypto/crc32c_glue.c b/arch/sparc/crypto/crc32c_glue.c index 5162fad912ce..d1064e46efe8 100644 --- a/arch/sparc/crypto/crc32c_glue.c +++ b/arch/sparc/crypto/crc32c_glue.c @@ -176,6 +176,6 @@ module_exit(crc32c_sparc64_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("CRC32c (Castagnoli), sparc64 crc32c opcode accelerated"); -MODULE_ALIAS("crc32c"); +MODULE_ALIAS_CRYPTO("crc32c"); #include "crop_devid.c" diff --git a/arch/sparc/crypto/des_glue.c b/arch/sparc/crypto/des_glue.c index 3065bc61f9d3..d11500972994 100644 --- a/arch/sparc/crypto/des_glue.c +++ b/arch/sparc/crypto/des_glue.c @@ -532,6 +532,6 @@ module_exit(des_sparc64_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms, sparc64 des opcode accelerated"); -MODULE_ALIAS("des"); +MODULE_ALIAS_CRYPTO("des"); #include "crop_devid.c" diff --git a/arch/sparc/crypto/md5_glue.c b/arch/sparc/crypto/md5_glue.c index 09a9ea1dfb69..64c7ff5f72a9 100644 --- a/arch/sparc/crypto/md5_glue.c +++ b/arch/sparc/crypto/md5_glue.c @@ -185,6 +185,6 @@ module_exit(md5_sparc64_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("MD5 Secure Hash Algorithm, sparc64 md5 opcode accelerated"); -MODULE_ALIAS("md5"); +MODULE_ALIAS_CRYPTO("md5"); #include "crop_devid.c" diff --git a/arch/sparc/crypto/sha1_glue.c b/arch/sparc/crypto/sha1_glue.c index 6cd5f29e1e0d..1b3e47accc74 100644 --- a/arch/sparc/crypto/sha1_glue.c +++ b/arch/sparc/crypto/sha1_glue.c @@ -180,6 +180,6 @@ module_exit(sha1_sparc64_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, sparc64 sha1 opcode accelerated"); -MODULE_ALIAS("sha1"); +MODULE_ALIAS_CRYPTO("sha1"); #include "crop_devid.c" diff --git a/arch/sparc/crypto/sha256_glue.c b/arch/sparc/crypto/sha256_glue.c index 04f555ab2680..285268ca9279 100644 --- a/arch/sparc/crypto/sha256_glue.c +++ b/arch/sparc/crypto/sha256_glue.c @@ -135,7 +135,7 @@ static int sha224_sparc64_final(struct shash_desc *desc, u8 *hash) sha256_sparc64_final(desc, D); memcpy(hash, D, SHA224_DIGEST_SIZE); - memset(D, 0, SHA256_DIGEST_SIZE); + memzero_explicit(D, SHA256_DIGEST_SIZE); return 0; } @@ -237,7 +237,7 @@ module_exit(sha256_sparc64_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm, sparc64 sha256 opcode accelerated"); -MODULE_ALIAS("sha224"); -MODULE_ALIAS("sha256"); +MODULE_ALIAS_CRYPTO("sha224"); +MODULE_ALIAS_CRYPTO("sha256"); #include "crop_devid.c" diff --git a/arch/sparc/crypto/sha512_glue.c b/arch/sparc/crypto/sha512_glue.c index f04d1994d19a..11eb36c3fc8c 100644 --- a/arch/sparc/crypto/sha512_glue.c +++ b/arch/sparc/crypto/sha512_glue.c @@ -139,7 +139,7 @@ static int sha384_sparc64_final(struct shash_desc *desc, u8 *hash) sha512_sparc64_final(desc, D); memcpy(hash, D, 48); - memset(D, 0, 64); + memzero_explicit(D, 64); return 0; } @@ -222,7 +222,7 @@ module_exit(sha512_sparc64_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("SHA-384 and SHA-512 Secure Hash Algorithm, sparc64 sha512 opcode accelerated"); -MODULE_ALIAS("sha384"); -MODULE_ALIAS("sha512"); +MODULE_ALIAS_CRYPTO("sha384"); +MODULE_ALIAS_CRYPTO("sha512"); #include "crop_devid.c" diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild index f5f94ce1692c..94f36e7086a7 100644 --- a/arch/sparc/include/asm/Kbuild +++ b/arch/sparc/include/asm/Kbuild @@ -6,7 +6,6 @@ generic-y += cputime.h generic-y += div64.h generic-y += emergency-restart.h generic-y += exec.h -generic-y += hash.h 
generic-y += irq_regs.h generic-y += irq_work.h generic-y += linkage.h diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h index 305dcc3dc721..76648941fea7 100644 --- a/arch/sparc/include/asm/barrier_64.h +++ b/arch/sparc/include/asm/barrier_64.h @@ -37,7 +37,9 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \ #define rmb() __asm__ __volatile__("":::"memory") #define wmb() __asm__ __volatile__("":::"memory") -#define read_barrier_depends() do { } while(0) +#define dma_rmb() rmb() +#define dma_wmb() wmb() + #define set_mb(__var, __value) \ do { __var = __value; membar_safe("#StoreLoad"); } while(0) @@ -51,7 +53,8 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \ #define smp_wmb() __asm__ __volatile__("":::"memory") #endif -#define smp_read_barrier_depends() do { } while(0) +#define read_barrier_depends() do { } while (0) +#define smp_read_barrier_depends() do { } while (0) #define smp_store_release(p, v) \ do { \ diff --git a/arch/sparc/include/asm/ldc.h b/arch/sparc/include/asm/ldc.h index 58ab64de25d2..6e9004aa6f25 100644 --- a/arch/sparc/include/asm/ldc.h +++ b/arch/sparc/include/asm/ldc.h @@ -61,6 +61,7 @@ void ldc_free(struct ldc_channel *lp); /* Register TX and RX queues of the link with the hypervisor. */ int ldc_bind(struct ldc_channel *lp); +void ldc_unbind(struct ldc_channel *lp); /* For non-RAW protocols we need to complete a handshake before * communication can proceed. ldc_connect() does that, if the diff --git a/arch/sparc/include/asm/parport.h b/arch/sparc/include/asm/parport.h index c55291e5b83e..f005ccac91cc 100644 --- a/arch/sparc/include/asm/parport.h +++ b/arch/sparc/include/asm/parport.h @@ -238,7 +238,6 @@ static const struct of_device_id ecpp_match[] = { static struct platform_driver ecpp_driver = { .driver = { .name = "ecpp", - .owner = THIS_MODULE, .of_match_table = ecpp_match, }, .probe = ecpp_probe, diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index bfeb626085ac..1ff9e7864168 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h @@ -667,6 +667,13 @@ static inline unsigned long pmd_pfn(pmd_t pmd) } #ifdef CONFIG_TRANSPARENT_HUGEPAGE +static inline unsigned long pmd_dirty(pmd_t pmd) +{ + pte_t pte = __pte(pmd_val(pmd)); + + return pte_dirty(pte); +} + static inline unsigned long pmd_young(pmd_t pmd) { pte_t pte = __pte(pmd_val(pmd)); diff --git a/arch/sparc/include/asm/vio.h b/arch/sparc/include/asm/vio.h index d758c8d8f47d..8174f6cdbbbb 100644 --- a/arch/sparc/include/asm/vio.h +++ b/arch/sparc/include/asm/vio.h @@ -247,6 +247,25 @@ struct vio_net_desc { struct ldc_trans_cookie cookies[0]; }; +struct vio_net_dext { + u8 flags; +#define VNET_PKT_HASH 0x01 +#define VNET_PKT_HCK_IPV4_HDRCKSUM 0x02 +#define VNET_PKT_HCK_FULLCKSUM 0x04 +#define VNET_PKT_IPV4_LSO 0x08 +#define VNET_PKT_HCK_IPV4_HDRCKSUM_OK 0x10 +#define VNET_PKT_HCK_FULLCKSUM_OK 0x20 + + u8 vnet_hashval; + u16 ipv4_lso_mss; + u32 resv3; +}; + +static inline struct vio_net_dext *vio_net_ext(struct vio_net_desc *desc) +{ + return (struct vio_net_dext *)&desc->cookies[2]; +} + #define VIO_MAX_RING_COOKIES 24 struct vio_dring_state { @@ -281,6 +300,21 @@ static inline u32 vio_dring_avail(struct vio_dring_state *dr, ((dr->prod - dr->cons) & (ring_size - 1)) - 1); } +static inline u32 vio_dring_next(struct vio_dring_state *dr, u32 index) +{ + if (++index == dr->num_entries) + index = 0; + return index; +} + +static inline u32 vio_dring_prev(struct vio_dring_state *dr, u32 index) +{ + if 
(index == 0) + return dr->num_entries - 1; + else + return index - 1; +} + #define VIO_MAX_TYPE_LEN 32 #define VIO_MAX_COMPAT_LEN 64 diff --git a/arch/sparc/include/uapi/asm/socket.h b/arch/sparc/include/uapi/asm/socket.h index 54d9608681b6..e6a16c40be5f 100644 --- a/arch/sparc/include/uapi/asm/socket.h +++ b/arch/sparc/include/uapi/asm/socket.h @@ -76,6 +76,11 @@ #define SO_BPF_EXTENSIONS 0x0032 +#define SO_INCOMING_CPU 0x0033 + +#define SO_ATTACH_BPF 0x0034 +#define SO_DETACH_BPF SO_DETACH_FILTER + /* Security levels - as per NRL IPv6 - don't actually do anything */ #define SO_SECURITY_AUTHENTICATION 0x5001 #define SO_SECURITY_ENCRYPTION_TRANSPORT 0x5002 diff --git a/arch/sparc/include/uapi/asm/unistd.h b/arch/sparc/include/uapi/asm/unistd.h index 46d83842eddc..6f35f4df17f2 100644 --- a/arch/sparc/include/uapi/asm/unistd.h +++ b/arch/sparc/include/uapi/asm/unistd.h @@ -415,8 +415,9 @@ #define __NR_getrandom 347 #define __NR_memfd_create 348 #define __NR_bpf 349 +#define __NR_execveat 350 -#define NR_syscalls 350 +#define NR_syscalls 351 /* Bitmask values returned from kern_features system call. */ #define KERN_FEATURE_MIXED_MODE_STACK 0x00000001 diff --git a/arch/sparc/kernel/apc.c b/arch/sparc/kernel/apc.c index eefda32b595e..742f6c4436bf 100644 --- a/arch/sparc/kernel/apc.c +++ b/arch/sparc/kernel/apc.c @@ -178,7 +178,6 @@ MODULE_DEVICE_TABLE(of, apc_match); static struct platform_driver apc_driver = { .driver = { .name = "apc", - .owner = THIS_MODULE, .of_match_table = apc_match, }, .probe = apc_probe, diff --git a/arch/sparc/kernel/auxio_64.c b/arch/sparc/kernel/auxio_64.c index 86e55778e4af..086435c17981 100644 --- a/arch/sparc/kernel/auxio_64.c +++ b/arch/sparc/kernel/auxio_64.c @@ -135,7 +135,6 @@ static struct platform_driver auxio_driver = { .probe = auxio_probe, .driver = { .name = "auxio", - .owner = THIS_MODULE, .of_match_table = auxio_match, }, }; diff --git a/arch/sparc/kernel/central.c b/arch/sparc/kernel/central.c index 052b5a44318f..4696958299e9 100644 --- a/arch/sparc/kernel/central.c +++ b/arch/sparc/kernel/central.c @@ -152,7 +152,6 @@ static struct platform_driver clock_board_driver = { .probe = clock_board_probe, .driver = { .name = "clock_board", - .owner = THIS_MODULE, .of_match_table = clock_board_match, }, }; @@ -257,7 +256,6 @@ static struct platform_driver fhc_driver = { .probe = fhc_probe, .driver = { .name = "fhc", - .owner = THIS_MODULE, .of_match_table = fhc_match, }, }; diff --git a/arch/sparc/kernel/chmc.c b/arch/sparc/kernel/chmc.c index dbb210d74e21..0de4bcb8261f 100644 --- a/arch/sparc/kernel/chmc.c +++ b/arch/sparc/kernel/chmc.c @@ -810,7 +810,6 @@ MODULE_DEVICE_TABLE(of, us3mc_match); static struct platform_driver us3mc_driver = { .driver = { .name = "us3mc", - .owner = THIS_MODULE, .of_match_table = us3mc_match, }, .probe = us3mc_probe, diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c index 4310332872d4..274a9f59d95c 100644 --- a/arch/sparc/kernel/ldc.c +++ b/arch/sparc/kernel/ldc.c @@ -1222,11 +1222,12 @@ out_err: } EXPORT_SYMBOL(ldc_alloc); -void ldc_free(struct ldc_channel *lp) +void ldc_unbind(struct ldc_channel *lp) { if (lp->flags & LDC_FLAG_REGISTERED_IRQS) { free_irq(lp->cfg.rx_irq, lp); free_irq(lp->cfg.tx_irq, lp); + lp->flags &= ~LDC_FLAG_REGISTERED_IRQS; } if (lp->flags & LDC_FLAG_REGISTERED_QUEUES) { @@ -1240,10 +1241,15 @@ void ldc_free(struct ldc_channel *lp) lp->flags &= ~LDC_FLAG_ALLOCED_QUEUES; } - hlist_del(&lp->list); + ldc_set_state(lp, LDC_STATE_INIT); +} +EXPORT_SYMBOL(ldc_unbind); +void ldc_free(struct 
ldc_channel *lp) +{ + ldc_unbind(lp); + hlist_del(&lp->list); kfree(lp->mssbuf); - ldc_iommu_release(lp); kfree(lp); diff --git a/arch/sparc/kernel/leon_pci_grpci1.c b/arch/sparc/kernel/leon_pci_grpci1.c index c8bf26edfa7c..3382f7b3eeef 100644 --- a/arch/sparc/kernel/leon_pci_grpci1.c +++ b/arch/sparc/kernel/leon_pci_grpci1.c @@ -708,7 +708,6 @@ static struct of_device_id grpci1_of_match[] = { static struct platform_driver grpci1_of_driver = { .driver = { .name = "grpci1", - .owner = THIS_MODULE, .of_match_table = grpci1_of_match, }, .probe = grpci1_of_probe, diff --git a/arch/sparc/kernel/leon_pci_grpci2.c b/arch/sparc/kernel/leon_pci_grpci2.c index e433a4d69fe0..94e392bdee7d 100644 --- a/arch/sparc/kernel/leon_pci_grpci2.c +++ b/arch/sparc/kernel/leon_pci_grpci2.c @@ -900,7 +900,6 @@ static struct of_device_id grpci2_of_match[] = { static struct platform_driver grpci2_of_driver = { .driver = { .name = "grpci2", - .owner = THIS_MODULE, .of_match_table = grpci2_of_match, }, .probe = grpci2_of_probe, diff --git a/arch/sparc/kernel/leon_smp.c b/arch/sparc/kernel/leon_smp.c index ea2bad306f93..71e16f2241c2 100644 --- a/arch/sparc/kernel/leon_smp.c +++ b/arch/sparc/kernel/leon_smp.c @@ -368,7 +368,7 @@ static struct smp_funcall { unsigned long arg5; unsigned long processors_in[NR_CPUS]; /* Set when ipi entered. */ unsigned long processors_out[NR_CPUS]; /* Set when ipi exited. */ -} ccall_info; +} ccall_info __attribute__((aligned(8))); static DEFINE_SPINLOCK(cross_call_lock); diff --git a/arch/sparc/kernel/pci_fire.c b/arch/sparc/kernel/pci_fire.c index e60fc6a67e9b..11a1f0d289d2 100644 --- a/arch/sparc/kernel/pci_fire.c +++ b/arch/sparc/kernel/pci_fire.c @@ -508,7 +508,6 @@ static const struct of_device_id fire_match[] = { static struct platform_driver fire_driver = { .driver = { .name = DRIVER_NAME, - .owner = THIS_MODULE, .of_match_table = fire_match, }, .probe = fire_probe, diff --git a/arch/sparc/kernel/pci_msi.c b/arch/sparc/kernel/pci_msi.c index 580651af73f2..84e16d81a6d8 100644 --- a/arch/sparc/kernel/pci_msi.c +++ b/arch/sparc/kernel/pci_msi.c @@ -111,10 +111,10 @@ static void free_msi(struct pci_pbm_info *pbm, int msi_num) static struct irq_chip msi_irq = { .name = "PCI-MSI", - .irq_mask = mask_msi_irq, - .irq_unmask = unmask_msi_irq, - .irq_enable = unmask_msi_irq, - .irq_disable = mask_msi_irq, + .irq_mask = pci_msi_mask_irq, + .irq_unmask = pci_msi_unmask_irq, + .irq_enable = pci_msi_unmask_irq, + .irq_disable = pci_msi_mask_irq, /* XXX affinity XXX */ }; @@ -161,7 +161,7 @@ static int sparc64_setup_msi_irq(unsigned int *irq_p, msg.data = msi; irq_set_msi_desc(*irq_p, entry); - write_msi_msg(*irq_p, &msg); + pci_write_msi_msg(*irq_p, &msg); return 0; diff --git a/arch/sparc/kernel/pci_psycho.c b/arch/sparc/kernel/pci_psycho.c index c647634ead2b..7dce27b3c761 100644 --- a/arch/sparc/kernel/pci_psycho.c +++ b/arch/sparc/kernel/pci_psycho.c @@ -604,7 +604,6 @@ static const struct of_device_id psycho_match[] = { static struct platform_driver psycho_driver = { .driver = { .name = DRIVER_NAME, - .owner = THIS_MODULE, .of_match_table = psycho_match, }, .probe = psycho_probe, diff --git a/arch/sparc/kernel/pci_sabre.c b/arch/sparc/kernel/pci_sabre.c index 6f00d27e8dac..00a616ffa35b 100644 --- a/arch/sparc/kernel/pci_sabre.c +++ b/arch/sparc/kernel/pci_sabre.c @@ -600,7 +600,6 @@ static const struct of_device_id sabre_match[] = { static struct platform_driver sabre_driver = { .driver = { .name = DRIVER_NAME, - .owner = THIS_MODULE, .of_match_table = sabre_match, }, .probe = sabre_probe, 
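A pattern repeated through the sparc hunks above is the deletion of ".owner = THIS_MODULE" from platform_driver definitions. The field is redundant because platform_driver_register() is a macro that expands to __platform_driver_register(drv, THIS_MODULE), so the driver core records the owning module on its own. A minimal sketch of the resulting style, using a hypothetical "demo" driver that is not part of this series:

#include <linux/module.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	dev_info(&pdev->dev, "probed\n");
	return 0;
}

static struct platform_driver demo_driver = {
	.probe = demo_probe,
	.driver = {
		.name = "demo",
		/* no .owner: platform_driver_register() supplies THIS_MODULE */
	},
};
module_platform_driver(demo_driver);

MODULE_LICENSE("GPL");
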
diff --git a/arch/sparc/kernel/pci_schizo.c b/arch/sparc/kernel/pci_schizo.c index f9c6813c132d..c664d3e3aa8d 100644 --- a/arch/sparc/kernel/pci_schizo.c +++ b/arch/sparc/kernel/pci_schizo.c @@ -1495,7 +1495,6 @@ static const struct of_device_id schizo_match[] = { static struct platform_driver schizo_driver = { .driver = { .name = DRIVER_NAME, - .owner = THIS_MODULE, .of_match_table = schizo_match, }, .probe = schizo_probe, diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c index 49d33b178793..47ddbd496a1e 100644 --- a/arch/sparc/kernel/pci_sun4v.c +++ b/arch/sparc/kernel/pci_sun4v.c @@ -1010,7 +1010,6 @@ static const struct of_device_id pci_sun4v_match[] = { static struct platform_driver pci_sun4v_driver = { .driver = { .name = DRIVER_NAME, - .owner = THIS_MODULE, .of_match_table = pci_sun4v_match, }, .probe = pci_sun4v_probe, diff --git a/arch/sparc/kernel/pmc.c b/arch/sparc/kernel/pmc.c index 8b7297faca79..97d123107ecb 100644 --- a/arch/sparc/kernel/pmc.c +++ b/arch/sparc/kernel/pmc.c @@ -82,7 +82,6 @@ MODULE_DEVICE_TABLE(of, pmc_match); static struct platform_driver pmc_driver = { .driver = { .name = "pmc", - .owner = THIS_MODULE, .of_match_table = pmc_match, }, .probe = pmc_probe, diff --git a/arch/sparc/kernel/power.c b/arch/sparc/kernel/power.c index 4cb23c41553f..1836cb965ff8 100644 --- a/arch/sparc/kernel/power.c +++ b/arch/sparc/kernel/power.c @@ -63,7 +63,6 @@ static struct platform_driver power_driver = { .probe = power_probe, .driver = { .name = "power", - .owner = THIS_MODULE, .of_match_table = power_match, }, }; diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S index 33a17e7b3ccd..bb0008927598 100644 --- a/arch/sparc/kernel/syscalls.S +++ b/arch/sparc/kernel/syscalls.S @@ -6,6 +6,11 @@ sys64_execve: jmpl %g1, %g0 flushw +sys64_execveat: + set sys_execveat, %g1 + jmpl %g1, %g0 + flushw + #ifdef CONFIG_COMPAT sunos_execv: mov %g0, %o2 @@ -13,6 +18,11 @@ sys32_execve: set compat_sys_execve, %g1 jmpl %g1, %g0 flushw + +sys32_execveat: + set compat_sys_execveat, %g1 + jmpl %g1, %g0 + flushw #endif .align 32 diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S index ad0cdf497b78..e31a9056a303 100644 --- a/arch/sparc/kernel/systbls_32.S +++ b/arch/sparc/kernel/systbls_32.S @@ -87,3 +87,4 @@ sys_call_table: /*335*/ .long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev /*340*/ .long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr /*345*/ .long sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf +/*350*/ .long sys_execveat diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S index 580cde9370c9..d72f76ae70eb 100644 --- a/arch/sparc/kernel/systbls_64.S +++ b/arch/sparc/kernel/systbls_64.S @@ -88,6 +88,7 @@ sys_call_table32: .word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev /*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr .word sys32_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf +/*350*/ .word sys32_execveat #endif /* CONFIG_COMPAT */ @@ -167,3 +168,4 @@ sys_call_table: .word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev /*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr .word sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf +/*350*/ .word sys64_execveat diff --git 
a/arch/sparc/kernel/time_32.c b/arch/sparc/kernel/time_32.c index 5923d1e4e7c9..2f80d23a0a44 100644 --- a/arch/sparc/kernel/time_32.c +++ b/arch/sparc/kernel/time_32.c @@ -322,7 +322,6 @@ static struct platform_driver clock_driver = { .probe = clock_probe, .driver = { .name = "rtc", - .owner = THIS_MODULE, .of_match_table = clock_match, }, }; diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c index 59da0c3ea788..edbbeb157d46 100644 --- a/arch/sparc/kernel/time_64.c +++ b/arch/sparc/kernel/time_64.c @@ -466,7 +466,6 @@ static struct platform_driver rtc_driver = { .probe = rtc_probe, .driver = { .name = "rtc", - .owner = THIS_MODULE, .of_match_table = rtc_match, }, }; @@ -499,7 +498,6 @@ static struct platform_driver bq4802_driver = { .probe = bq4802_probe, .driver = { .name = "bq4802", - .owner = THIS_MODULE, .of_match_table = bq4802_match, }, }; @@ -563,7 +561,6 @@ static struct platform_driver mostek_driver = { .probe = mostek_probe, .driver = { .name = "mostek", - .owner = THIS_MODULE, .of_match_table = mostek_match, }, }; diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 2d91c62f7f5f..3ea267c53320 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -1621,7 +1621,7 @@ static void __init kernel_physical_mapping_init(void) } #ifdef CONFIG_DEBUG_PAGEALLOC -void kernel_map_pages(struct page *page, int numpages, int enable) +void __kernel_map_pages(struct page *page, int numpages, int enable) { unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT; unsigned long phys_end = phys_start + (numpages * PAGE_SIZE); diff --git a/arch/tile/configs/tilegx_defconfig b/arch/tile/configs/tilegx_defconfig index 91de7dd7427f..37dc9364c4a1 100644 --- a/arch/tile/configs/tilegx_defconfig +++ b/arch/tile/configs/tilegx_defconfig @@ -218,7 +218,6 @@ CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y -CONFIG_NETPOLL_TRAP=y CONFIG_TUN=y CONFIG_VETH=m CONFIG_NET_DSA_MV88E6060=y diff --git a/arch/tile/configs/tilepro_defconfig b/arch/tile/configs/tilepro_defconfig index c7702b7ab7a5..76a2781dec2c 100644 --- a/arch/tile/configs/tilepro_defconfig +++ b/arch/tile/configs/tilepro_defconfig @@ -337,7 +337,6 @@ CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y -CONFIG_NETPOLL_TRAP=y CONFIG_TUN=y CONFIG_VETH=m CONFIG_NET_DSA_MV88E6060=y diff --git a/arch/tile/gxio/mpipe.c b/arch/tile/gxio/mpipe.c index 320ff5e6e61e..6f00e9850636 100644 --- a/arch/tile/gxio/mpipe.c +++ b/arch/tile/gxio/mpipe.c @@ -463,6 +463,7 @@ int gxio_mpipe_set_timestamp(gxio_mpipe_context_t *context, (uint64_t)ts->tv_nsec, (uint64_t)cycles); } +EXPORT_SYMBOL_GPL(gxio_mpipe_set_timestamp); int gxio_mpipe_get_timestamp(gxio_mpipe_context_t *context, struct timespec *ts) @@ -485,11 +486,13 @@ int gxio_mpipe_get_timestamp(gxio_mpipe_context_t *context, } return ret; } +EXPORT_SYMBOL_GPL(gxio_mpipe_get_timestamp); int gxio_mpipe_adjust_timestamp(gxio_mpipe_context_t *context, int64_t delta) { return gxio_mpipe_adjust_timestamp_aux(context, delta); } +EXPORT_SYMBOL_GPL(gxio_mpipe_adjust_timestamp); /* Get our internal context used for link name access. This context is * special in that it is not associated with an mPIPE service domain. 
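The EXPORT_SYMBOL_GPL() additions to mpipe.c above expose the mPIPE timestamp helpers to modules, for example a PTP clock driver built outside the core image. A rough sketch of such a caller follows; the gxio_mpipe_init() call and the instance number 0 are assumptions for illustration, and error handling is trimmed:

#include <linux/module.h>
#include <linux/time.h>
#include <gxio/mpipe.h>

static gxio_mpipe_context_t demo_ctx;

static int __init mpipe_ts_demo_init(void)
{
	struct timespec ts;
	int rc;

	/* Attach to mPIPE instance 0 (assumed) before using its clock. */
	rc = gxio_mpipe_init(&demo_ctx, 0);
	if (rc < 0)
		return rc;

	rc = gxio_mpipe_get_timestamp(&demo_ctx, &ts);
	if (rc == 0)
		pr_info("mpipe clock: %lld.%09ld\n",
			(long long)ts.tv_sec, ts.tv_nsec);
	return 0;
}
module_init(mpipe_ts_demo_init);

MODULE_LICENSE("GPL");

Without the new exports a module like this would fail to resolve gxio_mpipe_get_timestamp() at load time; using EXPORT_SYMBOL_GPL also limits the callers to GPL-compatible modules.
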
@@ -542,6 +545,7 @@ int gxio_mpipe_link_instance(const char *link_name) return gxio_mpipe_info_instance_aux(context, name); } +EXPORT_SYMBOL_GPL(gxio_mpipe_link_instance); int gxio_mpipe_link_enumerate_mac(int idx, char *link_name, uint8_t *link_mac) { diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild index e6462b8a6284..b4c488b65745 100644 --- a/arch/tile/include/asm/Kbuild +++ b/arch/tile/include/asm/Kbuild @@ -11,7 +11,6 @@ generic-y += errno.h generic-y += exec.h generic-y += fb.h generic-y += fcntl.h -generic-y += hash.h generic-y += hw_irq.h generic-y += ioctl.h generic-y += ioctls.h diff --git a/arch/tile/include/asm/io.h b/arch/tile/include/asm/io.h index d372641054d9..6ef4ecab1df2 100644 --- a/arch/tile/include/asm/io.h +++ b/arch/tile/include/asm/io.h @@ -396,8 +396,7 @@ extern void ioport_unmap(void __iomem *addr); static inline long ioport_panic(void) { #ifdef __tilegx__ - panic("PCI IO space support is disabled. Configure the kernel with" - " CONFIG_TILE_PCI_IO to enable it"); + panic("PCI IO space support is disabled. Configure the kernel with CONFIG_TILE_PCI_IO to enable it"); #else panic("inb/outb and friends do not exist on tile"); #endif @@ -406,7 +405,7 @@ static inline long ioport_panic(void) static inline void __iomem *ioport_map(unsigned long port, unsigned int len) { - pr_info("ioport_map: mapping IO resources is unsupported on tile.\n"); + pr_info("ioport_map: mapping IO resources is unsupported on tile\n"); return NULL; } diff --git a/arch/tile/include/asm/pgtable.h b/arch/tile/include/asm/pgtable.h index 33587f16c152..5d1950788c69 100644 --- a/arch/tile/include/asm/pgtable.h +++ b/arch/tile/include/asm/pgtable.h @@ -235,9 +235,9 @@ static inline void __pte_clear(pte_t *ptep) #define pte_donemigrate(x) hv_pte_set_present(hv_pte_clear_migrating(x)) #define pte_ERROR(e) \ - pr_err("%s:%d: bad pte 0x%016llx.\n", __FILE__, __LINE__, pte_val(e)) + pr_err("%s:%d: bad pte 0x%016llx\n", __FILE__, __LINE__, pte_val(e)) #define pgd_ERROR(e) \ - pr_err("%s:%d: bad pgd 0x%016llx.\n", __FILE__, __LINE__, pgd_val(e)) + pr_err("%s:%d: bad pgd 0x%016llx\n", __FILE__, __LINE__, pgd_val(e)) /* Return PA and protection info for a given kernel VA. */ int va_to_cpa_and_pte(void *va, phys_addr_t *cpa, pte_t *pte); diff --git a/arch/tile/include/asm/pgtable_64.h b/arch/tile/include/asm/pgtable_64.h index 2c8a9cd102d3..e96cec52f6d8 100644 --- a/arch/tile/include/asm/pgtable_64.h +++ b/arch/tile/include/asm/pgtable_64.h @@ -86,7 +86,7 @@ static inline int pud_huge_page(pud_t pud) } #define pmd_ERROR(e) \ - pr_err("%s:%d: bad pmd 0x%016llx.\n", __FILE__, __LINE__, pmd_val(e)) + pr_err("%s:%d: bad pmd 0x%016llx\n", __FILE__, __LINE__, pmd_val(e)) static inline void pud_clear(pud_t *pudp) { diff --git a/arch/tile/include/uapi/asm/ptrace.h b/arch/tile/include/uapi/asm/ptrace.h index 7757e1985fb6..d03b829857e8 100644 --- a/arch/tile/include/uapi/asm/ptrace.h +++ b/arch/tile/include/uapi/asm/ptrace.h @@ -52,12 +52,16 @@ typedef uint_reg_t pt_reg_t; * system call or exception. "struct sigcontext" has the same shape. */ struct pt_regs { - /* Saved main processor registers; 56..63 are special. */ - /* tp, sp, and lr must immediately follow regs[] for aliasing. */ - pt_reg_t regs[53]; - pt_reg_t tp; /* aliases regs[TREG_TP] */ - pt_reg_t sp; /* aliases regs[TREG_SP] */ - pt_reg_t lr; /* aliases regs[TREG_LR] */ + union { + /* Saved main processor registers; 56..63 are special. 
*/ + pt_reg_t regs[56]; + struct { + pt_reg_t __regs[53]; + pt_reg_t tp; /* aliases regs[TREG_TP] */ + pt_reg_t sp; /* aliases regs[TREG_SP] */ + pt_reg_t lr; /* aliases regs[TREG_LR] */ + }; + }; /* Saved special registers. */ pt_reg_t pc; /* stored in EX_CONTEXT_K_0 */ diff --git a/arch/tile/include/uapi/asm/sigcontext.h b/arch/tile/include/uapi/asm/sigcontext.h index 6348e59d3724..39ff5d1a232d 100644 --- a/arch/tile/include/uapi/asm/sigcontext.h +++ b/arch/tile/include/uapi/asm/sigcontext.h @@ -24,10 +24,16 @@ * but is simplified since we know the fault is from userspace. */ struct sigcontext { - __uint_reg_t gregs[53]; /* General-purpose registers. */ - __uint_reg_t tp; /* Aliases gregs[TREG_TP]. */ - __uint_reg_t sp; /* Aliases gregs[TREG_SP]. */ - __uint_reg_t lr; /* Aliases gregs[TREG_LR]. */ + __extension__ union { + /* General-purpose registers. */ + __uint_reg_t gregs[56]; + __extension__ struct { + __uint_reg_t __gregs[53]; + __uint_reg_t tp; /* Aliases gregs[TREG_TP]. */ + __uint_reg_t sp; /* Aliases gregs[TREG_SP]. */ + __uint_reg_t lr; /* Aliases gregs[TREG_LR]. */ + }; + }; __uint_reg_t pc; /* Program counter. */ __uint_reg_t ics; /* In Interrupt Critical Section? */ __uint_reg_t faultnum; /* Fault number. */ diff --git a/arch/tile/kernel/early_printk.c b/arch/tile/kernel/early_printk.c index b608e00e7f6d..aefb2c086726 100644 --- a/arch/tile/kernel/early_printk.c +++ b/arch/tile/kernel/early_printk.c @@ -43,13 +43,20 @@ static struct console early_hv_console = { void early_panic(const char *fmt, ...) { - va_list ap; + struct va_format vaf; + va_list args; + arch_local_irq_disable_all(); - va_start(ap, fmt); - early_printk("Kernel panic - not syncing: "); - early_vprintk(fmt, ap); - early_printk("\n"); - va_end(ap); + + va_start(args, fmt); + + vaf.fmt = fmt; + vaf.va = &args; + + early_printk("Kernel panic - not syncing: %pV", &vaf); + + va_end(args); + dump_stack(); hv_halt(); } diff --git a/arch/tile/kernel/hardwall.c b/arch/tile/kernel/hardwall.c index aca6000bca75..c4646bb99342 100644 --- a/arch/tile/kernel/hardwall.c +++ b/arch/tile/kernel/hardwall.c @@ -365,8 +365,7 @@ void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num) * to quiesce. 
*/ if (rect->teardown_in_progress) { - pr_notice("cpu %d: detected %s hardwall violation %#lx" - " while teardown already in progress\n", + pr_notice("cpu %d: detected %s hardwall violation %#lx while teardown already in progress\n", cpu, hwt->name, (long)mfspr_XDN(hwt, DIRECTION_PROTECT)); goto done; @@ -630,8 +629,7 @@ static void _hardwall_deactivate(struct hardwall_type *hwt, struct thread_struct *ts = &task->thread; if (cpumask_weight(&task->cpus_allowed) != 1) { - pr_err("pid %d (%s) releasing %s hardwall with" - " an affinity mask containing %d cpus!\n", + pr_err("pid %d (%s) releasing %s hardwall with an affinity mask containing %d cpus!\n", task->pid, task->comm, hwt->name, cpumask_weight(&task->cpus_allowed)); BUG(); diff --git a/arch/tile/kernel/irq.c b/arch/tile/kernel/irq.c index ba85765e1436..22044fc691ef 100644 --- a/arch/tile/kernel/irq.c +++ b/arch/tile/kernel/irq.c @@ -107,9 +107,8 @@ void tile_dev_intr(struct pt_regs *regs, int intnum) { long sp = stack_pointer - (long) current_thread_info(); if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) { - pr_emerg("tile_dev_intr: " - "stack overflow: %ld\n", - sp - sizeof(struct thread_info)); + pr_emerg("%s: stack overflow: %ld\n", + __func__, sp - sizeof(struct thread_info)); dump_stack(); } } diff --git a/arch/tile/kernel/kgdb.c b/arch/tile/kernel/kgdb.c index 4cd88381a83e..ff5335ae050d 100644 --- a/arch/tile/kernel/kgdb.c +++ b/arch/tile/kernel/kgdb.c @@ -125,9 +125,7 @@ int dbg_set_reg(int regno, void *mem, struct pt_regs *regs) void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task) { - int reg; struct pt_regs *thread_regs; - unsigned long *ptr = gdb_regs; if (task == NULL) return; @@ -136,9 +134,7 @@ sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task) memset(gdb_regs, 0, NUMREGBYTES); thread_regs = task_pt_regs(task); - for (reg = 0; reg <= TREG_LAST_GPR; reg++) - *(ptr++) = thread_regs->regs[reg]; - + memcpy(gdb_regs, thread_regs, TREG_LAST_GPR * sizeof(unsigned long)); gdb_regs[TILEGX_PC_REGNUM] = thread_regs->pc; gdb_regs[TILEGX_FAULTNUM_REGNUM] = thread_regs->faultnum; } diff --git a/arch/tile/kernel/kprobes.c b/arch/tile/kernel/kprobes.c index 27cdcacbe81d..f8a45c51e9e4 100644 --- a/arch/tile/kernel/kprobes.c +++ b/arch/tile/kernel/kprobes.c @@ -90,8 +90,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p) return -EINVAL; if (insn_has_control(*p->addr)) { - pr_notice("Kprobes for control instructions are not " - "supported\n"); + pr_notice("Kprobes for control instructions are not supported\n"); return -EINVAL; } diff --git a/arch/tile/kernel/machine_kexec.c b/arch/tile/kernel/machine_kexec.c index f0b54a934712..008aa2faef55 100644 --- a/arch/tile/kernel/machine_kexec.c +++ b/arch/tile/kernel/machine_kexec.c @@ -77,16 +77,13 @@ void machine_crash_shutdown(struct pt_regs *regs) int machine_kexec_prepare(struct kimage *image) { if (num_online_cpus() > 1) { - pr_warning("%s: detected attempt to kexec " - "with num_online_cpus() > 1\n", - __func__); + pr_warn("%s: detected attempt to kexec with num_online_cpus() > 1\n", + __func__); return -ENOSYS; } if (image->type != KEXEC_TYPE_DEFAULT) { - pr_warning("%s: detected attempt to kexec " - "with unsupported type: %d\n", - __func__, - image->type); + pr_warn("%s: detected attempt to kexec with unsupported type: %d\n", + __func__, image->type); return -ENOSYS; } return 0; @@ -131,8 +128,8 @@ static unsigned char *kexec_bn2cl(void *pg) */ csum = ip_compute_csum(pg, bhdrp->b_size); if (csum != 0) { - 
pr_warning("%s: bad checksum %#x (size %d)\n", - __func__, csum, bhdrp->b_size); + pr_warn("%s: bad checksum %#x (size %d)\n", + __func__, csum, bhdrp->b_size); return 0; } @@ -160,8 +157,7 @@ static unsigned char *kexec_bn2cl(void *pg) while (*desc != '\0') { desc++; if (((unsigned long)desc & PAGE_MASK) != (unsigned long)pg) { - pr_info("%s: ran off end of page\n", - __func__); + pr_info("%s: ran off end of page\n", __func__); return 0; } } @@ -195,20 +191,18 @@ static void kexec_find_and_set_command_line(struct kimage *image) } if (command_line != 0) { - pr_info("setting new command line to \"%s\"\n", - command_line); + pr_info("setting new command line to \"%s\"\n", command_line); hverr = hv_set_command_line( (HV_VirtAddr) command_line, strlen(command_line)); kunmap_atomic(command_line); } else { - pr_info("%s: no command line found; making empty\n", - __func__); + pr_info("%s: no command line found; making empty\n", __func__); hverr = hv_set_command_line((HV_VirtAddr) command_line, 0); } if (hverr) - pr_warning("%s: hv_set_command_line returned error: %d\n", - __func__, hverr); + pr_warn("%s: hv_set_command_line returned error: %d\n", + __func__, hverr); } /* diff --git a/arch/tile/kernel/messaging.c b/arch/tile/kernel/messaging.c index ac950be1318e..7475af3aacec 100644 --- a/arch/tile/kernel/messaging.c +++ b/arch/tile/kernel/messaging.c @@ -59,9 +59,8 @@ void hv_message_intr(struct pt_regs *regs, int intnum) { long sp = stack_pointer - (long) current_thread_info(); if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) { - pr_emerg("hv_message_intr: " - "stack overflow: %ld\n", - sp - sizeof(struct thread_info)); + pr_emerg("%s: stack overflow: %ld\n", + __func__, sp - sizeof(struct thread_info)); dump_stack(); } } diff --git a/arch/tile/kernel/module.c b/arch/tile/kernel/module.c index d19b13e3a59f..96447c9160a0 100644 --- a/arch/tile/kernel/module.c +++ b/arch/tile/kernel/module.c @@ -96,8 +96,8 @@ void module_free(struct module *mod, void *module_region) static int validate_hw2_last(long value, struct module *me) { if (((value << 16) >> 16) != value) { - pr_warning("module %s: Out of range HW2_LAST value %#lx\n", - me->name, value); + pr_warn("module %s: Out of range HW2_LAST value %#lx\n", + me->name, value); return 0; } return 1; @@ -210,10 +210,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs, value -= (unsigned long) location; /* pc-relative */ value = (long) value >> 3; /* count by instrs */ if (!validate_jumpoff(value)) { - pr_warning("module %s: Out of range jump to" - " %#llx at %#llx (%p)\n", me->name, - sym->st_value + rel[i].r_addend, - rel[i].r_offset, location); + pr_warn("module %s: Out of range jump to %#llx at %#llx (%p)\n", + me->name, + sym->st_value + rel[i].r_addend, + rel[i].r_offset, location); return -ENOEXEC; } MUNGE(create_JumpOff_X1); diff --git a/arch/tile/kernel/pci.c b/arch/tile/kernel/pci.c index 1f80a88c75a6..f70c7892fa25 100644 --- a/arch/tile/kernel/pci.c +++ b/arch/tile/kernel/pci.c @@ -178,8 +178,8 @@ int __init tile_pci_init(void) continue; hv_cfg_fd1 = tile_pcie_open(i, 1); if (hv_cfg_fd1 < 0) { - pr_err("PCI: Couldn't open config fd to HV " - "for controller %d\n", i); + pr_err("PCI: Couldn't open config fd to HV for controller %d\n", + i); goto err_cont; } @@ -423,8 +423,7 @@ int pcibios_enable_device(struct pci_dev *dev, int mask) for (i = 0; i < 6; i++) { r = &dev->resource[i]; if (r->flags & IORESOURCE_UNSET) { - pr_err("PCI: Device %s not available " - "because of resource collisions\n", + pr_err("PCI: Device %s not available 
because of resource collisions\n", pci_name(dev)); return -EINVAL; } diff --git a/arch/tile/kernel/pci_gx.c b/arch/tile/kernel/pci_gx.c index e39f9c542807..2c95f37ebbed 100644 --- a/arch/tile/kernel/pci_gx.c +++ b/arch/tile/kernel/pci_gx.c @@ -131,8 +131,7 @@ static int tile_irq_cpu(int irq) count = cpumask_weight(&intr_cpus_map); if (unlikely(count == 0)) { - pr_warning("intr_cpus_map empty, interrupts will be" - " delievered to dataplane tiles\n"); + pr_warn("intr_cpus_map empty, interrupts will be delievered to dataplane tiles\n"); return irq % (smp_height * smp_width); } @@ -197,16 +196,16 @@ static int tile_pcie_open(int trio_index) /* Get the properties of the PCIe ports on this TRIO instance. */ ret = gxio_trio_get_port_property(context, &pcie_ports[trio_index]); if (ret < 0) { - pr_err("PCI: PCIE_GET_PORT_PROPERTY failure, error %d," - " on TRIO %d\n", ret, trio_index); + pr_err("PCI: PCIE_GET_PORT_PROPERTY failure, error %d, on TRIO %d\n", + ret, trio_index); goto get_port_property_failure; } context->mmio_base_mac = iorpc_ioremap(context->fd, 0, HV_TRIO_CONFIG_IOREMAP_SIZE); if (context->mmio_base_mac == NULL) { - pr_err("PCI: TRIO config space mapping failure, error %d," - " on TRIO %d\n", ret, trio_index); + pr_err("PCI: TRIO config space mapping failure, error %d, on TRIO %d\n", + ret, trio_index); ret = -ENOMEM; goto trio_mmio_mapping_failure; @@ -622,9 +621,8 @@ static void fixup_read_and_payload_sizes(struct pci_controller *controller) dev_control.max_read_req_sz, mac); if (err < 0) { - pr_err("PCI: PCIE_CONFIGURE_MAC_MPS_MRS failure, " - "MAC %d on TRIO %d\n", - mac, controller->trio_index); + pr_err("PCI: PCIE_CONFIGURE_MAC_MPS_MRS failure, MAC %d on TRIO %d\n", + mac, controller->trio_index); } } @@ -720,27 +718,24 @@ int __init pcibios_init(void) reg_offset); if (!port_status.dl_up) { if (rc_delay[trio_index][mac]) { - pr_info("Delaying PCIe RC TRIO init %d sec" - " on MAC %d on TRIO %d\n", + pr_info("Delaying PCIe RC TRIO init %d sec on MAC %d on TRIO %d\n", rc_delay[trio_index][mac], mac, trio_index); msleep(rc_delay[trio_index][mac] * 1000); } ret = gxio_trio_force_rc_link_up(trio_context, mac); if (ret < 0) - pr_err("PCI: PCIE_FORCE_LINK_UP failure, " - "MAC %d on TRIO %d\n", mac, trio_index); + pr_err("PCI: PCIE_FORCE_LINK_UP failure, MAC %d on TRIO %d\n", + mac, trio_index); } - pr_info("PCI: Found PCI controller #%d on TRIO %d MAC %d\n", i, - trio_index, controller->mac); + pr_info("PCI: Found PCI controller #%d on TRIO %d MAC %d\n", + i, trio_index, controller->mac); /* Delay the bus probe if needed. */ if (rc_delay[trio_index][mac]) { - pr_info("Delaying PCIe RC bus enumerating %d sec" - " on MAC %d on TRIO %d\n", - rc_delay[trio_index][mac], mac, - trio_index); + pr_info("Delaying PCIe RC bus enumerating %d sec on MAC %d on TRIO %d\n", + rc_delay[trio_index][mac], mac, trio_index); msleep(rc_delay[trio_index][mac] * 1000); } else { /* @@ -758,11 +753,10 @@ int __init pcibios_init(void) if (pcie_ports[trio_index].ports[mac].removable) { pr_info("PCI: link is down, MAC %d on TRIO %d\n", mac, trio_index); - pr_info("This is expected if no PCIe card" - " is connected to this link\n"); + pr_info("This is expected if no PCIe card is connected to this link\n"); } else pr_err("PCI: link is down, MAC %d on TRIO %d\n", - mac, trio_index); + mac, trio_index); continue; } @@ -829,8 +823,8 @@ int __init pcibios_init(void) /* Alloc a PIO region for PCI config access per MAC. 
*/ ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0); if (ret < 0) { - pr_err("PCI: PCI CFG PIO alloc failure for mac %d " - "on TRIO %d, give up\n", mac, trio_index); + pr_err("PCI: PCI CFG PIO alloc failure for mac %d on TRIO %d, give up\n", + mac, trio_index); continue; } @@ -842,8 +836,8 @@ int __init pcibios_init(void) trio_context->pio_cfg_index[mac], mac, 0, HV_TRIO_PIO_FLAG_CONFIG_SPACE); if (ret < 0) { - pr_err("PCI: PCI CFG PIO init failure for mac %d " - "on TRIO %d, give up\n", mac, trio_index); + pr_err("PCI: PCI CFG PIO init failure for mac %d on TRIO %d, give up\n", + mac, trio_index); continue; } @@ -865,7 +859,7 @@ int __init pcibios_init(void) (TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT - 1))); if (trio_context->mmio_base_pio_cfg[mac] == NULL) { pr_err("PCI: PIO map failure for mac %d on TRIO %d\n", - mac, trio_index); + mac, trio_index); continue; } @@ -925,9 +919,8 @@ int __init pcibios_init(void) /* Alloc a PIO region for PCI memory access for each RC port. */ ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0); if (ret < 0) { - pr_err("PCI: MEM PIO alloc failure on TRIO %d mac %d, " - "give up\n", controller->trio_index, - controller->mac); + pr_err("PCI: MEM PIO alloc failure on TRIO %d mac %d, give up\n", + controller->trio_index, controller->mac); continue; } @@ -944,9 +937,8 @@ int __init pcibios_init(void) 0, 0); if (ret < 0) { - pr_err("PCI: MEM PIO init failure on TRIO %d mac %d, " - "give up\n", controller->trio_index, - controller->mac); + pr_err("PCI: MEM PIO init failure on TRIO %d mac %d, give up\n", + controller->trio_index, controller->mac); continue; } @@ -957,9 +949,8 @@ int __init pcibios_init(void) */ ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0); if (ret < 0) { - pr_err("PCI: I/O PIO alloc failure on TRIO %d mac %d, " - "give up\n", controller->trio_index, - controller->mac); + pr_err("PCI: I/O PIO alloc failure on TRIO %d mac %d, give up\n", + controller->trio_index, controller->mac); continue; } @@ -976,9 +967,8 @@ int __init pcibios_init(void) 0, HV_TRIO_PIO_FLAG_IO_SPACE); if (ret < 0) { - pr_err("PCI: I/O PIO init failure on TRIO %d mac %d, " - "give up\n", controller->trio_index, - controller->mac); + pr_err("PCI: I/O PIO init failure on TRIO %d mac %d, give up\n", + controller->trio_index, controller->mac); continue; } @@ -997,10 +987,9 @@ int __init pcibios_init(void) ret = gxio_trio_alloc_memory_maps(trio_context, 1, 0, 0); if (ret < 0) { - pr_err("PCI: Mem-Map alloc failure on TRIO %d " - "mac %d for MC %d, give up\n", - controller->trio_index, - controller->mac, j); + pr_err("PCI: Mem-Map alloc failure on TRIO %d mac %d for MC %d, give up\n", + controller->trio_index, controller->mac, + j); goto alloc_mem_map_failed; } @@ -1030,10 +1019,9 @@ int __init pcibios_init(void) j, GXIO_TRIO_ORDER_MODE_UNORDERED); if (ret < 0) { - pr_err("PCI: Mem-Map init failure on TRIO %d " - "mac %d for MC %d, give up\n", - controller->trio_index, - controller->mac, j); + pr_err("PCI: Mem-Map init failure on TRIO %d mac %d for MC %d, give up\n", + controller->trio_index, controller->mac, + j); goto alloc_mem_map_failed; } @@ -1453,7 +1441,7 @@ static struct pci_ops tile_cfg_ops = { static unsigned int tilegx_msi_startup(struct irq_data *d) { if (d->msi_desc) - unmask_msi_irq(d); + pci_msi_unmask_irq(d); return 0; } @@ -1465,14 +1453,14 @@ static void tilegx_msi_ack(struct irq_data *d) static void tilegx_msi_mask(struct irq_data *d) { - mask_msi_irq(d); + pci_msi_mask_irq(d); __insn_mtspr(SPR_IPI_MASK_SET_K, 1UL << d->irq); } static 
void tilegx_msi_unmask(struct irq_data *d) { __insn_mtspr(SPR_IPI_MASK_RESET_K, 1UL << d->irq); - unmask_msi_irq(d); + pci_msi_unmask_irq(d); } static struct irq_chip tilegx_msi_chip = { @@ -1510,9 +1498,7 @@ int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc) * Most PCIe endpoint devices do support 64-bit message addressing. */ if (desc->msi_attrib.is_64 == 0) { - dev_printk(KERN_INFO, &pdev->dev, - "64-bit MSI message address not supported, " - "falling back to legacy interrupts.\n"); + dev_info(&pdev->dev, "64-bit MSI message address not supported, falling back to legacy interrupts\n"); ret = -ENOMEM; goto is_64_failure; @@ -1549,11 +1535,8 @@ int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc) /* SQ regions are out, allocate from map mem regions. */ mem_map = gxio_trio_alloc_memory_maps(trio_context, 1, 0, 0); if (mem_map < 0) { - dev_printk(KERN_INFO, &pdev->dev, - "%s Mem-Map alloc failure. " - "Failed to initialize MSI interrupts. " - "Falling back to legacy interrupts.\n", - desc->msi_attrib.is_msix ? "MSI-X" : "MSI"); + dev_info(&pdev->dev, "%s Mem-Map alloc failure - failed to initialize MSI interrupts - falling back to legacy interrupts\n", + desc->msi_attrib.is_msix ? "MSI-X" : "MSI"); ret = -ENOMEM; goto msi_mem_map_alloc_failure; } @@ -1580,7 +1563,7 @@ int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc) mem_map, mem_map_base, mem_map_limit, trio_context->asid); if (ret < 0) { - dev_printk(KERN_INFO, &pdev->dev, "HV MSI config failed.\n"); + dev_info(&pdev->dev, "HV MSI config failed\n"); goto hv_msi_config_failure; } @@ -1590,7 +1573,7 @@ int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc) msg.address_hi = msi_addr >> 32; msg.address_lo = msi_addr & 0xffffffff; - write_msi_msg(irq, &msg); + pci_write_msi_msg(irq, &msg); irq_set_chip_and_handler(irq, &tilegx_msi_chip, handle_level_irq); irq_set_handler_data(irq, controller); diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c index 0050cbc1d9de..48e5773dd0b7 100644 --- a/arch/tile/kernel/process.c +++ b/arch/tile/kernel/process.c @@ -52,7 +52,7 @@ static int __init idle_setup(char *str) return -EINVAL; if (!strcmp(str, "poll")) { - pr_info("using polling idle threads.\n"); + pr_info("using polling idle threads\n"); cpu_idle_poll_ctrl(true); return 0; } else if (!strcmp(str, "halt")) { @@ -547,27 +547,25 @@ void show_regs(struct pt_regs *regs) struct task_struct *tsk = validate_current(); int i; - pr_err("\n"); if (tsk != &corrupt_current) show_regs_print_info(KERN_ERR); #ifdef __tilegx__ for (i = 0; i < 17; i++) - pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT" r%-2d: "REGFMT"\n", + pr_err(" r%-2d: " REGFMT " r%-2d: " REGFMT " r%-2d: " REGFMT "\n", i, regs->regs[i], i+18, regs->regs[i+18], i+36, regs->regs[i+36]); - pr_err(" r17: "REGFMT" r35: "REGFMT" tp : "REGFMT"\n", + pr_err(" r17: " REGFMT " r35: " REGFMT " tp : " REGFMT "\n", regs->regs[17], regs->regs[35], regs->tp); - pr_err(" sp : "REGFMT" lr : "REGFMT"\n", regs->sp, regs->lr); + pr_err(" sp : " REGFMT " lr : " REGFMT "\n", regs->sp, regs->lr); #else for (i = 0; i < 13; i++) - pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT - " r%-2d: "REGFMT" r%-2d: "REGFMT"\n", + pr_err(" r%-2d: " REGFMT " r%-2d: " REGFMT " r%-2d: " REGFMT " r%-2d: " REGFMT "\n", i, regs->regs[i], i+14, regs->regs[i+14], i+27, regs->regs[i+27], i+40, regs->regs[i+40]); - pr_err(" r13: "REGFMT" tp : "REGFMT" sp : "REGFMT" lr : "REGFMT"\n", + pr_err(" r13: " REGFMT " tp : " REGFMT " sp : " REGFMT " lr : " REGFMT "\n", regs->regs[13], 
regs->tp, regs->sp, regs->lr); #endif - pr_err(" pc : "REGFMT" ex1: %ld faultnum: %ld\n", + pr_err(" pc : " REGFMT " ex1: %ld faultnum: %ld\n", regs->pc, regs->ex1, regs->faultnum); dump_stack_regs(regs); diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c index b9736ded06f2..864eea69556d 100644 --- a/arch/tile/kernel/setup.c +++ b/arch/tile/kernel/setup.c @@ -130,7 +130,7 @@ static int __init setup_maxmem(char *str) maxmem_pfn = (maxmem >> HPAGE_SHIFT) << (HPAGE_SHIFT - PAGE_SHIFT); pr_info("Forcing RAM used to no more than %dMB\n", - maxmem_pfn >> (20 - PAGE_SHIFT)); + maxmem_pfn >> (20 - PAGE_SHIFT)); return 0; } early_param("maxmem", setup_maxmem); @@ -149,7 +149,7 @@ static int __init setup_maxnodemem(char *str) maxnodemem_pfn[node] = (maxnodemem >> HPAGE_SHIFT) << (HPAGE_SHIFT - PAGE_SHIFT); pr_info("Forcing RAM used on node %ld to no more than %dMB\n", - node, maxnodemem_pfn[node] >> (20 - PAGE_SHIFT)); + node, maxnodemem_pfn[node] >> (20 - PAGE_SHIFT)); return 0; } early_param("maxnodemem", setup_maxnodemem); @@ -417,8 +417,7 @@ static void __init setup_memory(void) range.start = (start_pa + HPAGE_SIZE - 1) & HPAGE_MASK; range.size -= (range.start - start_pa); range.size &= HPAGE_MASK; - pr_err("Range not hugepage-aligned: %#llx..%#llx:" - " now %#llx-%#llx\n", + pr_err("Range not hugepage-aligned: %#llx..%#llx: now %#llx-%#llx\n", start_pa, start_pa + orig_size, range.start, range.start + range.size); } @@ -437,8 +436,8 @@ static void __init setup_memory(void) if (PFN_DOWN(range.size) > maxnodemem_pfn[i]) { int max_size = maxnodemem_pfn[i]; if (max_size > 0) { - pr_err("Maxnodemem reduced node %d to" - " %d pages\n", i, max_size); + pr_err("Maxnodemem reduced node %d to %d pages\n", + i, max_size); range.size = PFN_PHYS(max_size); } else { pr_err("Maxnodemem disabled node %d\n", i); @@ -490,8 +489,8 @@ static void __init setup_memory(void) NR_CPUS * (PFN_UP(per_cpu_size) >> PAGE_SHIFT); if (end < pci_reserve_end_pfn + percpu_pages) { end = pci_reserve_start_pfn; - pr_err("PCI mapping region reduced node %d to" - " %ld pages\n", i, end - start); + pr_err("PCI mapping region reduced node %d to %ld pages\n", + i, end - start); } } #endif @@ -534,11 +533,10 @@ static void __init setup_memory(void) } } physpages -= dropped_pages; - pr_warning("Only using %ldMB memory;" - " ignoring %ldMB.\n", - physpages >> (20 - PAGE_SHIFT), - dropped_pages >> (20 - PAGE_SHIFT)); - pr_warning("Consider using a larger page size.\n"); + pr_warn("Only using %ldMB memory - ignoring %ldMB\n", + physpages >> (20 - PAGE_SHIFT), + dropped_pages >> (20 - PAGE_SHIFT)); + pr_warn("Consider using a larger page size\n"); } #endif @@ -556,25 +554,23 @@ static void __init setup_memory(void) MAXMEM_PFN : mappable_physpages; highmem_pages = (long) (physpages - lowmem_pages); - pr_notice("%ldMB HIGHMEM available.\n", - pages_to_mb(highmem_pages > 0 ? highmem_pages : 0)); - pr_notice("%ldMB LOWMEM available.\n", - pages_to_mb(lowmem_pages)); + pr_notice("%ldMB HIGHMEM available\n", + pages_to_mb(highmem_pages > 0 ? highmem_pages : 0)); + pr_notice("%ldMB LOWMEM available\n", pages_to_mb(lowmem_pages)); #else /* Set max_low_pfn based on what node 0 can directly address. 
*/ max_low_pfn = node_end_pfn[0]; #ifndef __tilegx__ if (node_end_pfn[0] > MAXMEM_PFN) { - pr_warning("Only using %ldMB LOWMEM.\n", - MAXMEM>>20); - pr_warning("Use a HIGHMEM enabled kernel.\n"); + pr_warn("Only using %ldMB LOWMEM\n", MAXMEM >> 20); + pr_warn("Use a HIGHMEM enabled kernel\n"); max_low_pfn = MAXMEM_PFN; max_pfn = MAXMEM_PFN; node_end_pfn[0] = MAXMEM_PFN; } else { - pr_notice("%ldMB memory available.\n", - pages_to_mb(node_end_pfn[0])); + pr_notice("%ldMB memory available\n", + pages_to_mb(node_end_pfn[0])); } for (i = 1; i < MAX_NUMNODES; ++i) { node_start_pfn[i] = 0; @@ -589,8 +585,7 @@ static void __init setup_memory(void) if (pages) high_memory = pfn_to_kaddr(node_end_pfn[i]); } - pr_notice("%ldMB memory available.\n", - pages_to_mb(lowmem_pages)); + pr_notice("%ldMB memory available\n", pages_to_mb(lowmem_pages)); #endif #endif } @@ -1112,8 +1107,8 @@ static void __init load_hv_initrd(void) fd = hv_fs_findfile((HV_VirtAddr) initramfs_file); if (fd == HV_ENOENT) { if (set_initramfs_file) { - pr_warning("No such hvfs initramfs file '%s'\n", - initramfs_file); + pr_warn("No such hvfs initramfs file '%s'\n", + initramfs_file); return; } else { /* Try old backwards-compatible name. */ @@ -1126,8 +1121,8 @@ static void __init load_hv_initrd(void) stat = hv_fs_fstat(fd); BUG_ON(stat.size < 0); if (stat.flags & HV_FS_ISDIR) { - pr_warning("Ignoring hvfs file '%s': it's a directory.\n", - initramfs_file); + pr_warn("Ignoring hvfs file '%s': it's a directory\n", + initramfs_file); return; } initrd = alloc_bootmem_pages(stat.size); @@ -1185,9 +1180,8 @@ static void __init validate_hv(void) HV_Topology topology = hv_inquire_topology(); BUG_ON(topology.coord.x != 0 || topology.coord.y != 0); if (topology.width != 1 || topology.height != 1) { - pr_warning("Warning: booting UP kernel on %dx%d grid;" - " will ignore all but first tile.\n", - topology.width, topology.height); + pr_warn("Warning: booting UP kernel on %dx%d grid; will ignore all but first tile\n", + topology.width, topology.height); } #endif @@ -1208,9 +1202,8 @@ static void __init validate_hv(void) * We use a struct cpumask for this, so it must be big enough. */ if ((smp_height * smp_width) > nr_cpu_ids) - early_panic("Hypervisor %d x %d grid too big for Linux" - " NR_CPUS %d\n", smp_height, smp_width, - nr_cpu_ids); + early_panic("Hypervisor %d x %d grid too big for Linux NR_CPUS %d\n", + smp_height, smp_width, nr_cpu_ids); #endif /* @@ -1265,10 +1258,9 @@ static void __init validate_va(void) /* Kernel PCs must have their high bit set; see intvec.S. 
*/ if ((long)VMALLOC_START >= 0) - early_panic( - "Linux VMALLOC region below the 2GB line (%#lx)!\n" - "Reconfigure the kernel with smaller VMALLOC_RESERVE.\n", - VMALLOC_START); + early_panic("Linux VMALLOC region below the 2GB line (%#lx)!\n" + "Reconfigure the kernel with smaller VMALLOC_RESERVE\n", + VMALLOC_START); #endif } @@ -1395,7 +1387,7 @@ static void __init setup_cpu_maps(void) static int __init dataplane(char *str) { - pr_warning("WARNING: dataplane support disabled in this kernel\n"); + pr_warn("WARNING: dataplane support disabled in this kernel\n"); return 0; } @@ -1413,8 +1405,8 @@ void __init setup_arch(char **cmdline_p) len = hv_get_command_line((HV_VirtAddr) boot_command_line, COMMAND_LINE_SIZE); if (boot_command_line[0]) - pr_warning("WARNING: ignoring dynamic command line \"%s\"\n", - boot_command_line); + pr_warn("WARNING: ignoring dynamic command line \"%s\"\n", + boot_command_line); strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE); #else char *hv_cmdline; @@ -1540,8 +1532,7 @@ static void __init pcpu_fc_populate_pte(unsigned long addr) BUG_ON(pgd_addr_invalid(addr)); if (addr < VMALLOC_START || addr >= VMALLOC_END) - panic("PCPU addr %#lx outside vmalloc range %#lx..%#lx;" - " try increasing CONFIG_VMALLOC_RESERVE\n", + panic("PCPU addr %#lx outside vmalloc range %#lx..%#lx; try increasing CONFIG_VMALLOC_RESERVE\n", addr, VMALLOC_START, VMALLOC_END); pgd = swapper_pg_dir + pgd_index(addr); @@ -1596,8 +1587,8 @@ void __init setup_per_cpu_areas(void) lowmem_va = (unsigned long)pfn_to_kaddr(pfn); ptep = virt_to_kpte(lowmem_va); if (pte_huge(*ptep)) { - printk(KERN_DEBUG "early shatter of huge page" - " at %#lx\n", lowmem_va); + printk(KERN_DEBUG "early shatter of huge page at %#lx\n", + lowmem_va); shatter_pmd((pmd_t *)ptep); ptep = virt_to_kpte(lowmem_va); BUG_ON(pte_huge(*ptep)); diff --git a/arch/tile/kernel/signal.c b/arch/tile/kernel/signal.c index 7c2fecc52177..bb0a9ce7ae23 100644 --- a/arch/tile/kernel/signal.c +++ b/arch/tile/kernel/signal.c @@ -45,8 +45,7 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) { - int err = 0; - int i; + int err; /* Always make any pending restarted system calls return -EINTR */ current_thread_info()->restart_block.fn = do_no_restart_syscall; @@ -57,9 +56,7 @@ int restore_sigcontext(struct pt_regs *regs, */ BUILD_BUG_ON(sizeof(struct sigcontext) != sizeof(struct pt_regs)); BUILD_BUG_ON(sizeof(struct sigcontext) % 8 != 0); - - for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i) - err |= __get_user(regs->regs[i], &sc->gregs[i]); + err = __copy_from_user(regs, sc, sizeof(*regs)); /* Ensure that the PL is always set to USER_PL. 
*/ regs->ex1 = PL_ICS_EX1(USER_PL, EX1_ICS(regs->ex1)); @@ -110,12 +107,7 @@ badframe: int setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs) { - int i, err = 0; - - for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i) - err |= __put_user(regs->regs[i], &sc->gregs[i]); - - return err; + return __copy_to_user(sc, regs, sizeof(*regs)); } /* @@ -345,7 +337,6 @@ static void dump_mem(void __user *address) int i, j, k; int found_readable_mem = 0; - pr_err("\n"); if (!access_ok(VERIFY_READ, address, 1)) { pr_err("Not dumping at address 0x%lx (kernel address)\n", (unsigned long)address); @@ -367,7 +358,7 @@ static void dump_mem(void __user *address) (unsigned long)address); found_readable_mem = 1; } - j = sprintf(line, REGFMT":", (unsigned long)addr); + j = sprintf(line, REGFMT ":", (unsigned long)addr); for (k = 0; k < bytes_per_line; ++k) j += sprintf(&line[j], " %02x", buf[k]); pr_err("%s\n", line); @@ -411,8 +402,7 @@ void trace_unhandled_signal(const char *type, struct pt_regs *regs, case SIGFPE: case SIGSEGV: case SIGBUS: - pr_err("User crash: signal %d," - " trap %ld, address 0x%lx\n", + pr_err("User crash: signal %d, trap %ld, address 0x%lx\n", sig, regs->faultnum, address); show_regs(regs); dump_mem((void __user *)address); diff --git a/arch/tile/kernel/single_step.c b/arch/tile/kernel/single_step.c index 6cb2ce31b5a2..862973074bf9 100644 --- a/arch/tile/kernel/single_step.c +++ b/arch/tile/kernel/single_step.c @@ -222,11 +222,9 @@ static tilepro_bundle_bits rewrite_load_store_unaligned( } if (unaligned_printk || unaligned_fixup_count == 0) { - pr_info("Process %d/%s: PC %#lx: Fixup of" - " unaligned %s at %#lx.\n", + pr_info("Process %d/%s: PC %#lx: Fixup of unaligned %s at %#lx\n", current->pid, current->comm, regs->pc, - (mem_op == MEMOP_LOAD || - mem_op == MEMOP_LOAD_POSTINCR) ? + mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR ? "load" : "store", (unsigned long)addr); if (!unaligned_printk) { diff --git a/arch/tile/kernel/smpboot.c b/arch/tile/kernel/smpboot.c index 0d59a1b60c74..20d52a98e171 100644 --- a/arch/tile/kernel/smpboot.c +++ b/arch/tile/kernel/smpboot.c @@ -127,8 +127,7 @@ static __init int reset_init_affinity(void) { long rc = sched_setaffinity(current->pid, &init_affinity); if (rc != 0) - pr_warning("couldn't reset init affinity (%ld)\n", - rc); + pr_warn("couldn't reset init affinity (%ld)\n", rc); return 0; } late_initcall(reset_init_affinity); @@ -174,7 +173,7 @@ static void start_secondary(void) /* Indicate that we're ready to come up. */ /* Must not do this before we're ready to receive messages */ if (cpumask_test_and_set_cpu(cpuid, &cpu_started)) { - pr_warning("CPU#%d already started!\n", cpuid); + pr_warn("CPU#%d already started!\n", cpuid); for (;;) local_irq_enable(); } diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c index c93977a62116..7ff5afdbd3aa 100644 --- a/arch/tile/kernel/stack.c +++ b/arch/tile/kernel/stack.c @@ -387,9 +387,7 @@ void tile_show_stack(struct KBacktraceIterator *kbt, int headers) * then bust_spinlocks() spit out a space in front of us * and it will mess up our KERN_ERR. 
*/ - pr_err("\n"); - pr_err("Starting stack dump of tid %d, pid %d (%s)" - " on cpu %d at cycle %lld\n", + pr_err("Starting stack dump of tid %d, pid %d (%s) on cpu %d at cycle %lld\n", kbt->task->pid, kbt->task->tgid, kbt->task->comm, raw_smp_processor_id(), get_cycles()); } @@ -411,8 +409,7 @@ void tile_show_stack(struct KBacktraceIterator *kbt, int headers) i++, address, namebuf, (unsigned long)(kbt->it.sp)); if (i >= 100) { - pr_err("Stack dump truncated" - " (%d frames)\n", i); + pr_err("Stack dump truncated (%d frames)\n", i); break; } } diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c index b854a1cd0079..d412b0856c0a 100644 --- a/arch/tile/kernel/time.c +++ b/arch/tile/kernel/time.c @@ -98,8 +98,8 @@ void __init calibrate_delay(void) { loops_per_jiffy = get_clock_rate() / HZ; pr_info("Clock rate yields %lu.%02lu BogoMIPS (lpj=%lu)\n", - loops_per_jiffy/(500000/HZ), - (loops_per_jiffy/(5000/HZ)) % 100, loops_per_jiffy); + loops_per_jiffy / (500000 / HZ), + (loops_per_jiffy / (5000 / HZ)) % 100, loops_per_jiffy); } /* Called fairly late in init/main.c, but before we go smp. */ diff --git a/arch/tile/kernel/traps.c b/arch/tile/kernel/traps.c index 86900ccd4977..bf841ca517bb 100644 --- a/arch/tile/kernel/traps.c +++ b/arch/tile/kernel/traps.c @@ -46,9 +46,9 @@ static int __init setup_unaligned_fixup(char *str) return 0; pr_info("Fixups for unaligned data accesses are %s\n", - unaligned_fixup >= 0 ? - (unaligned_fixup ? "enabled" : "disabled") : - "completely disabled"); + unaligned_fixup >= 0 ? + (unaligned_fixup ? "enabled" : "disabled") : + "completely disabled"); return 1; } __setup("unaligned_fixup=", setup_unaligned_fixup); @@ -305,8 +305,8 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num, case INT_ILL: if (copy_from_user(&instr, (void __user *)regs->pc, sizeof(instr))) { - pr_err("Unreadable instruction for INT_ILL:" - " %#lx\n", regs->pc); + pr_err("Unreadable instruction for INT_ILL: %#lx\n", + regs->pc); do_exit(SIGKILL); return; } diff --git a/arch/tile/kernel/unaligned.c b/arch/tile/kernel/unaligned.c index c02ea2a45f67..7d9a83be0aca 100644 --- a/arch/tile/kernel/unaligned.c +++ b/arch/tile/kernel/unaligned.c @@ -969,8 +969,7 @@ void jit_bundle_gen(struct pt_regs *regs, tilegx_bundle_bits bundle, unaligned_fixup_count++; if (unaligned_printk) { - pr_info("%s/%d. 
Unalign fixup for kernel access " - "to userspace %lx.", + pr_info("%s/%d - Unalign fixup for kernel access to userspace %lx\n", current->comm, current->pid, regs->regs[ra]); } @@ -985,7 +984,7 @@ void jit_bundle_gen(struct pt_regs *regs, tilegx_bundle_bits bundle, .si_addr = (unsigned char __user *)0 }; if (unaligned_printk) - pr_info("Unalign bundle: unexp @%llx, %llx", + pr_info("Unalign bundle: unexp @%llx, %llx\n", (unsigned long long)regs->pc, (unsigned long long)bundle); @@ -1370,8 +1369,7 @@ void jit_bundle_gen(struct pt_regs *regs, tilegx_bundle_bits bundle, frag.bundle = bundle; if (unaligned_printk) { - pr_info("%s/%d, Unalign fixup: pc=%lx " - "bundle=%lx %d %d %d %d %d %d %d %d.", + pr_info("%s/%d, Unalign fixup: pc=%lx bundle=%lx %d %d %d %d %d %d %d %d\n", current->comm, current->pid, (unsigned long)frag.pc, (unsigned long)frag.bundle, @@ -1380,8 +1378,8 @@ void jit_bundle_gen(struct pt_regs *regs, tilegx_bundle_bits bundle, (int)y1_lr, (int)y1_br, (int)x1_add); for (k = 0; k < n; k += 2) - pr_info("[%d] %016llx %016llx", k, - (unsigned long long)frag.insn[k], + pr_info("[%d] %016llx %016llx\n", + k, (unsigned long long)frag.insn[k], (unsigned long long)frag.insn[k+1]); } @@ -1402,7 +1400,7 @@ void jit_bundle_gen(struct pt_regs *regs, tilegx_bundle_bits bundle, .si_addr = (void __user *)&jit_code_area[idx] }; - pr_warn("Unalign fixup: pid=%d %s jit_code_area=%llx", + pr_warn("Unalign fixup: pid=%d %s jit_code_area=%llx\n", current->pid, current->comm, (unsigned long long)&jit_code_area[idx]); @@ -1485,7 +1483,7 @@ void do_unaligned(struct pt_regs *regs, int vecnum) /* If exception came from kernel, try fix it up. */ if (fixup_exception(regs)) { if (unaligned_printk) - pr_info("Unalign fixup: %d %llx @%llx", + pr_info("Unalign fixup: %d %llx @%llx\n", (int)unaligned_fixup, (unsigned long long)regs->ex1, (unsigned long long)regs->pc); @@ -1519,7 +1517,7 @@ void do_unaligned(struct pt_regs *regs, int vecnum) }; if (unaligned_printk) - pr_info("Unalign fixup: %d %llx @%llx", + pr_info("Unalign fixup: %d %llx @%llx\n", (int)unaligned_fixup, (unsigned long long)regs->ex1, (unsigned long long)regs->pc); @@ -1579,14 +1577,14 @@ void do_unaligned(struct pt_regs *regs, int vecnum) 0); if (IS_ERR((void __force *)user_page)) { - pr_err("Out of kernel pages trying do_mmap.\n"); + pr_err("Out of kernel pages trying do_mmap\n"); return; } /* Save the address in the thread_info struct */ info->unalign_jit_base = user_page; if (unaligned_printk) - pr_info("Unalign bundle: %d:%d, allocate page @%llx", + pr_info("Unalign bundle: %d:%d, allocate page @%llx\n", raw_smp_processor_id(), current->pid, (unsigned long long)user_page); } diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c index 6c0571216a9d..565e25a98334 100644 --- a/arch/tile/mm/fault.c +++ b/arch/tile/mm/fault.c @@ -169,8 +169,7 @@ static void wait_for_migration(pte_t *pte) while (pte_migrating(*pte)) { barrier(); if (++retries > bound) - panic("Hit migrating PTE (%#llx) and" - " page PFN %#lx still migrating", + panic("Hit migrating PTE (%#llx) and page PFN %#lx still migrating", pte->val, pte_pfn(*pte)); } } @@ -292,11 +291,10 @@ static int handle_page_fault(struct pt_regs *regs, */ stack_offset = stack_pointer & (THREAD_SIZE-1); if (stack_offset < THREAD_SIZE / 8) { - pr_alert("Potential stack overrun: sp %#lx\n", - stack_pointer); + pr_alert("Potential stack overrun: sp %#lx\n", stack_pointer); show_regs(regs); pr_alert("Killing current process %d/%s\n", - tsk->pid, tsk->comm); + tsk->pid, tsk->comm); 
do_group_exit(SIGKILL); } @@ -421,7 +419,7 @@ good_area: } else if (write) { #ifdef TEST_VERIFY_AREA if (!is_page_fault && regs->cs == KERNEL_CS) - pr_err("WP fault at "REGFMT"\n", regs->eip); + pr_err("WP fault at " REGFMT "\n", regs->eip); #endif if (!(vma->vm_flags & VM_WRITE)) goto bad_area; @@ -519,16 +517,15 @@ no_context: pte_t *pte = lookup_address(address); if (pte && pte_present(*pte) && !pte_exec_kernel(*pte)) - pr_crit("kernel tried to execute" - " non-executable page - exploit attempt?" - " (uid: %d)\n", current->uid); + pr_crit("kernel tried to execute non-executable page - exploit attempt? (uid: %d)\n", + current->uid); } #endif if (address < PAGE_SIZE) pr_alert("Unable to handle kernel NULL pointer dereference\n"); else pr_alert("Unable to handle kernel paging request\n"); - pr_alert(" at virtual address "REGFMT", pc "REGFMT"\n", + pr_alert(" at virtual address " REGFMT ", pc " REGFMT "\n", address, regs->pc); show_regs(regs); @@ -575,9 +572,10 @@ do_sigbus: #ifndef __tilegx__ /* We must release ICS before panicking or we won't get anywhere. */ -#define ics_panic(fmt, ...) do { \ - __insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0); \ - panic(fmt, __VA_ARGS__); \ +#define ics_panic(fmt, ...) \ +do { \ + __insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0); \ + panic(fmt, ##__VA_ARGS__); \ } while (0) /* @@ -615,8 +613,7 @@ struct intvec_state do_page_fault_ics(struct pt_regs *regs, int fault_num, fault_num != INT_DTLB_ACCESS)) { unsigned long old_pc = regs->pc; regs->pc = pc; - ics_panic("Bad ICS page fault args:" - " old PC %#lx, fault %d/%d at %#lx\n", + ics_panic("Bad ICS page fault args: old PC %#lx, fault %d/%d at %#lx", old_pc, fault_num, write, address); } @@ -669,8 +666,8 @@ struct intvec_state do_page_fault_ics(struct pt_regs *regs, int fault_num, #endif fixup = search_exception_tables(pc); if (!fixup) - ics_panic("ICS atomic fault not in table:" - " PC %#lx, fault %d", pc, fault_num); + ics_panic("ICS atomic fault not in table: PC %#lx, fault %d", + pc, fault_num); regs->pc = fixup->fixup; regs->ex1 = PL_ICS_EX1(KERNEL_PL, 0); } @@ -826,8 +823,7 @@ void do_page_fault(struct pt_regs *regs, int fault_num, set_thread_flag(TIF_ASYNC_TLB); if (async->fault_num != 0) { - panic("Second async fault %d;" - " old fault was %d (%#lx/%ld)", + panic("Second async fault %d; old fault was %d (%#lx/%ld)", fault_num, async->fault_num, address, write); } diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c index 33294fdc402e..cd3387370ebb 100644 --- a/arch/tile/mm/homecache.c +++ b/arch/tile/mm/homecache.c @@ -152,12 +152,10 @@ void flush_remote(unsigned long cache_pfn, unsigned long cache_control, cpumask_scnprintf(cache_buf, sizeof(cache_buf), &cache_cpumask_copy); cpumask_scnprintf(tlb_buf, sizeof(tlb_buf), &tlb_cpumask_copy); - pr_err("hv_flush_remote(%#llx, %#lx, %p [%s]," - " %#lx, %#lx, %#lx, %p [%s], %p, %d) = %d\n", + pr_err("hv_flush_remote(%#llx, %#lx, %p [%s], %#lx, %#lx, %#lx, %p [%s], %p, %d) = %d\n", cache_pa, cache_control, cache_cpumask, cache_buf, (unsigned long)tlb_va, tlb_length, tlb_pgsize, - tlb_cpumask, tlb_buf, - asids, asidcount, rc); + tlb_cpumask, tlb_buf, asids, asidcount, rc); panic("Unsafe to continue."); } diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c index e514899e1100..3270e0019266 100644 --- a/arch/tile/mm/hugetlbpage.c +++ b/arch/tile/mm/hugetlbpage.c @@ -284,22 +284,21 @@ static __init int __setup_hugepagesz(unsigned long ps) int level, base_shift; if ((1UL << log_ps) != ps || (log_ps & 1) != 0) { - pr_warn("Not 
enabling %ld byte huge pages;" - " must be a power of four.\n", ps); + pr_warn("Not enabling %ld byte huge pages; must be a power of four\n", + ps); return -EINVAL; } if (ps > 64*1024*1024*1024UL) { - pr_warn("Not enabling %ld MB huge pages;" - " largest legal value is 64 GB .\n", ps >> 20); + pr_warn("Not enabling %ld MB huge pages; largest legal value is 64 GB\n", + ps >> 20); return -EINVAL; } else if (ps >= PUD_SIZE) { static long hv_jpage_size; if (hv_jpage_size == 0) hv_jpage_size = hv_sysconf(HV_SYSCONF_PAGE_SIZE_JUMBO); if (hv_jpage_size != PUD_SIZE) { - pr_warn("Not enabling >= %ld MB huge pages:" - " hypervisor reports size %ld\n", + pr_warn("Not enabling >= %ld MB huge pages: hypervisor reports size %ld\n", PUD_SIZE >> 20, hv_jpage_size); return -EINVAL; } @@ -320,14 +319,13 @@ static __init int __setup_hugepagesz(unsigned long ps) int shift_val = log_ps - base_shift; if (huge_shift[level] != 0) { int old_shift = base_shift + huge_shift[level]; - pr_warn("Not enabling %ld MB huge pages;" - " already have size %ld MB.\n", + pr_warn("Not enabling %ld MB huge pages; already have size %ld MB\n", ps >> 20, (1UL << old_shift) >> 20); return -EINVAL; } if (hv_set_pte_super_shift(level, shift_val) != 0) { - pr_warn("Not enabling %ld MB huge pages;" - " no hypervisor support.\n", ps >> 20); + pr_warn("Not enabling %ld MB huge pages; no hypervisor support\n", + ps >> 20); return -EINVAL; } printk(KERN_DEBUG "Enabled %ld MB huge pages\n", ps >> 20); diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c index caa270165f86..be240cc4978d 100644 --- a/arch/tile/mm/init.c +++ b/arch/tile/mm/init.c @@ -357,11 +357,11 @@ static int __init setup_ktext(char *str) cpulist_scnprintf(buf, sizeof(buf), &ktext_mask); if (cpumask_weight(&ktext_mask) > 1) { ktext_small = 1; - pr_info("ktext: using caching neighborhood %s " - "with small pages\n", buf); + pr_info("ktext: using caching neighborhood %s with small pages\n", + buf); } else { pr_info("ktext: caching on cpu %s with one huge page\n", - buf); + buf); } } @@ -413,19 +413,16 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base) int rc, i; if (ktext_arg_seen && ktext_hash) { - pr_warning("warning: \"ktext\" boot argument ignored" - " if \"kcache_hash\" sets up text hash-for-home\n"); + pr_warn("warning: \"ktext\" boot argument ignored if \"kcache_hash\" sets up text hash-for-home\n"); ktext_small = 0; } if (kdata_arg_seen && kdata_hash) { - pr_warning("warning: \"kdata\" boot argument ignored" - " if \"kcache_hash\" sets up data hash-for-home\n"); + pr_warn("warning: \"kdata\" boot argument ignored if \"kcache_hash\" sets up data hash-for-home\n"); } if (kdata_huge && !hash_default) { - pr_warning("warning: disabling \"kdata=huge\"; requires" - " kcache_hash=all or =allbutstack\n"); + pr_warn("warning: disabling \"kdata=huge\"; requires kcache_hash=all or =allbutstack\n"); kdata_huge = 0; } @@ -470,8 +467,8 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base) pte[pte_ofs] = pfn_pte(pfn, prot); } else { if (kdata_huge) - printk(KERN_DEBUG "pre-shattered huge" - " page at %#lx\n", address); + printk(KERN_DEBUG "pre-shattered huge page at %#lx\n", + address); for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE; pfn++, pte_ofs++, address += PAGE_SIZE) { pgprot_t prot = init_pgprot(address); @@ -501,8 +498,8 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base) pr_info("ktext: not using unavailable cpus %s\n", buf); } if (cpumask_empty(&ktext_mask)) { - pr_warning("ktext: no valid cpus; caching on %d.\n", - 
smp_processor_id()); + pr_warn("ktext: no valid cpus; caching on %d\n", + smp_processor_id()); cpumask_copy(&ktext_mask, cpumask_of(smp_processor_id())); } @@ -798,11 +795,9 @@ void __init mem_init(void) #ifdef CONFIG_HIGHMEM /* check that fixmap and pkmap do not overlap */ if (PKMAP_ADDR(LAST_PKMAP-1) >= FIXADDR_START) { - pr_err("fixmap and kmap areas overlap" - " - this will crash\n"); + pr_err("fixmap and kmap areas overlap - this will crash\n"); pr_err("pkstart: %lxh pkend: %lxh fixstart %lxh\n", - PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP-1), - FIXADDR_START); + PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP-1), FIXADDR_START); BUG(); } #endif @@ -926,8 +921,7 @@ static void free_init_pages(char *what, unsigned long begin, unsigned long end) unsigned long addr = (unsigned long) begin; if (kdata_huge && !initfree) { - pr_warning("Warning: ignoring initfree=0:" - " incompatible with kdata=huge\n"); + pr_warn("Warning: ignoring initfree=0: incompatible with kdata=huge\n"); initfree = 1; } end = (end + PAGE_SIZE - 1) & PAGE_MASK; diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c index 5e86eac4bfae..7bf2491a9c1f 100644 --- a/arch/tile/mm/pgtable.c +++ b/arch/tile/mm/pgtable.c @@ -44,9 +44,7 @@ void show_mem(unsigned int filter) { struct zone *zone; - pr_err("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu" - " free:%lu\n slab:%lu mapped:%lu pagetables:%lu bounce:%lu" - " pagecache:%lu swap:%lu\n", + pr_err("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu free:%lu\n slab:%lu mapped:%lu pagetables:%lu bounce:%lu pagecache:%lu swap:%lu\n", (global_page_state(NR_ACTIVE_ANON) + global_page_state(NR_ACTIVE_FILE)), (global_page_state(NR_INACTIVE_ANON) + diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c index 8035145f043b..62087028a9ce 100644 --- a/arch/um/drivers/line.c +++ b/arch/um/drivers/line.c @@ -632,6 +632,7 @@ static irqreturn_t winch_interrupt(int irq, void *data) int fd = winch->fd; int err; char c; + struct pid *pgrp; if (fd != -1) { err = generic_read(fd, &c, NULL); @@ -657,7 +658,10 @@ static irqreturn_t winch_interrupt(int irq, void *data) if (line != NULL) { chan_window_size(line, &tty->winsize.ws_row, &tty->winsize.ws_col); - kill_pgrp(tty->pgrp, SIGWINCH, 1); + pgrp = tty_get_pgrp(tty); + if (pgrp) + kill_pgrp(pgrp, SIGWINCH, 1); + put_pid(pgrp); } tty_kref_put(tty); } diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild index 244b12c8cb39..9176fa11d49b 100644 --- a/arch/um/include/asm/Kbuild +++ b/arch/um/include/asm/Kbuild @@ -10,7 +10,6 @@ generic-y += exec.h generic-y += ftrace.h generic-y += futex.h generic-y += hardirq.h -generic-y += hash.h generic-y += hw_irq.h generic-y += io.h generic-y += irq_regs.h diff --git a/arch/um/include/asm/mmu_context.h b/arch/um/include/asm/mmu_context.h index aa4a743dc4ab..941527e507f7 100644 --- a/arch/um/include/asm/mmu_context.h +++ b/arch/um/include/asm/mmu_context.h @@ -10,7 +10,26 @@ #include <asm/mmu.h> extern void uml_setup_stubs(struct mm_struct *mm); +/* + * Needed since we do not use the asm-generic/mm_hooks.h: + */ +static inline void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) +{ + uml_setup_stubs(mm); +} extern void arch_exit_mmap(struct mm_struct *mm); +static inline void arch_unmap(struct mm_struct *mm, + struct vm_area_struct *vma, + unsigned long start, unsigned long end) +{ +} +static inline void arch_bprm_mm_init(struct mm_struct *mm, + struct vm_area_struct *vma) +{ +} +/* + * end asm-generic/mm_hooks.h functions + */ #define deactivate_mm(tsk,mm) do { } while 
(0) @@ -41,11 +60,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, } } -static inline void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) -{ - uml_setup_stubs(mm); -} - static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) { diff --git a/arch/unicore32/include/asm/Kbuild b/arch/unicore32/include/asm/Kbuild index 5a2bb53faa42..3e0c19d0f4c5 100644 --- a/arch/unicore32/include/asm/Kbuild +++ b/arch/unicore32/include/asm/Kbuild @@ -16,7 +16,6 @@ generic-y += fcntl.h generic-y += ftrace.h generic-y += futex.h generic-y += hardirq.h -generic-y += hash.h generic-y += hw_irq.h generic-y += ioctl.h generic-y += ioctls.h diff --git a/arch/unicore32/include/asm/mmu_context.h b/arch/unicore32/include/asm/mmu_context.h index ef470a7a3d0f..1cb5220afaf9 100644 --- a/arch/unicore32/include/asm/mmu_context.h +++ b/arch/unicore32/include/asm/mmu_context.h @@ -86,4 +86,15 @@ static inline void arch_dup_mmap(struct mm_struct *oldmm, { } +static inline void arch_unmap(struct mm_struct *mm, + struct vm_area_struct *vma, + unsigned long start, unsigned long end) +{ +} + +static inline void arch_bprm_mm_init(struct mm_struct *mm, + struct vm_area_struct *vma) +{ +} + #endif diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 41a503c15862..d69f1cd87fd9 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -24,6 +24,7 @@ config X86 select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS select ARCH_HAS_FAST_MULTIPLIER + select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_MIGHT_HAVE_PC_PARPORT select ARCH_MIGHT_HAVE_PC_SERIO select HAVE_AOUT if X86_32 @@ -248,6 +249,10 @@ config HAVE_INTEL_TXT def_bool y depends on INTEL_IOMMU && ACPI +config X86_INTEL_MPX + def_bool y + depends on CPU_SUP_INTEL + config X86_32_SMP def_bool y depends on X86_32 && SMP @@ -988,6 +993,24 @@ config X86_ESPFIX64 def_bool y depends on X86_16BIT && X86_64 +config X86_VSYSCALL_EMULATION + bool "Enable vsyscall emulation" if EXPERT + default y + depends on X86_64 + ---help--- + This enables emulation of the legacy vsyscall page. Disabling + it is roughly equivalent to booting with vsyscall=none, except + that it will also disable the helpful warning if a program + tries to use a vsyscall. With this option set to N, offending + programs will just segfault, citing addresses of the form + 0xffffffffff600?00. + + This option is required by many programs built before 2013, and + care should be used even with newer programs if set to N. + + Disabling this option saves about 7K of kernel size and + possibly 4K of additional runtime pagetable memory. + config TOSHIBA tristate "Toshiba Laptop support" depends on X86_32 diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index 45abc363dd3e..d999398928bc 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -3,6 +3,18 @@ # # create a compressed vmlinux image from the original vmlinux # +# vmlinuz is: +# decompression code (*.o) +# asm globals (piggy.S), including: +# vmlinux.bin.(gz|bz2|lzma|...) +# +# vmlinux.bin is: +# vmlinux stripped of debugging and comments +# vmlinux.bin.all is: +# vmlinux.bin + vmlinux.relocs +# vmlinux.bin.(gz|bz2|lzma|...) 
is: +# (see scripts/Makefile.lib size_append) +# compressed vmlinux.bin.all + u32 size of vmlinux.bin.all targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma \ vmlinux.bin.xz vmlinux.bin.lzo vmlinux.bin.lz4 @@ -35,7 +47,8 @@ vmlinux-objs-$(CONFIG_RANDOMIZE_BASE) += $(obj)/aslr.o $(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone -vmlinux-objs-$(CONFIG_EFI_STUB) += $(obj)/eboot.o $(obj)/efi_stub_$(BITS).o +vmlinux-objs-$(CONFIG_EFI_STUB) += $(obj)/eboot.o $(obj)/efi_stub_$(BITS).o \ + $(objtree)/drivers/firmware/efi/libstub/lib.a $(obj)/vmlinux: $(vmlinux-objs-y) FORCE $(call if_changed,ld) diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c index 1acf605a646d..92b9a5f2aed6 100644 --- a/arch/x86/boot/compressed/eboot.c +++ b/arch/x86/boot/compressed/eboot.c @@ -21,8 +21,10 @@ static efi_system_table_t *sys_table; static struct efi_config *efi_early; -#define efi_call_early(f, ...) \ - efi_early->call(efi_early->f, __VA_ARGS__); +__pure const struct efi_config *__efi_early(void) +{ + return efi_early; +} #define BOOT_SERVICES(bits) \ static void setup_boot_services##bits(struct efi_config *c) \ @@ -285,8 +287,6 @@ void efi_char16_printk(efi_system_table_t *table, efi_char16_t *str) } } -#include "../../../../drivers/firmware/efi/libstub/efi-stub-helper.c" - static void find_bits(unsigned long mask, u8 *pos, u8 *size) { u8 first, len; diff --git a/arch/x86/boot/compressed/eboot.h b/arch/x86/boot/compressed/eboot.h index c88c31ecad12..d487e727f1ec 100644 --- a/arch/x86/boot/compressed/eboot.h +++ b/arch/x86/boot/compressed/eboot.h @@ -103,20 +103,4 @@ struct efi_uga_draw_protocol { void *blt; }; -struct efi_config { - u64 image_handle; - u64 table; - u64 allocate_pool; - u64 allocate_pages; - u64 get_memory_map; - u64 free_pool; - u64 free_pages; - u64 locate_handle; - u64 handle_protocol; - u64 exit_boot_services; - u64 text_output; - efi_status_t (*call)(unsigned long, ...); - bool is64; -} __packed; - #endif /* BOOT_COMPRESSED_EBOOT_H */ diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c index 30dd59a9f0b4..dcc1c536cc21 100644 --- a/arch/x86/boot/compressed/misc.c +++ b/arch/x86/boot/compressed/misc.c @@ -260,7 +260,7 @@ static void handle_relocations(void *output, unsigned long output_len) /* * Process relocations: 32 bit relocations first then 64 bit after. - * Two sets of binary relocations are added to the end of the kernel + * Three sets of binary relocations are added to the end of the kernel * before compression. Each relocation table entry is the kernel * address of the location which needs to be updated stored as a * 32-bit value which is sign extended to 64 bits. @@ -270,6 +270,8 @@ static void handle_relocations(void *output, unsigned long output_len) * kernel bits... 
* 0 - zero terminator for 64 bit relocations * 64 bit relocation repeated + * 0 - zero terminator for inverse 32 bit relocations + * 32 bit inverse relocation repeated * 0 - zero terminator for 32 bit relocations * 32 bit relocation repeated * @@ -286,6 +288,16 @@ static void handle_relocations(void *output, unsigned long output_len) *(uint32_t *)ptr += delta; } #ifdef CONFIG_X86_64 + while (*--reloc) { + long extended = *reloc; + extended += map; + + ptr = (unsigned long)extended; + if (ptr < min_addr || ptr > max_addr) + error("inverse 32-bit relocation outside of kernel!\n"); + + *(int32_t *)ptr -= delta; + } for (reloc--; *reloc; reloc--) { long extended = *reloc; extended += map; diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig index 32d2e7056c87..419819d6dab3 100644 --- a/arch/x86/configs/i386_defconfig +++ b/arch/x86/configs/i386_defconfig @@ -8,6 +8,7 @@ CONFIG_TASKSTATS=y CONFIG_TASK_DELAY_ACCT=y CONFIG_TASK_XACCT=y CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_FHANDLE=y CONFIG_AUDIT=y CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig index a481dd4755d5..4c311ddd973b 100644 --- a/arch/x86/configs/x86_64_defconfig +++ b/arch/x86/configs/x86_64_defconfig @@ -7,6 +7,7 @@ CONFIG_TASKSTATS=y CONFIG_TASK_DELAY_ACCT=y CONFIG_TASK_XACCT=y CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_FHANDLE=y CONFIG_AUDIT=y CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y diff --git a/arch/x86/crypto/aes_glue.c b/arch/x86/crypto/aes_glue.c index aafe8ce0d65d..e26984f7ab8d 100644 --- a/arch/x86/crypto/aes_glue.c +++ b/arch/x86/crypto/aes_glue.c @@ -66,5 +66,5 @@ module_exit(aes_fini); MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, asm optimized"); MODULE_LICENSE("GPL"); -MODULE_ALIAS("aes"); -MODULE_ALIAS("aes-asm"); +MODULE_ALIAS_CRYPTO("aes"); +MODULE_ALIAS_CRYPTO("aes-asm"); diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index 888950f29fd9..ae855f4f64b7 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c @@ -43,10 +43,6 @@ #include <asm/crypto/glue_helper.h> #endif -#if defined(CONFIG_CRYPTO_PCBC) || defined(CONFIG_CRYPTO_PCBC_MODULE) -#define HAS_PCBC -#endif - /* This data is stored at the end of the crypto_tfm struct. * It's a type of per "session" data storage location. * This needs to be 16 byte aligned. 
@@ -547,7 +543,7 @@ static int ablk_ctr_init(struct crypto_tfm *tfm) #endif -#ifdef HAS_PCBC +#if IS_ENABLED(CONFIG_CRYPTO_PCBC) static int ablk_pcbc_init(struct crypto_tfm *tfm) { return ablk_init_common(tfm, "fpu(pcbc(__driver-aes-aesni))"); @@ -1377,7 +1373,7 @@ static struct crypto_alg aesni_algs[] = { { }, }, #endif -#ifdef HAS_PCBC +#if IS_ENABLED(CONFIG_CRYPTO_PCBC) }, { .cra_name = "pcbc(aes)", .cra_driver_name = "pcbc-aes-aesni", @@ -1550,4 +1546,4 @@ module_exit(aesni_exit); MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized"); MODULE_LICENSE("GPL"); -MODULE_ALIAS("aes"); +MODULE_ALIAS_CRYPTO("aes"); diff --git a/arch/x86/crypto/blowfish_glue.c b/arch/x86/crypto/blowfish_glue.c index 8af519ed73d1..17c05531dfd1 100644 --- a/arch/x86/crypto/blowfish_glue.c +++ b/arch/x86/crypto/blowfish_glue.c @@ -478,5 +478,5 @@ module_exit(fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Blowfish Cipher Algorithm, asm optimized"); -MODULE_ALIAS("blowfish"); -MODULE_ALIAS("blowfish-asm"); +MODULE_ALIAS_CRYPTO("blowfish"); +MODULE_ALIAS_CRYPTO("blowfish-asm"); diff --git a/arch/x86/crypto/camellia_aesni_avx2_glue.c b/arch/x86/crypto/camellia_aesni_avx2_glue.c index 4209a76fcdaa..9a07fafe3831 100644 --- a/arch/x86/crypto/camellia_aesni_avx2_glue.c +++ b/arch/x86/crypto/camellia_aesni_avx2_glue.c @@ -582,5 +582,5 @@ module_exit(camellia_aesni_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Camellia Cipher Algorithm, AES-NI/AVX2 optimized"); -MODULE_ALIAS("camellia"); -MODULE_ALIAS("camellia-asm"); +MODULE_ALIAS_CRYPTO("camellia"); +MODULE_ALIAS_CRYPTO("camellia-asm"); diff --git a/arch/x86/crypto/camellia_aesni_avx_glue.c b/arch/x86/crypto/camellia_aesni_avx_glue.c index 87a041a10f4a..ed38d959add6 100644 --- a/arch/x86/crypto/camellia_aesni_avx_glue.c +++ b/arch/x86/crypto/camellia_aesni_avx_glue.c @@ -574,5 +574,5 @@ module_exit(camellia_aesni_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Camellia Cipher Algorithm, AES-NI/AVX optimized"); -MODULE_ALIAS("camellia"); -MODULE_ALIAS("camellia-asm"); +MODULE_ALIAS_CRYPTO("camellia"); +MODULE_ALIAS_CRYPTO("camellia-asm"); diff --git a/arch/x86/crypto/camellia_glue.c b/arch/x86/crypto/camellia_glue.c index c171dcbf192d..5c8b6266a394 100644 --- a/arch/x86/crypto/camellia_glue.c +++ b/arch/x86/crypto/camellia_glue.c @@ -1725,5 +1725,5 @@ module_exit(fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Camellia Cipher Algorithm, asm optimized"); -MODULE_ALIAS("camellia"); -MODULE_ALIAS("camellia-asm"); +MODULE_ALIAS_CRYPTO("camellia"); +MODULE_ALIAS_CRYPTO("camellia-asm"); diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c index e57e20ab5e0b..60ada677a928 100644 --- a/arch/x86/crypto/cast5_avx_glue.c +++ b/arch/x86/crypto/cast5_avx_glue.c @@ -491,4 +491,4 @@ module_exit(cast5_exit); MODULE_DESCRIPTION("Cast5 Cipher Algorithm, AVX optimized"); MODULE_LICENSE("GPL"); -MODULE_ALIAS("cast5"); +MODULE_ALIAS_CRYPTO("cast5"); diff --git a/arch/x86/crypto/cast6_avx_glue.c b/arch/x86/crypto/cast6_avx_glue.c index 09f3677393e4..0160f68a57ff 100644 --- a/arch/x86/crypto/cast6_avx_glue.c +++ b/arch/x86/crypto/cast6_avx_glue.c @@ -611,4 +611,4 @@ module_exit(cast6_exit); MODULE_DESCRIPTION("Cast6 Cipher Algorithm, AVX optimized"); MODULE_LICENSE("GPL"); -MODULE_ALIAS("cast6"); +MODULE_ALIAS_CRYPTO("cast6"); diff --git a/arch/x86/crypto/crc32-pclmul_glue.c b/arch/x86/crypto/crc32-pclmul_glue.c index 9d014a74ef96..1937fc1d8763 100644 --- a/arch/x86/crypto/crc32-pclmul_glue.c +++ 
b/arch/x86/crypto/crc32-pclmul_glue.c @@ -197,5 +197,5 @@ module_exit(crc32_pclmul_mod_fini); MODULE_AUTHOR("Alexander Boyko <alexander_boyko@xyratex.com>"); MODULE_LICENSE("GPL"); -MODULE_ALIAS("crc32"); -MODULE_ALIAS("crc32-pclmul"); +MODULE_ALIAS_CRYPTO("crc32"); +MODULE_ALIAS_CRYPTO("crc32-pclmul"); diff --git a/arch/x86/crypto/crc32c-intel_glue.c b/arch/x86/crypto/crc32c-intel_glue.c index 6812ad98355c..28640c3d6af7 100644 --- a/arch/x86/crypto/crc32c-intel_glue.c +++ b/arch/x86/crypto/crc32c-intel_glue.c @@ -280,5 +280,5 @@ MODULE_AUTHOR("Austin Zhang <austin.zhang@intel.com>, Kent Liu <kent.liu@intel.c MODULE_DESCRIPTION("CRC32c (Castagnoli) optimization using Intel Hardware."); MODULE_LICENSE("GPL"); -MODULE_ALIAS("crc32c"); -MODULE_ALIAS("crc32c-intel"); +MODULE_ALIAS_CRYPTO("crc32c"); +MODULE_ALIAS_CRYPTO("crc32c-intel"); diff --git a/arch/x86/crypto/crct10dif-pclmul_glue.c b/arch/x86/crypto/crct10dif-pclmul_glue.c index 7845d7fd54c0..b6c67bf30fdf 100644 --- a/arch/x86/crypto/crct10dif-pclmul_glue.c +++ b/arch/x86/crypto/crct10dif-pclmul_glue.c @@ -147,5 +147,5 @@ MODULE_AUTHOR("Tim Chen <tim.c.chen@linux.intel.com>"); MODULE_DESCRIPTION("T10 DIF CRC calculation accelerated with PCLMULQDQ."); MODULE_LICENSE("GPL"); -MODULE_ALIAS("crct10dif"); -MODULE_ALIAS("crct10dif-pclmul"); +MODULE_ALIAS_CRYPTO("crct10dif"); +MODULE_ALIAS_CRYPTO("crct10dif-pclmul"); diff --git a/arch/x86/crypto/des3_ede_glue.c b/arch/x86/crypto/des3_ede_glue.c index 0e9c0668fe4e..38a14f818ef1 100644 --- a/arch/x86/crypto/des3_ede_glue.c +++ b/arch/x86/crypto/des3_ede_glue.c @@ -502,8 +502,8 @@ module_exit(des3_ede_x86_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Triple DES EDE Cipher Algorithm, asm optimized"); -MODULE_ALIAS("des3_ede"); -MODULE_ALIAS("des3_ede-asm"); -MODULE_ALIAS("des"); -MODULE_ALIAS("des-asm"); +MODULE_ALIAS_CRYPTO("des3_ede"); +MODULE_ALIAS_CRYPTO("des3_ede-asm"); +MODULE_ALIAS_CRYPTO("des"); +MODULE_ALIAS_CRYPTO("des-asm"); MODULE_AUTHOR("Jussi Kivilinna <jussi.kivilinna@iki.fi>"); diff --git a/arch/x86/crypto/fpu.c b/arch/x86/crypto/fpu.c index 98d7a188f46b..f368ba261739 100644 --- a/arch/x86/crypto/fpu.c +++ b/arch/x86/crypto/fpu.c @@ -17,6 +17,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> +#include <linux/crypto.h> #include <asm/i387.h> struct crypto_fpu_ctx { @@ -159,3 +160,5 @@ void __exit crypto_fpu_exit(void) { crypto_unregister_template(&crypto_fpu_tmpl); } + +MODULE_ALIAS_CRYPTO("fpu"); diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c index 88bb7ba8b175..8253d85aa165 100644 --- a/arch/x86/crypto/ghash-clmulni-intel_glue.c +++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c @@ -341,4 +341,4 @@ module_exit(ghash_pclmulqdqni_mod_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("GHASH Message Digest Algorithm, " "acclerated by PCLMULQDQ-NI"); -MODULE_ALIAS("ghash"); +MODULE_ALIAS_CRYPTO("ghash"); diff --git a/arch/x86/crypto/salsa20_glue.c b/arch/x86/crypto/salsa20_glue.c index 5e8e67739bb5..399a29d067d6 100644 --- a/arch/x86/crypto/salsa20_glue.c +++ b/arch/x86/crypto/salsa20_glue.c @@ -119,5 +119,5 @@ module_exit(fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION ("Salsa20 stream cipher algorithm (optimized assembly version)"); -MODULE_ALIAS("salsa20"); -MODULE_ALIAS("salsa20-asm"); +MODULE_ALIAS_CRYPTO("salsa20"); +MODULE_ALIAS_CRYPTO("salsa20-asm"); diff --git a/arch/x86/crypto/serpent_avx2_glue.c b/arch/x86/crypto/serpent_avx2_glue.c index 2fae489b1524..437e47a4d302 100644 --- 
a/arch/x86/crypto/serpent_avx2_glue.c +++ b/arch/x86/crypto/serpent_avx2_glue.c @@ -558,5 +558,5 @@ module_exit(fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Serpent Cipher Algorithm, AVX2 optimized"); -MODULE_ALIAS("serpent"); -MODULE_ALIAS("serpent-asm"); +MODULE_ALIAS_CRYPTO("serpent"); +MODULE_ALIAS_CRYPTO("serpent-asm"); diff --git a/arch/x86/crypto/serpent_avx_glue.c b/arch/x86/crypto/serpent_avx_glue.c index ff4870870972..7e217398b4eb 100644 --- a/arch/x86/crypto/serpent_avx_glue.c +++ b/arch/x86/crypto/serpent_avx_glue.c @@ -617,4 +617,4 @@ module_exit(serpent_exit); MODULE_DESCRIPTION("Serpent Cipher Algorithm, AVX optimized"); MODULE_LICENSE("GPL"); -MODULE_ALIAS("serpent"); +MODULE_ALIAS_CRYPTO("serpent"); diff --git a/arch/x86/crypto/serpent_sse2_glue.c b/arch/x86/crypto/serpent_sse2_glue.c index 8c95f8637306..bf025adaea01 100644 --- a/arch/x86/crypto/serpent_sse2_glue.c +++ b/arch/x86/crypto/serpent_sse2_glue.c @@ -618,4 +618,4 @@ module_exit(serpent_sse2_exit); MODULE_DESCRIPTION("Serpent Cipher Algorithm, SSE2 optimized"); MODULE_LICENSE("GPL"); -MODULE_ALIAS("serpent"); +MODULE_ALIAS_CRYPTO("serpent"); diff --git a/arch/x86/crypto/sha-mb/sha1_mb.c b/arch/x86/crypto/sha-mb/sha1_mb.c index 99eefd812958..a225a5ca1037 100644 --- a/arch/x86/crypto/sha-mb/sha1_mb.c +++ b/arch/x86/crypto/sha-mb/sha1_mb.c @@ -204,8 +204,7 @@ static struct sha1_hash_ctx *sha1_ctx_mgr_resubmit(struct sha1_ctx_mgr *mgr, str continue; } - if (ctx) - ctx->status = HASH_CTX_STS_IDLE; + ctx->status = HASH_CTX_STS_IDLE; return ctx; } diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c index 74d16ef707c7..6c20fe04a738 100644 --- a/arch/x86/crypto/sha1_ssse3_glue.c +++ b/arch/x86/crypto/sha1_ssse3_glue.c @@ -278,4 +278,4 @@ module_exit(sha1_ssse3_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, Supplemental SSE3 accelerated"); -MODULE_ALIAS("sha1"); +MODULE_ALIAS_CRYPTO("sha1"); diff --git a/arch/x86/crypto/sha256_ssse3_glue.c b/arch/x86/crypto/sha256_ssse3_glue.c index f248546da1ca..8fad72f4dfd2 100644 --- a/arch/x86/crypto/sha256_ssse3_glue.c +++ b/arch/x86/crypto/sha256_ssse3_glue.c @@ -211,7 +211,7 @@ static int sha224_ssse3_final(struct shash_desc *desc, u8 *hash) sha256_ssse3_final(desc, D); memcpy(hash, D, SHA224_DIGEST_SIZE); - memset(D, 0, SHA256_DIGEST_SIZE); + memzero_explicit(D, SHA256_DIGEST_SIZE); return 0; } @@ -318,5 +318,5 @@ module_exit(sha256_ssse3_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm, Supplemental SSE3 accelerated"); -MODULE_ALIAS("sha256"); -MODULE_ALIAS("sha224"); +MODULE_ALIAS_CRYPTO("sha256"); +MODULE_ALIAS_CRYPTO("sha224"); diff --git a/arch/x86/crypto/sha512_ssse3_glue.c b/arch/x86/crypto/sha512_ssse3_glue.c index 8626b03e83b7..0b6af26832bf 100644 --- a/arch/x86/crypto/sha512_ssse3_glue.c +++ b/arch/x86/crypto/sha512_ssse3_glue.c @@ -219,7 +219,7 @@ static int sha384_ssse3_final(struct shash_desc *desc, u8 *hash) sha512_ssse3_final(desc, D); memcpy(hash, D, SHA384_DIGEST_SIZE); - memset(D, 0, SHA512_DIGEST_SIZE); + memzero_explicit(D, SHA512_DIGEST_SIZE); return 0; } @@ -326,5 +326,5 @@ module_exit(sha512_ssse3_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("SHA512 Secure Hash Algorithm, Supplemental SSE3 accelerated"); -MODULE_ALIAS("sha512"); -MODULE_ALIAS("sha384"); +MODULE_ALIAS_CRYPTO("sha512"); +MODULE_ALIAS_CRYPTO("sha384"); diff --git a/arch/x86/crypto/twofish_avx_glue.c b/arch/x86/crypto/twofish_avx_glue.c index 4e3c665be129..1ac531ea9bcc 100644 --- 
a/arch/x86/crypto/twofish_avx_glue.c +++ b/arch/x86/crypto/twofish_avx_glue.c @@ -579,4 +579,4 @@ module_exit(twofish_exit); MODULE_DESCRIPTION("Twofish Cipher Algorithm, AVX optimized"); MODULE_LICENSE("GPL"); -MODULE_ALIAS("twofish"); +MODULE_ALIAS_CRYPTO("twofish"); diff --git a/arch/x86/crypto/twofish_glue.c b/arch/x86/crypto/twofish_glue.c index 0a5202303501..77e06c2da83d 100644 --- a/arch/x86/crypto/twofish_glue.c +++ b/arch/x86/crypto/twofish_glue.c @@ -96,5 +96,5 @@ module_exit(fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION ("Twofish Cipher Algorithm, asm optimized"); -MODULE_ALIAS("twofish"); -MODULE_ALIAS("twofish-asm"); +MODULE_ALIAS_CRYPTO("twofish"); +MODULE_ALIAS_CRYPTO("twofish-asm"); diff --git a/arch/x86/crypto/twofish_glue_3way.c b/arch/x86/crypto/twofish_glue_3way.c index 13e63b3e1dfb..56d8a08ee479 100644 --- a/arch/x86/crypto/twofish_glue_3way.c +++ b/arch/x86/crypto/twofish_glue_3way.c @@ -495,5 +495,5 @@ module_exit(fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Twofish Cipher Algorithm, 3-way parallel asm optimized"); -MODULE_ALIAS("twofish"); -MODULE_ALIAS("twofish-asm"); +MODULE_ALIAS_CRYPTO("twofish"); +MODULE_ALIAS_CRYPTO("twofish-asm"); diff --git a/arch/x86/ia32/audit.c b/arch/x86/ia32/audit.c index 5d7b381da692..2eccc8932ae6 100644 --- a/arch/x86/ia32/audit.c +++ b/arch/x86/ia32/audit.c @@ -35,6 +35,7 @@ int ia32_classify_syscall(unsigned syscall) case __NR_socketcall: return 4; case __NR_execve: + case __NR_execveat: return 5; default: return 1; diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c index df91466f973d..ae6aad1d24f7 100644 --- a/arch/x86/ia32/ia32_aout.c +++ b/arch/x86/ia32/ia32_aout.c @@ -342,8 +342,8 @@ static int load_aout_binary(struct linux_binprm *bprm) time_after(jiffies, error_time + 5*HZ)) { printk(KERN_WARNING "fd_offset is not page aligned. Please convert " - "program: %s\n", - bprm->file->f_path.dentry->d_name.name); + "program: %pD\n", + bprm->file); error_time = jiffies; } #endif @@ -429,8 +429,8 @@ static int load_aout_library(struct file *file) if (time_after(jiffies, error_time + 5*HZ)) { printk(KERN_WARNING "N_TXTOFF is not page aligned. Please convert " - "library: %s\n", - file->f_path.dentry->d_name.name); + "library: %pD\n", + file); error_time = jiffies; } #endif diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S index ffe71228fc10..82e8a1d44658 100644 --- a/arch/x86/ia32/ia32entry.S +++ b/arch/x86/ia32/ia32entry.S @@ -480,6 +480,7 @@ GLOBAL(\label) PTREGSCALL stub32_rt_sigreturn, sys32_rt_sigreturn PTREGSCALL stub32_sigreturn, sys32_sigreturn PTREGSCALL stub32_execve, compat_sys_execve + PTREGSCALL stub32_execveat, compat_sys_execveat PTREGSCALL stub32_fork, sys_fork PTREGSCALL stub32_vfork, sys_vfork diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h index 0f4460b5636d..2ab1eb33106e 100644 --- a/arch/x86/include/asm/barrier.h +++ b/arch/x86/include/asm/barrier.h @@ -24,78 +24,28 @@ #define wmb() asm volatile("sfence" ::: "memory") #endif -/** - * read_barrier_depends - Flush all pending reads that subsequents reads - * depend on. - * - * No data-dependent reads from memory-like regions are ever reordered - * over this barrier. All reads preceding this primitive are guaranteed - * to access memory (but not necessarily other CPUs' caches) before any - * reads following this primitive that depend on the data return by - * any of the preceding reads. This primitive is much lighter weight than - * rmb() on most CPUs, and is never heavier weight than is - * rmb(). 
- * - * These ordering constraints are respected by both the local CPU - * and the compiler. - * - * Ordering is not guaranteed by anything other than these primitives, - * not even by data dependencies. See the documentation for - * memory_barrier() for examples and URLs to more information. - * - * For example, the following code would force ordering (the initial - * value of "a" is zero, "b" is one, and "p" is "&a"): - * - * <programlisting> - * CPU 0 CPU 1 - * - * b = 2; - * memory_barrier(); - * p = &b; q = p; - * read_barrier_depends(); - * d = *q; - * </programlisting> - * - * because the read of "*q" depends on the read of "p" and these - * two reads are separated by a read_barrier_depends(). However, - * the following code, with the same initial values for "a" and "b": - * - * <programlisting> - * CPU 0 CPU 1 - * - * a = 2; - * memory_barrier(); - * b = 3; y = b; - * read_barrier_depends(); - * x = a; - * </programlisting> - * - * does not enforce ordering, since there is no data dependency between - * the read of "a" and the read of "b". Therefore, on some CPUs, such - * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb() - * in cases like this where there are no data dependencies. - **/ - -#define read_barrier_depends() do { } while (0) - -#ifdef CONFIG_SMP -#define smp_mb() mb() #ifdef CONFIG_X86_PPRO_FENCE -# define smp_rmb() rmb() +#define dma_rmb() rmb() #else -# define smp_rmb() barrier() +#define dma_rmb() barrier() #endif +#define dma_wmb() barrier() + +#ifdef CONFIG_SMP +#define smp_mb() mb() +#define smp_rmb() dma_rmb() #define smp_wmb() barrier() -#define smp_read_barrier_depends() read_barrier_depends() #define set_mb(var, value) do { (void)xchg(&var, value); } while (0) #else /* !SMP */ #define smp_mb() barrier() #define smp_rmb() barrier() #define smp_wmb() barrier() -#define smp_read_barrier_depends() do { } while (0) #define set_mb(var, value) do { var = value; barrier(); } while (0) #endif /* SMP */ +#define read_barrier_depends() do { } while (0) +#define smp_read_barrier_depends() do { } while (0) + #if defined(CONFIG_X86_PPRO_FENCE) /* diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h index 9863ee3747da..47c8e32f621a 100644 --- a/arch/x86/include/asm/cacheflush.h +++ b/arch/x86/include/asm/cacheflush.h @@ -5,65 +5,6 @@ #include <asm-generic/cacheflush.h> #include <asm/special_insns.h> -#ifdef CONFIG_X86_PAT -/* - * X86 PAT uses page flags WC and Uncached together to keep track of - * memory type of pages that have backing page struct. X86 PAT supports 3 - * different memory types, _PAGE_CACHE_WB, _PAGE_CACHE_WC and - * _PAGE_CACHE_UC_MINUS and fourth state where page's memory type has not - * been changed from its default (value of -1 used to denote this). - * Note we do not support _PAGE_CACHE_UC here. 
- */ - -#define _PGMT_DEFAULT 0 -#define _PGMT_WC (1UL << PG_arch_1) -#define _PGMT_UC_MINUS (1UL << PG_uncached) -#define _PGMT_WB (1UL << PG_uncached | 1UL << PG_arch_1) -#define _PGMT_MASK (1UL << PG_uncached | 1UL << PG_arch_1) -#define _PGMT_CLEAR_MASK (~_PGMT_MASK) - -static inline unsigned long get_page_memtype(struct page *pg) -{ - unsigned long pg_flags = pg->flags & _PGMT_MASK; - - if (pg_flags == _PGMT_DEFAULT) - return -1; - else if (pg_flags == _PGMT_WC) - return _PAGE_CACHE_WC; - else if (pg_flags == _PGMT_UC_MINUS) - return _PAGE_CACHE_UC_MINUS; - else - return _PAGE_CACHE_WB; -} - -static inline void set_page_memtype(struct page *pg, unsigned long memtype) -{ - unsigned long memtype_flags = _PGMT_DEFAULT; - unsigned long old_flags; - unsigned long new_flags; - - switch (memtype) { - case _PAGE_CACHE_WC: - memtype_flags = _PGMT_WC; - break; - case _PAGE_CACHE_UC_MINUS: - memtype_flags = _PGMT_UC_MINUS; - break; - case _PAGE_CACHE_WB: - memtype_flags = _PGMT_WB; - break; - } - - do { - old_flags = pg->flags; - new_flags = (old_flags & _PGMT_CLEAR_MASK) | memtype_flags; - } while (cmpxchg(&pg->flags, old_flags, new_flags) != old_flags); -} -#else -static inline unsigned long get_page_memtype(struct page *pg) { return -1; } -static inline void set_page_memtype(struct page *pg, unsigned long memtype) { } -#endif - /* * The set_memory_* API can be used to change various attributes of a virtual * address range. The attributes include: diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index 0bb1335313b2..aede2c347bde 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -189,6 +189,11 @@ #define X86_FEATURE_DTHERM ( 7*32+ 7) /* Digital Thermal Sensor */ #define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */ #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ +#define X86_FEATURE_HWP ( 7*32+ 10) /* "hwp" Intel HWP */ +#define X86_FEATURE_HWP_NOITFY ( 7*32+ 11) /* Intel HWP_NOTIFY */ +#define X86_FEATURE_HWP_ACT_WINDOW ( 7*32+ 12) /* Intel HWP_ACT_WINDOW */ +#define X86_FEATURE_HWP_EPP ( 7*32+13) /* Intel HWP_EPP */ +#define X86_FEATURE_HWP_PKG_REQ ( 7*32+14) /* Intel HWP_PKG_REQ */ /* Virtualization flags: Linux defined, word 8 */ #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h index 97534a7d38e3..f226df064660 100644 --- a/arch/x86/include/asm/disabled-features.h +++ b/arch/x86/include/asm/disabled-features.h @@ -10,6 +10,12 @@ * cpu_feature_enabled(). 
*/ +#ifdef CONFIG_X86_INTEL_MPX +# define DISABLE_MPX 0 +#else +# define DISABLE_MPX (1<<(X86_FEATURE_MPX & 31)) +#endif + #ifdef CONFIG_X86_64 # define DISABLE_VME (1<<(X86_FEATURE_VME & 31)) # define DISABLE_K6_MTRR (1<<(X86_FEATURE_K6_MTRR & 31)) @@ -34,6 +40,6 @@ #define DISABLED_MASK6 0 #define DISABLED_MASK7 0 #define DISABLED_MASK8 0 -#define DISABLED_MASK9 0 +#define DISABLED_MASK9 (DISABLE_MPX) #endif /* _ASM_X86_DISABLED_FEATURES_H */ diff --git a/arch/x86/include/asm/dma.h b/arch/x86/include/asm/dma.h index 0bdb0c54d9a1..fe884e18fa6e 100644 --- a/arch/x86/include/asm/dma.h +++ b/arch/x86/include/asm/dma.h @@ -70,7 +70,7 @@ #define MAX_DMA_CHANNELS 8 /* 16MB ISA DMA zone */ -#define MAX_DMA_PFN ((16 * 1024 * 1024) >> PAGE_SHIFT) +#define MAX_DMA_PFN ((16UL * 1024 * 1024) >> PAGE_SHIFT) /* 4GB broken PCI/AGP hardware bus master zone */ #define MAX_DMA32_PFN ((4UL * 1024 * 1024 * 1024) >> PAGE_SHIFT) diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index 9b11757975d0..25bce45c6fc4 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h @@ -158,6 +158,30 @@ static inline efi_status_t efi_thunk_set_virtual_address_map( } #endif /* CONFIG_EFI_MIXED */ + +/* arch specific definitions used by the stub code */ + +struct efi_config { + u64 image_handle; + u64 table; + u64 allocate_pool; + u64 allocate_pages; + u64 get_memory_map; + u64 free_pool; + u64 free_pages; + u64 locate_handle; + u64 handle_protocol; + u64 exit_boot_services; + u64 text_output; + efi_status_t (*call)(unsigned long, ...); + bool is64; +} __packed; + +__pure const struct efi_config *__efi_early(void); + +#define efi_call_early(f, ...) \ + __efi_early()->call(__efi_early()->f, __VA_ARGS__); + extern bool efi_reboot_required(void); #else diff --git a/arch/x86/include/asm/fb.h b/arch/x86/include/asm/fb.h index 2519d0679d99..c3dd5e71f439 100644 --- a/arch/x86/include/asm/fb.h +++ b/arch/x86/include/asm/fb.h @@ -8,8 +8,12 @@ static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, unsigned long off) { + unsigned long prot; + + prot = pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK; if (boot_cpu_data.x86 > 3) - pgprot_val(vma->vm_page_prot) |= _PAGE_PCD; + pgprot_val(vma->vm_page_prot) = + prot | cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS); } extern int fb_is_primary_device(struct fb_info *info); diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h index ffb1733ac91f..f80d70009ff8 100644 --- a/arch/x86/include/asm/fixmap.h +++ b/arch/x86/include/asm/fixmap.h @@ -69,7 +69,9 @@ enum fixed_addresses { #ifdef CONFIG_X86_32 FIX_HOLE, #else +#ifdef CONFIG_X86_VSYSCALL_EMULATION VSYSCALL_PAGE = (FIXADDR_TOP - VSYSCALL_ADDR) >> PAGE_SHIFT, +#endif #ifdef CONFIG_PARAVIRT_CLOCK PVCLOCK_FIXMAP_BEGIN, PVCLOCK_FIXMAP_END = PVCLOCK_FIXMAP_BEGIN+PVCLOCK_VSYSCALL_NR_PAGES-1, @@ -136,9 +138,7 @@ enum fixed_addresses { extern void reserve_top_address(unsigned long reserve); #define FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT) -#define FIXADDR_BOOT_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) #define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) -#define FIXADDR_BOOT_START (FIXADDR_TOP - FIXADDR_BOOT_SIZE) extern int fixmaps_set; diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h index e1f7fecaa7d6..f45acad3c4b6 100644 --- a/arch/x86/include/asm/ftrace.h +++ b/arch/x86/include/asm/ftrace.h @@ -1,39 +1,6 @@ #ifndef _ASM_X86_FTRACE_H #define _ASM_X86_FTRACE_H -#ifdef __ASSEMBLY__ - - /* skip is set if the stack was 
already partially adjusted */ - .macro MCOUNT_SAVE_FRAME skip=0 - /* - * We add enough stack to save all regs. - */ - subq $(SS+8-\skip), %rsp - movq %rax, RAX(%rsp) - movq %rcx, RCX(%rsp) - movq %rdx, RDX(%rsp) - movq %rsi, RSI(%rsp) - movq %rdi, RDI(%rsp) - movq %r8, R8(%rsp) - movq %r9, R9(%rsp) - /* Move RIP to its proper location */ - movq SS+8(%rsp), %rdx - movq %rdx, RIP(%rsp) - .endm - - .macro MCOUNT_RESTORE_FRAME skip=0 - movq R9(%rsp), %r9 - movq R8(%rsp), %r8 - movq RDI(%rsp), %rdi - movq RSI(%rsp), %rsi - movq RDX(%rsp), %rdx - movq RCX(%rsp), %rcx - movq RAX(%rsp), %rax - addq $(SS+8-\skip), %rsp - .endm - -#endif - #ifdef CONFIG_FUNCTION_TRACER #ifdef CC_USING_FENTRY # define MCOUNT_ADDR ((long)(__fentry__)) diff --git a/arch/x86/include/asm/hash.h b/arch/x86/include/asm/hash.h deleted file mode 100644 index e8c58f88b1d4..000000000000 --- a/arch/x86/include/asm/hash.h +++ /dev/null @@ -1,7 +0,0 @@ -#ifndef _ASM_X86_HASH_H -#define _ASM_X86_HASH_H - -struct fast_hash_ops; -extern void setup_arch_fast_hash(struct fast_hash_ops *ops); - -#endif /* _ASM_X86_HASH_H */ diff --git a/arch/x86/include/asm/highmem.h b/arch/x86/include/asm/highmem.h index 302a323b3f67..04e9d023168f 100644 --- a/arch/x86/include/asm/highmem.h +++ b/arch/x86/include/asm/highmem.h @@ -38,17 +38,20 @@ extern unsigned long highstart_pfn, highend_pfn; /* * Ordering is: * - * FIXADDR_TOP - * fixed_addresses - * FIXADDR_START - * temp fixed addresses - * FIXADDR_BOOT_START - * Persistent kmap area - * PKMAP_BASE - * VMALLOC_END - * Vmalloc area - * VMALLOC_START - * high_memory + * high memory on: high_memory off: + * FIXADDR_TOP FIXADDR_TOP + * fixed addresses fixed addresses + * FIXADDR_START FIXADDR_START + * temp fixed addresses/persistent kmap area VMALLOC_END + * PKMAP_BASE temp fixed addresses/vmalloc area + * VMALLOC_END VMALLOC_START + * vmalloc area high_memory + * VMALLOC_START + * high_memory + * + * The temp fixed area is only used during boot for early_ioremap(), and + * it is unused when the ioremap() is functional. vmalloc/pkmap area become + * available after early boot so the temp fixed area is available for re-use. 
*/ #define LAST_PKMAP_MASK (LAST_PKMAP-1) #define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT) diff --git a/arch/x86/include/asm/insn.h b/arch/x86/include/asm/insn.h index 48eb30a86062..47f29b1d1846 100644 --- a/arch/x86/include/asm/insn.h +++ b/arch/x86/include/asm/insn.h @@ -65,6 +65,7 @@ struct insn { unsigned char x86_64; const insn_byte_t *kaddr; /* kernel address of insn to analyze */ + const insn_byte_t *end_kaddr; /* kernel address of last insn in buffer */ const insn_byte_t *next_byte; }; @@ -96,7 +97,7 @@ struct insn { #define X86_VEX_P(vex) ((vex) & 0x03) /* VEX3 Byte2, VEX2 Byte1 */ #define X86_VEX_M_MAX 0x1f /* VEX3.M Maximum value */ -extern void insn_init(struct insn *insn, const void *kaddr, int x86_64); +extern void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64); extern void insn_get_prefixes(struct insn *insn); extern void insn_get_opcode(struct insn *insn); extern void insn_get_modrm(struct insn *insn); @@ -115,12 +116,13 @@ static inline void insn_get_attribute(struct insn *insn) extern int insn_rip_relative(struct insn *insn); /* Init insn for kernel text */ -static inline void kernel_insn_init(struct insn *insn, const void *kaddr) +static inline void kernel_insn_init(struct insn *insn, + const void *kaddr, int buf_len) { #ifdef CONFIG_X86_64 - insn_init(insn, kaddr, 1); + insn_init(insn, kaddr, buf_len, 1); #else /* CONFIG_X86_32 */ - insn_init(insn, kaddr, 0); + insn_init(insn, kaddr, buf_len, 0); #endif } diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h index 0cdbe6e81b45..34a5b93704d3 100644 --- a/arch/x86/include/asm/io.h +++ b/arch/x86/include/asm/io.h @@ -318,7 +318,7 @@ extern void *xlate_dev_mem_ptr(phys_addr_t phys); extern void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr); extern int ioremap_change_attr(unsigned long vaddr, unsigned long size, - unsigned long prot_val); + enum page_cache_mode pcm); extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size); extern bool is_early_ioremap_ptep(pte_t *ptep); diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index 958b90f761e5..51b26e895933 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h @@ -34,6 +34,10 @@ #define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */ #define MCI_STATUS_AR (1ULL<<55) /* Action required */ +/* AMD-specific bits */ +#define MCI_STATUS_DEFERRED (1ULL<<44) /* declare an uncorrected error */ +#define MCI_STATUS_POISON (1ULL<<43) /* access poisonous data */ + /* * Note that the full MCACOD field of IA32_MCi_STATUS MSR is * bits 15:0. 
But bit 12 is the 'F' bit, defined for corrected @@ -78,7 +82,6 @@ /* Software defined banks */ #define MCE_EXTENDED_BANK 128 #define MCE_THERMAL_BANK (MCE_EXTENDED_BANK + 0) -#define K8_MCE_THRESHOLD_BASE (MCE_EXTENDED_BANK + 1) #define MCE_LOG_LEN 32 #define MCE_LOG_SIGNATURE "MACHINECHECK" diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h index 64dc362506b7..201b520521ed 100644 --- a/arch/x86/include/asm/microcode.h +++ b/arch/x86/include/asm/microcode.h @@ -78,6 +78,7 @@ static inline void __exit exit_amd_microcode(void) {} extern void __init load_ucode_bsp(void); extern void load_ucode_ap(void); extern int __init save_microcode_in_initrd(void); +void reload_early_microcode(void); #else static inline void __init load_ucode_bsp(void) {} static inline void load_ucode_ap(void) {} @@ -85,6 +86,7 @@ static inline int __init save_microcode_in_initrd(void) { return 0; } +static inline void reload_early_microcode(void) {} #endif #endif /* _ASM_X86_MICROCODE_H */ diff --git a/arch/x86/include/asm/microcode_amd.h b/arch/x86/include/asm/microcode_amd.h index b7b10b82d3e5..af935397e053 100644 --- a/arch/x86/include/asm/microcode_amd.h +++ b/arch/x86/include/asm/microcode_amd.h @@ -59,7 +59,7 @@ static inline u16 find_equiv_id(struct equiv_cpu_entry *equiv_cpu_table, extern int __apply_microcode_amd(struct microcode_amd *mc_amd); extern int apply_microcode_amd(int cpu); -extern enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size); +extern enum ucode_state load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size); #define PATCH_MAX_SIZE PAGE_SIZE extern u8 amd_ucode_patch[PATCH_MAX_SIZE]; @@ -68,10 +68,12 @@ extern u8 amd_ucode_patch[PATCH_MAX_SIZE]; extern void __init load_ucode_amd_bsp(void); extern void load_ucode_amd_ap(void); extern int __init save_microcode_in_initrd_amd(void); +void reload_ucode_amd(void); #else static inline void __init load_ucode_amd_bsp(void) {} static inline void load_ucode_amd_ap(void) {} static inline int __init save_microcode_in_initrd_amd(void) { return -EINVAL; } +void reload_ucode_amd(void) {} #endif #endif /* _ASM_X86_MICROCODE_AMD_H */ diff --git a/arch/x86/include/asm/microcode_intel.h b/arch/x86/include/asm/microcode_intel.h index bbe296e0bce1..dd4c20043ce7 100644 --- a/arch/x86/include/asm/microcode_intel.h +++ b/arch/x86/include/asm/microcode_intel.h @@ -68,11 +68,13 @@ extern void __init load_ucode_intel_bsp(void); extern void load_ucode_intel_ap(void); extern void show_ucode_info_early(void); extern int __init save_microcode_in_initrd_intel(void); +void reload_ucode_intel(void); #else static inline __init void load_ucode_intel_bsp(void) {} static inline void load_ucode_intel_ap(void) {} static inline void show_ucode_info_early(void) {} static inline int __init save_microcode_in_initrd_intel(void) { return -EINVAL; } +static inline void reload_ucode_intel(void) {} #endif #if defined(CONFIG_MICROCODE_INTEL_EARLY) && defined(CONFIG_HOTPLUG_CPU) diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h index 166af2a8e865..40269a2bf6f9 100644 --- a/arch/x86/include/asm/mmu_context.h +++ b/arch/x86/include/asm/mmu_context.h @@ -10,9 +10,8 @@ #include <asm/pgalloc.h> #include <asm/tlbflush.h> #include <asm/paravirt.h> +#include <asm/mpx.h> #ifndef CONFIG_PARAVIRT -#include <asm-generic/mm_hooks.h> - static inline void paravirt_activate_mm(struct mm_struct *prev, struct mm_struct *next) { @@ -53,7 +52,16 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct 
*next, /* Stop flush ipis for the previous mm */ cpumask_clear_cpu(cpu, mm_cpumask(prev)); - /* Load the LDT, if the LDT is different: */ + /* + * Load the LDT, if the LDT is different. + * + * It's possible leave_mm(prev) has been called. If so, + * then prev->context.ldt could be out of sync with the + * LDT descriptor or the LDT register. This can only happen + * if prev->context.ldt is non-null, since we never free + * an LDT. But LDTs can't be shared across mms, so + * prev->context.ldt won't be equal to next->context.ldt. + */ if (unlikely(prev->context.ldt != next->context.ldt)) load_LDT_nolock(&next->context); } @@ -102,4 +110,27 @@ do { \ } while (0) #endif +static inline void arch_dup_mmap(struct mm_struct *oldmm, + struct mm_struct *mm) +{ + paravirt_arch_dup_mmap(oldmm, mm); +} + +static inline void arch_exit_mmap(struct mm_struct *mm) +{ + paravirt_arch_exit_mmap(mm); +} + +static inline void arch_bprm_mm_init(struct mm_struct *mm, + struct vm_area_struct *vma) +{ + mpx_mm_init(mm); +} + +static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long start, unsigned long end) +{ + mpx_notify_unmap(mm, vma, start, end); +} + #endif /* _ASM_X86_MMU_CONTEXT_H */ diff --git a/arch/x86/include/asm/mpx.h b/arch/x86/include/asm/mpx.h new file mode 100644 index 000000000000..a952a13d59a7 --- /dev/null +++ b/arch/x86/include/asm/mpx.h @@ -0,0 +1,103 @@ +#ifndef _ASM_X86_MPX_H +#define _ASM_X86_MPX_H + +#include <linux/types.h> +#include <asm/ptrace.h> +#include <asm/insn.h> + +/* + * NULL is theoretically a valid place to put the bounds + * directory, so point this at an invalid address. + */ +#define MPX_INVALID_BOUNDS_DIR ((void __user *)-1) +#define MPX_BNDCFG_ENABLE_FLAG 0x1 +#define MPX_BD_ENTRY_VALID_FLAG 0x1 + +#ifdef CONFIG_X86_64 + +/* upper 28 bits [47:20] of the virtual address in 64-bit used to + * index into bounds directory (BD). + */ +#define MPX_BD_ENTRY_OFFSET 28 +#define MPX_BD_ENTRY_SHIFT 3 +/* bits [19:3] of the virtual address in 64-bit used to index into + * bounds table (BT). 
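The comments above describe how MPX splits a 64-bit linear address into a bounds-directory index (bits 47:20) and a bounds-table index (bits 19:3). A worked sketch of that split, using the MPX_* constants defined in this hunk (the sample pointer value is arbitrary):

/* Illustrative only: the raw index extraction behind
 * MPX_GET_BD_ENTRY_OFFSET()/MPX_GET_BT_ENTRY_OFFSET(), before the
 * indices are scaled by the per-entry size (MPX_*_ENTRY_SHIFT). */
unsigned long addr = 0x00007f1234567890UL;			/* arbitrary sample */
unsigned long bd_index = (addr >> (MPX_BT_ENTRY_OFFSET + MPX_IGN_BITS)) &
			 ((1UL << MPX_BD_ENTRY_OFFSET) - 1);	/* bits 47:20 */
unsigned long bt_index = (addr >> MPX_IGN_BITS) &
			 ((1UL << MPX_BT_ENTRY_OFFSET) - 1);	/* bits 19:3 */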
+ */ +#define MPX_BT_ENTRY_OFFSET 17 +#define MPX_BT_ENTRY_SHIFT 5 +#define MPX_IGN_BITS 3 +#define MPX_BD_ENTRY_TAIL 3 + +#else + +#define MPX_BD_ENTRY_OFFSET 20 +#define MPX_BD_ENTRY_SHIFT 2 +#define MPX_BT_ENTRY_OFFSET 10 +#define MPX_BT_ENTRY_SHIFT 4 +#define MPX_IGN_BITS 2 +#define MPX_BD_ENTRY_TAIL 2 + +#endif + +#define MPX_BD_SIZE_BYTES (1UL<<(MPX_BD_ENTRY_OFFSET+MPX_BD_ENTRY_SHIFT)) +#define MPX_BT_SIZE_BYTES (1UL<<(MPX_BT_ENTRY_OFFSET+MPX_BT_ENTRY_SHIFT)) + +#define MPX_BNDSTA_TAIL 2 +#define MPX_BNDCFG_TAIL 12 +#define MPX_BNDSTA_ADDR_MASK (~((1UL<<MPX_BNDSTA_TAIL)-1)) +#define MPX_BNDCFG_ADDR_MASK (~((1UL<<MPX_BNDCFG_TAIL)-1)) +#define MPX_BT_ADDR_MASK (~((1UL<<MPX_BD_ENTRY_TAIL)-1)) + +#define MPX_BNDCFG_ADDR_MASK (~((1UL<<MPX_BNDCFG_TAIL)-1)) +#define MPX_BNDSTA_ERROR_CODE 0x3 + +#define MPX_BD_ENTRY_MASK ((1<<MPX_BD_ENTRY_OFFSET)-1) +#define MPX_BT_ENTRY_MASK ((1<<MPX_BT_ENTRY_OFFSET)-1) +#define MPX_GET_BD_ENTRY_OFFSET(addr) ((((addr)>>(MPX_BT_ENTRY_OFFSET+ \ + MPX_IGN_BITS)) & MPX_BD_ENTRY_MASK) << MPX_BD_ENTRY_SHIFT) +#define MPX_GET_BT_ENTRY_OFFSET(addr) ((((addr)>>MPX_IGN_BITS) & \ + MPX_BT_ENTRY_MASK) << MPX_BT_ENTRY_SHIFT) + +#ifdef CONFIG_X86_INTEL_MPX +siginfo_t *mpx_generate_siginfo(struct pt_regs *regs, + struct xsave_struct *xsave_buf); +int mpx_handle_bd_fault(struct xsave_struct *xsave_buf); +static inline int kernel_managing_mpx_tables(struct mm_struct *mm) +{ + return (mm->bd_addr != MPX_INVALID_BOUNDS_DIR); +} +static inline void mpx_mm_init(struct mm_struct *mm) +{ + /* + * NULL is theoretically a valid place to put the bounds + * directory, so point this at an invalid address. + */ + mm->bd_addr = MPX_INVALID_BOUNDS_DIR; +} +void mpx_notify_unmap(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long start, unsigned long end); +#else +static inline siginfo_t *mpx_generate_siginfo(struct pt_regs *regs, + struct xsave_struct *xsave_buf) +{ + return NULL; +} +static inline int mpx_handle_bd_fault(struct xsave_struct *xsave_buf) +{ + return -EINVAL; +} +static inline int kernel_managing_mpx_tables(struct mm_struct *mm) +{ + return 0; +} +static inline void mpx_mm_init(struct mm_struct *mm) +{ +} +static inline void mpx_notify_unmap(struct mm_struct *mm, + struct vm_area_struct *vma, + unsigned long start, unsigned long end) +{ +} +#endif /* CONFIG_X86_INTEL_MPX */ + +#endif /* _ASM_X86_MPX_H */ diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h index f408caf73430..b3bebf9e5746 100644 --- a/arch/x86/include/asm/page_64.h +++ b/arch/x86/include/asm/page_64.h @@ -39,6 +39,8 @@ void copy_page(void *to, void *from); #endif /* !__ASSEMBLY__ */ -#define __HAVE_ARCH_GATE_AREA 1 +#ifdef CONFIG_X86_VSYSCALL_EMULATION +# define __HAVE_ARCH_GATE_AREA 1 +#endif #endif /* _ASM_X86_PAGE_64_H */ diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index cd6e1610e29e..32444ae939ca 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -330,13 +330,13 @@ static inline void paravirt_activate_mm(struct mm_struct *prev, PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next); } -static inline void arch_dup_mmap(struct mm_struct *oldmm, - struct mm_struct *mm) +static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm, + struct mm_struct *mm) { PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm); } -static inline void arch_exit_mmap(struct mm_struct *mm) +static inline void paravirt_arch_exit_mmap(struct mm_struct *mm) { PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm); } @@ -986,5 +986,15 @@ extern void 
default_banner(void); #endif /* __ASSEMBLY__ */ #else /* CONFIG_PARAVIRT */ # define default_banner x86_init_noop +#ifndef __ASSEMBLY__ +static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm, + struct mm_struct *mm) +{ +} + +static inline void paravirt_arch_exit_mmap(struct mm_struct *mm) +{ +} +#endif /* __ASSEMBLY__ */ #endif /* !CONFIG_PARAVIRT */ #endif /* _ASM_X86_PARAVIRT_H */ diff --git a/arch/x86/include/asm/pat.h b/arch/x86/include/asm/pat.h index e2c1668dde7a..91bc4ba95f91 100644 --- a/arch/x86/include/asm/pat.h +++ b/arch/x86/include/asm/pat.h @@ -11,16 +11,17 @@ static const int pat_enabled; #endif extern void pat_init(void); +void pat_init_cache_modes(void); extern int reserve_memtype(u64 start, u64 end, - unsigned long req_type, unsigned long *ret_type); + enum page_cache_mode req_pcm, enum page_cache_mode *ret_pcm); extern int free_memtype(u64 start, u64 end); extern int kernel_map_sync_memtype(u64 base, unsigned long size, - unsigned long flag); + enum page_cache_mode pcm); int io_reserve_memtype(resource_size_t start, resource_size_t end, - unsigned long *type); + enum page_cache_mode *pcm); void io_free_memtype(resource_size_t start, resource_size_t end); diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index fd472181a1d0..e0ba66ca68c6 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h @@ -64,7 +64,7 @@ #define __percpu_prefix "" #endif -#define __percpu_arg(x) __percpu_prefix "%P" #x +#define __percpu_arg(x) __percpu_prefix "%" #x /* * Initialized pointers to per-cpu variables needed for the boot @@ -179,29 +179,58 @@ do { \ } \ } while (0) -#define percpu_from_op(op, var, constraint) \ +#define percpu_from_op(op, var) \ ({ \ typeof(var) pfo_ret__; \ switch (sizeof(var)) { \ case 1: \ asm(op "b "__percpu_arg(1)",%0" \ : "=q" (pfo_ret__) \ - : constraint); \ + : "m" (var)); \ break; \ case 2: \ asm(op "w "__percpu_arg(1)",%0" \ : "=r" (pfo_ret__) \ - : constraint); \ + : "m" (var)); \ break; \ case 4: \ asm(op "l "__percpu_arg(1)",%0" \ : "=r" (pfo_ret__) \ - : constraint); \ + : "m" (var)); \ break; \ case 8: \ asm(op "q "__percpu_arg(1)",%0" \ : "=r" (pfo_ret__) \ - : constraint); \ + : "m" (var)); \ + break; \ + default: __bad_percpu_size(); \ + } \ + pfo_ret__; \ +}) + +#define percpu_stable_op(op, var) \ +({ \ + typeof(var) pfo_ret__; \ + switch (sizeof(var)) { \ + case 1: \ + asm(op "b "__percpu_arg(P1)",%0" \ + : "=q" (pfo_ret__) \ + : "p" (&(var))); \ + break; \ + case 2: \ + asm(op "w "__percpu_arg(P1)",%0" \ + : "=r" (pfo_ret__) \ + : "p" (&(var))); \ + break; \ + case 4: \ + asm(op "l "__percpu_arg(P1)",%0" \ + : "=r" (pfo_ret__) \ + : "p" (&(var))); \ + break; \ + case 8: \ + asm(op "q "__percpu_arg(P1)",%0" \ + : "=r" (pfo_ret__) \ + : "p" (&(var))); \ break; \ default: __bad_percpu_size(); \ } \ @@ -359,11 +388,11 @@ do { \ * per-thread variables implemented as per-cpu variables and thus * stable for the duration of the respective task. 
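The percpu_stable_op() variant added above hands the compiler the variable's address via a "p" constraint instead of an "m" memory operand, so the load may be cached and hoisted; that is only safe for values that never change underneath the running task. A minimal usage sketch under that assumption (this mirrors how x86 exposes the current task pointer as a per-cpu variable; the wrapper name is made up):

#include <asm/percpu.h>

DECLARE_PER_CPU(struct task_struct *, current_task);

static __always_inline struct task_struct *current_sketch(void)
{
	/* Safe with the "stable" form: current_task only changes at a
	 * context switch, never underneath the task reading it. */
	return this_cpu_read_stable(current_task);
}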
*/ -#define this_cpu_read_stable(var) percpu_from_op("mov", var, "p" (&(var))) +#define this_cpu_read_stable(var) percpu_stable_op("mov", var) -#define raw_cpu_read_1(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) -#define raw_cpu_read_2(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) -#define raw_cpu_read_4(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) +#define raw_cpu_read_1(pcp) percpu_from_op("mov", pcp) +#define raw_cpu_read_2(pcp) percpu_from_op("mov", pcp) +#define raw_cpu_read_4(pcp) percpu_from_op("mov", pcp) #define raw_cpu_write_1(pcp, val) percpu_to_op("mov", (pcp), val) #define raw_cpu_write_2(pcp, val) percpu_to_op("mov", (pcp), val) @@ -381,9 +410,9 @@ do { \ #define raw_cpu_xchg_2(pcp, val) percpu_xchg_op(pcp, val) #define raw_cpu_xchg_4(pcp, val) percpu_xchg_op(pcp, val) -#define this_cpu_read_1(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) -#define this_cpu_read_2(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) -#define this_cpu_read_4(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) +#define this_cpu_read_1(pcp) percpu_from_op("mov", pcp) +#define this_cpu_read_2(pcp) percpu_from_op("mov", pcp) +#define this_cpu_read_4(pcp) percpu_from_op("mov", pcp) #define this_cpu_write_1(pcp, val) percpu_to_op("mov", (pcp), val) #define this_cpu_write_2(pcp, val) percpu_to_op("mov", (pcp), val) #define this_cpu_write_4(pcp, val) percpu_to_op("mov", (pcp), val) @@ -435,7 +464,7 @@ do { \ * 32 bit must fall back to generic operations. */ #ifdef CONFIG_X86_64 -#define raw_cpu_read_8(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) +#define raw_cpu_read_8(pcp) percpu_from_op("mov", pcp) #define raw_cpu_write_8(pcp, val) percpu_to_op("mov", (pcp), val) #define raw_cpu_add_8(pcp, val) percpu_add_op((pcp), val) #define raw_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val) @@ -444,7 +473,7 @@ do { \ #define raw_cpu_xchg_8(pcp, nval) percpu_xchg_op(pcp, nval) #define raw_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval) -#define this_cpu_read_8(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) +#define this_cpu_read_8(pcp) percpu_from_op("mov", pcp) #define this_cpu_write_8(pcp, val) percpu_to_op("mov", (pcp), val) #define this_cpu_add_8(pcp, val) percpu_add_op((pcp), val) #define this_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val) @@ -522,7 +551,7 @@ static inline int x86_this_cpu_variable_test_bit(int nr, #include <asm-generic/percpu.h> /* We can use this directly for local CPU (faster). */ -DECLARE_PER_CPU(unsigned long, this_cpu_off); +DECLARE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off); #endif /* !__ASSEMBLY__ */ diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h index 8dfc9fd094a3..dc0f6ed35b08 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h @@ -177,6 +177,9 @@ struct x86_pmu_capability { #define IBS_CAPS_BRNTRGT (1U<<5) #define IBS_CAPS_OPCNTEXT (1U<<6) #define IBS_CAPS_RIPINVALIDCHK (1U<<7) +#define IBS_CAPS_OPBRNFUSE (1U<<8) +#define IBS_CAPS_FETCHCTLEXTD (1U<<9) +#define IBS_CAPS_OPDATA4 (1U<<10) #define IBS_CAPS_DEFAULT (IBS_CAPS_AVAIL \ | IBS_CAPS_FETCHSAM \ diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index aa97a070f09f..e8a5454acc99 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -9,9 +9,10 @@ /* * Macro to mark a page protection value as UC- */ -#define pgprot_noncached(prot) \ - ((boot_cpu_data.x86 > 3) \ - ? (__pgprot(pgprot_val(prot) | _PAGE_CACHE_UC_MINUS)) \ +#define pgprot_noncached(prot) \ + ((boot_cpu_data.x86 > 3) \ + ? 
(__pgprot(pgprot_val(prot) | \ + cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS))) \ : (prot)) #ifndef __ASSEMBLY__ @@ -99,6 +100,11 @@ static inline int pte_young(pte_t pte) return pte_flags(pte) & _PAGE_ACCESSED; } +static inline int pmd_dirty(pmd_t pmd) +{ + return pmd_flags(pmd) & _PAGE_DIRTY; +} + static inline int pmd_young(pmd_t pmd) { return pmd_flags(pmd) & _PAGE_ACCESSED; @@ -404,8 +410,8 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot) #define canon_pgprot(p) __pgprot(massage_pgprot(p)) static inline int is_new_memtype_allowed(u64 paddr, unsigned long size, - unsigned long flags, - unsigned long new_flags) + enum page_cache_mode pcm, + enum page_cache_mode new_pcm) { /* * PAT type is always WB for untracked ranges, so no need to check. @@ -419,10 +425,10 @@ static inline int is_new_memtype_allowed(u64 paddr, unsigned long size, * - request is uncached, return cannot be write-back * - request is write-combine, return cannot be write-back */ - if ((flags == _PAGE_CACHE_UC_MINUS && - new_flags == _PAGE_CACHE_WB) || - (flags == _PAGE_CACHE_WC && - new_flags == _PAGE_CACHE_WB)) { + if ((pcm == _PAGE_CACHE_MODE_UC_MINUS && + new_pcm == _PAGE_CACHE_MODE_WB) || + (pcm == _PAGE_CACHE_MODE_WC && + new_pcm == _PAGE_CACHE_MODE_WB)) { return 0; } diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h index ed5903be26fe..9fb2f2bc8245 100644 --- a/arch/x86/include/asm/pgtable_32_types.h +++ b/arch/x86/include/asm/pgtable_32_types.h @@ -37,7 +37,7 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */ #define LAST_PKMAP 1024 #endif -#define PKMAP_BASE ((FIXADDR_BOOT_START - PAGE_SIZE * (LAST_PKMAP + 1)) \ +#define PKMAP_BASE ((FIXADDR_START - PAGE_SIZE * (LAST_PKMAP + 1)) \ & PMD_MASK) #ifdef CONFIG_HIGHMEM diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h index 7166e25ecb57..602b6028c5b6 100644 --- a/arch/x86/include/asm/pgtable_64_types.h +++ b/arch/x86/include/asm/pgtable_64_types.h @@ -63,6 +63,8 @@ typedef struct { pteval_t pte; } pte_t; #define MODULES_LEN (MODULES_END - MODULES_VADDR) #define ESPFIX_PGD_ENTRY _AC(-2, UL) #define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << PGDIR_SHIFT) +#define EFI_VA_START ( -4 * (_AC(1, UL) << 30)) +#define EFI_VA_END (-68 * (_AC(1, UL) << 30)) #define EARLY_DYNAMIC_PAGE_TABLES 64 diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h index 07789647bf33..25bcd4a89517 100644 --- a/arch/x86/include/asm/pgtable_types.h +++ b/arch/x86/include/asm/pgtable_types.h @@ -128,11 +128,28 @@ _PAGE_SOFT_DIRTY | _PAGE_NUMA) #define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE | _PAGE_NUMA) -#define _PAGE_CACHE_MASK (_PAGE_PCD | _PAGE_PWT) -#define _PAGE_CACHE_WB (0) -#define _PAGE_CACHE_WC (_PAGE_PWT) -#define _PAGE_CACHE_UC_MINUS (_PAGE_PCD) -#define _PAGE_CACHE_UC (_PAGE_PCD | _PAGE_PWT) +/* + * The cache modes defined here are used to translate between pure SW usage + * and the HW defined cache mode bits and/or PAT entries. + * + * The resulting bits for PWT, PCD and PAT should be chosen in a way + * to have the WB mode at index 0 (all bits clear). This is the default + * right now and likely would break too much if changed. 
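The enum and translation helpers introduced below convert between the software cache modes and the PAT/PCD/PWT bit combination carried in a PTE. A small worked example of the bit packing done by __pte2cm_idx(), assuming the usual x86 bit positions (PWT=3, PCD=4, PAT=7):

/* Sketch: fold the PAT/PCD/PWT bits of a PTE into a 0..7 table index,
 * exactly as __pte2cm_idx() below does (PAT -> bit 2, PCD -> bit 1,
 * PWT -> bit 0). */
unsigned long cache_bits = _PAGE_PCD | _PAGE_PWT;		/* 0x18 */
unsigned int idx = (((cache_bits >> (_PAGE_BIT_PAT - 2)) & 4) |
		    ((cache_bits >> (_PAGE_BIT_PCD - 1)) & 2) |
		    ((cache_bits >> _PAGE_BIT_PWT) & 1));	/* == 3 */
/* __pte2cachemode_tbl[3] then yields the effective page_cache_mode. */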
+ */ +#ifndef __ASSEMBLY__ +enum page_cache_mode { + _PAGE_CACHE_MODE_WB = 0, + _PAGE_CACHE_MODE_WC = 1, + _PAGE_CACHE_MODE_UC_MINUS = 2, + _PAGE_CACHE_MODE_UC = 3, + _PAGE_CACHE_MODE_WT = 4, + _PAGE_CACHE_MODE_WP = 5, + _PAGE_CACHE_MODE_NUM = 8 +}; +#endif + +#define _PAGE_CACHE_MASK (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT) +#define _PAGE_NOCACHE (cachemode2protval(_PAGE_CACHE_MODE_UC)) #define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED) #define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \ @@ -156,41 +173,27 @@ #define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW) #define __PAGE_KERNEL_RX (__PAGE_KERNEL_EXEC & ~_PAGE_RW) -#define __PAGE_KERNEL_EXEC_NOCACHE (__PAGE_KERNEL_EXEC | _PAGE_PCD | _PAGE_PWT) -#define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC) -#define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT) -#define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD) +#define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_NOCACHE) #define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER) #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER) -#define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT) #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE) -#define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE) #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE) #define __PAGE_KERNEL_IO (__PAGE_KERNEL) #define __PAGE_KERNEL_IO_NOCACHE (__PAGE_KERNEL_NOCACHE) -#define __PAGE_KERNEL_IO_UC_MINUS (__PAGE_KERNEL_UC_MINUS) -#define __PAGE_KERNEL_IO_WC (__PAGE_KERNEL_WC) #define PAGE_KERNEL __pgprot(__PAGE_KERNEL) #define PAGE_KERNEL_RO __pgprot(__PAGE_KERNEL_RO) #define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC) #define PAGE_KERNEL_RX __pgprot(__PAGE_KERNEL_RX) -#define PAGE_KERNEL_WC __pgprot(__PAGE_KERNEL_WC) #define PAGE_KERNEL_NOCACHE __pgprot(__PAGE_KERNEL_NOCACHE) -#define PAGE_KERNEL_UC_MINUS __pgprot(__PAGE_KERNEL_UC_MINUS) -#define PAGE_KERNEL_EXEC_NOCACHE __pgprot(__PAGE_KERNEL_EXEC_NOCACHE) #define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE) -#define PAGE_KERNEL_LARGE_NOCACHE __pgprot(__PAGE_KERNEL_LARGE_NOCACHE) #define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC) #define PAGE_KERNEL_VSYSCALL __pgprot(__PAGE_KERNEL_VSYSCALL) #define PAGE_KERNEL_VVAR __pgprot(__PAGE_KERNEL_VVAR) -#define PAGE_KERNEL_VVAR_NOCACHE __pgprot(__PAGE_KERNEL_VVAR_NOCACHE) #define PAGE_KERNEL_IO __pgprot(__PAGE_KERNEL_IO) #define PAGE_KERNEL_IO_NOCACHE __pgprot(__PAGE_KERNEL_IO_NOCACHE) -#define PAGE_KERNEL_IO_UC_MINUS __pgprot(__PAGE_KERNEL_IO_UC_MINUS) -#define PAGE_KERNEL_IO_WC __pgprot(__PAGE_KERNEL_IO_WC) /* xwr */ #define __P000 PAGE_NONE @@ -341,6 +344,59 @@ static inline pmdval_t pmdnuma_flags(pmd_t pmd) #define pgprot_val(x) ((x).pgprot) #define __pgprot(x) ((pgprot_t) { (x) } ) +extern uint16_t __cachemode2pte_tbl[_PAGE_CACHE_MODE_NUM]; +extern uint8_t __pte2cachemode_tbl[8]; + +#define __pte2cm_idx(cb) \ + ((((cb) >> (_PAGE_BIT_PAT - 2)) & 4) | \ + (((cb) >> (_PAGE_BIT_PCD - 1)) & 2) | \ + (((cb) >> _PAGE_BIT_PWT) & 1)) +#define __cm_idx2pte(i) \ + ((((i) & 4) << (_PAGE_BIT_PAT - 2)) | \ + (((i) & 2) << (_PAGE_BIT_PCD - 1)) | \ + (((i) & 1) << _PAGE_BIT_PWT)) + +static inline unsigned long cachemode2protval(enum page_cache_mode pcm) +{ + if (likely(pcm == 0)) + return 0; + return __cachemode2pte_tbl[pcm]; +} +static inline pgprot_t cachemode2pgprot(enum page_cache_mode pcm) +{ + return __pgprot(cachemode2protval(pcm)); +} +static inline enum page_cache_mode 
pgprot2cachemode(pgprot_t pgprot) +{ + unsigned long masked; + + masked = pgprot_val(pgprot) & _PAGE_CACHE_MASK; + if (likely(masked == 0)) + return 0; + return __pte2cachemode_tbl[__pte2cm_idx(masked)]; +} +static inline pgprot_t pgprot_4k_2_large(pgprot_t pgprot) +{ + pgprot_t new; + unsigned long val; + + val = pgprot_val(pgprot); + pgprot_val(new) = (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) | + ((val & _PAGE_PAT) << (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT)); + return new; +} +static inline pgprot_t pgprot_large_2_4k(pgprot_t pgprot) +{ + pgprot_t new; + unsigned long val; + + val = pgprot_val(pgprot); + pgprot_val(new) = (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) | + ((val & _PAGE_PAT_LARGE) >> + (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT)); + return new; +} + typedef struct page *pgtable_t; @@ -396,6 +452,7 @@ static inline void update_page_count(int level, unsigned long pages) { } extern pte_t *lookup_address(unsigned long address, unsigned int *level); extern pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address, unsigned int *level); +extern pmd_t *lookup_pmd_address(unsigned long address); extern phys_addr_t slow_virt_to_phys(void *__address); extern int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address, unsigned numpages, unsigned long page_flags); diff --git a/arch/x86/include/asm/platform_sst_audio.h b/arch/x86/include/asm/platform_sst_audio.h index 0a4e140315b6..7249e6d0902d 100644 --- a/arch/x86/include/asm/platform_sst_audio.h +++ b/arch/x86/include/asm/platform_sst_audio.h @@ -16,6 +16,9 @@ #include <linux/sfi.h> +#define MAX_NUM_STREAMS_MRFLD 25 +#define MAX_NUM_STREAMS MAX_NUM_STREAMS_MRFLD + enum sst_audio_task_id_mrfld { SST_TASK_ID_NONE = 0, SST_TASK_ID_SBA = 1, @@ -73,6 +76,65 @@ struct sst_platform_data { unsigned int strm_map_size; }; +struct sst_info { + u32 iram_start; + u32 iram_end; + bool iram_use; + u32 dram_start; + u32 dram_end; + bool dram_use; + u32 imr_start; + u32 imr_end; + bool imr_use; + u32 mailbox_start; + bool use_elf; + bool lpe_viewpt_rqd; + unsigned int max_streams; + u32 dma_max_len; + u8 num_probes; +}; + +struct sst_lib_dnld_info { + unsigned int mod_base; + unsigned int mod_end; + unsigned int mod_table_offset; + unsigned int mod_table_size; + bool mod_ddr_dnld; +}; + +struct sst_res_info { + unsigned int shim_offset; + unsigned int shim_size; + unsigned int shim_phy_addr; + unsigned int ssp0_offset; + unsigned int ssp0_size; + unsigned int dma0_offset; + unsigned int dma0_size; + unsigned int dma1_offset; + unsigned int dma1_size; + unsigned int iram_offset; + unsigned int iram_size; + unsigned int dram_offset; + unsigned int dram_size; + unsigned int mbox_offset; + unsigned int mbox_size; + unsigned int acpi_lpe_res_index; + unsigned int acpi_ddr_index; + unsigned int acpi_ipc_irq_index; +}; + +struct sst_ipc_info { + int ipc_offset; + unsigned int mbox_recv_off; +}; + +struct sst_platform_info { + const struct sst_info *probe_data; + const struct sst_ipc_info *ipc_info; + const struct sst_res_info *res_info; + const struct sst_lib_dnld_info *lib_info; + const char *platform; +}; int add_sst_platform_device(void); #endif diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h index 400873450e33..8f3271842533 100644 --- a/arch/x86/include/asm/preempt.h +++ b/arch/x86/include/asm/preempt.h @@ -30,9 +30,6 @@ static __always_inline void preempt_count_set(int pc) /* * must be macros to avoid header recursion hell */ -#define task_preempt_count(p) \ - (task_thread_info(p)->saved_preempt_count & 
~PREEMPT_NEED_RESCHED) - #define init_task_preempt_count(p) do { \ task_thread_info(p)->saved_preempt_count = PREEMPT_DISABLED; \ } while (0) diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index eb71ec794732..a092a0cce0b7 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -127,7 +127,7 @@ struct cpuinfo_x86 { /* Index into per_cpu list: */ u16 cpu_index; u32 microcode; -} __attribute__((__aligned__(SMP_CACHE_BYTES))); +}; #define X86_VENDOR_INTEL 0 #define X86_VENDOR_CYRIX 1 @@ -151,7 +151,7 @@ extern __u32 cpu_caps_cleared[NCAPINTS]; extern __u32 cpu_caps_set[NCAPINTS]; #ifdef CONFIG_SMP -DECLARE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info); +DECLARE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info); #define cpu_data(cpu) per_cpu(cpu_info, cpu) #else #define cpu_info boot_cpu_data @@ -374,13 +374,14 @@ struct lwp_struct { u8 reserved[128]; }; -struct bndregs_struct { - u64 bndregs[8]; +struct bndreg { + u64 lower_bound; + u64 upper_bound; } __packed; -struct bndcsr_struct { - u64 cfg_reg_u; - u64 status_reg; +struct bndcsr { + u64 bndcfgu; + u64 bndstatus; } __packed; struct xsave_hdr_struct { @@ -394,8 +395,8 @@ struct xsave_struct { struct xsave_hdr_struct xsave_hdr; struct ymmh_struct ymmh; struct lwp_struct lwp; - struct bndregs_struct bndregs; - struct bndcsr_struct bndcsr; + struct bndreg bndreg[4]; + struct bndcsr bndcsr; /* new processor state extensions will go here */ } __attribute__ ((packed, aligned (64))); @@ -893,7 +894,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk); #else /* - * User space process size. 47bits minus one guard page. + * User space process size. 47bits minus one guard page. The guard + * page is necessary on Intel CPUs: if a SYSCALL instruction is at + * the highest possible canonical userspace address, then that + * syscall will enter the kernel with a non-canonical return + * address, and SYSRET will explode dangerously. We avoid this + * particular problem by preventing anything from being mapped + * at the maximum canonical address. 
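The comment above is the rationale for keeping TASK_SIZE_MAX one page below the 47-bit canonical boundary. A quick arithmetic sketch of what that buys (values assume 4 KiB pages and the 47-bit user address width used here):

/* 47-bit user space, 4 KiB pages: */
unsigned long canonical_top  = 1UL << 47;		/* 0x0000800000000000 */
unsigned long task_size_max  = canonical_top - 4096;	/* 0x00007ffffffff000 */
/*
 * With the top page never mappable, no SYSCALL instruction can sit at the
 * last canonical user address, so the dangerous non-canonical-return-RIP
 * case described above cannot be reached through a legitimate mapping.
 */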
*/ #define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE) @@ -953,6 +960,24 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip, extern int get_tsc_mode(unsigned long adr); extern int set_tsc_mode(unsigned int val); +/* Register/unregister a process' MPX related resource */ +#define MPX_ENABLE_MANAGEMENT(tsk) mpx_enable_management((tsk)) +#define MPX_DISABLE_MANAGEMENT(tsk) mpx_disable_management((tsk)) + +#ifdef CONFIG_X86_INTEL_MPX +extern int mpx_enable_management(struct task_struct *tsk); +extern int mpx_disable_management(struct task_struct *tsk); +#else +static inline int mpx_enable_management(struct task_struct *tsk) +{ + return -EINVAL; +} +static inline int mpx_disable_management(struct task_struct *tsk) +{ + return -EINVAL; +} +#endif /* CONFIG_X86_INTEL_MPX */ + extern u16 amd_get_nb_id(int cpu); static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves) diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h index 6f1c3a8a33ab..db257a58571f 100644 --- a/arch/x86/include/asm/segment.h +++ b/arch/x86/include/asm/segment.h @@ -23,6 +23,15 @@ #define GDT_ENTRY_BOOT_TSS (GDT_ENTRY_BOOT_CS + 2) #define __BOOT_TSS (GDT_ENTRY_BOOT_TSS * 8) +#define SEGMENT_RPL_MASK 0x3 /* + * Bottom two bits of selector give the ring + * privilege level + */ +#define SEGMENT_TI_MASK 0x4 /* Bit 2 is table indicator (LDT/GDT) */ +#define USER_RPL 0x3 /* User mode is privilege level 3 */ +#define SEGMENT_LDT 0x4 /* LDT segment has TI set... */ +#define SEGMENT_GDT 0x0 /* ... GDT has it cleared */ + #ifdef CONFIG_X86_32 /* * The layout of the per-CPU GDT under Linux: @@ -125,16 +134,6 @@ #define PNP_TS1 (GDT_ENTRY_PNPBIOS_TS1 * 8) /* transfer data segment */ #define PNP_TS2 (GDT_ENTRY_PNPBIOS_TS2 * 8) /* another data segment */ -/* Bottom two bits of selector give the ring privilege level */ -#define SEGMENT_RPL_MASK 0x3 -/* Bit 2 is table indicator (LDT/GDT) */ -#define SEGMENT_TI_MASK 0x4 - -/* User mode is privilege level 3 */ -#define USER_RPL 0x3 -/* LDT segment has TI set, GDT has it cleared */ -#define SEGMENT_LDT 0x4 -#define SEGMENT_GDT 0x0 /* * Matching rules for certain types of segments. @@ -192,17 +191,6 @@ #define get_kernel_rpl() 0 #endif -/* User mode is privilege level 3 */ -#define USER_RPL 0x3 -/* LDT segment has TI set, GDT has it cleared */ -#define SEGMENT_LDT 0x4 -#define SEGMENT_GDT 0x0 - -/* Bottom two bits of selector give the ring privilege level */ -#define SEGMENT_RPL_MASK 0x3 -/* Bit 2 is table indicator (LDT/GDT) */ -#define SEGMENT_TI_MASK 0x4 - #define IDT_ENTRIES 256 #define NUM_EXCEPTION_VECTORS 32 /* Bitmask of exception vectors which push an error code on the stack */ diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h index 9295016485c9..a4efe477ceab 100644 --- a/arch/x86/include/asm/spinlock.h +++ b/arch/x86/include/asm/spinlock.h @@ -183,8 +183,20 @@ static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock, static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) { - while (arch_spin_is_locked(lock)) + __ticket_t head = ACCESS_ONCE(lock->tickets.head); + + for (;;) { + struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets); + /* + * We need to check "unlocked" in a loop, tmp.head == head + * can be false positive because of overflow. 
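The rewritten wait loop just below exits either when the lock is observed unlocked or when head has moved on from the snapshot taken before the loop, i.e. the holder present at entry has released the lock at least once. A toy model of why both tests are needed once the narrow ticket counters wrap (illustrative only, not kernel code):

/* Toy 8-bit ticket lock, to make the wrap-around concern concrete. */
struct toy_tickets { unsigned char head, tail; };

static int toy_unlocked(struct toy_tickets t)
{
	return t.head == t.tail;
}
/*
 * Waiting only for "head != snapshot" can be fooled by a wrap: head may
 * come back to the snapshot value even though the lock changed hands in
 * between (the false positive the comment above refers to).  Waiting only
 * for "unlocked" may never succeed on a constantly contended lock.  The
 * loop below therefore checks both conditions on every iteration.
 */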
+ */ + if (tmp.head == (tmp.tail & ~TICKET_SLOWPATH_FLAG) || + tmp.head != head) + break; + cpu_relax(); + } } /* diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h index d7f3b3b78ac3..751bf4b7bf11 100644 --- a/arch/x86/include/asm/switch_to.h +++ b/arch/x86/include/asm/switch_to.h @@ -79,12 +79,12 @@ do { \ #else /* CONFIG_X86_32 */ /* frame pointer must be last for get_wchan */ -#define SAVE_CONTEXT "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t" -#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t" +#define SAVE_CONTEXT "pushq %%rbp ; movq %%rsi,%%rbp\n\t" +#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp\t" #define __EXTRA_CLOBBER \ , "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \ - "r12", "r13", "r14", "r15" + "r12", "r13", "r14", "r15", "flags" #ifdef CONFIG_CC_STACKPROTECTOR #define __switch_canary \ @@ -100,7 +100,11 @@ do { \ #define __switch_canary_iparam #endif /* CC_STACKPROTECTOR */ -/* Save restore flags to clear handle leaking NT */ +/* + * There is no need to save or restore flags, because flags are always + * clean in kernel mode, with the possible exception of IOPL. Kernel IOPL + * has no effect. + */ #define switch_to(prev, next, last) \ asm volatile(SAVE_CONTEXT \ "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \ diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h index 2d60a7813dfe..fc808b83fccb 100644 --- a/arch/x86/include/asm/uv/uv_bau.h +++ b/arch/x86/include/asm/uv/uv_bau.h @@ -33,8 +33,8 @@ * Each of the descriptors is 64 bytes in size (8*64 = 512 bytes in a set). */ -#define MAX_CPUS_PER_UVHUB 64 -#define MAX_CPUS_PER_SOCKET 32 +#define MAX_CPUS_PER_UVHUB 128 +#define MAX_CPUS_PER_SOCKET 64 #define ADP_SZ 64 /* hardware-provided max. */ #define UV_CPUS_PER_AS 32 /* hardware-provided max. */ #define ITEMS_PER_DESC 8 diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h index 3c3366c2e37f..e7e9682a33e9 100644 --- a/arch/x86/include/asm/vgtod.h +++ b/arch/x86/include/asm/vgtod.h @@ -70,4 +70,23 @@ static inline void gtod_write_end(struct vsyscall_gtod_data *s) ++s->seq; } +#ifdef CONFIG_X86_64 + +#define VGETCPU_CPU_MASK 0xfff + +static inline unsigned int __getcpu(void) +{ + unsigned int p; + + /* + * Load per CPU data from GDT. LSL is faster than RDTSCP and + * works on all CPUs. + */ + asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG)); + + return p; +} + +#endif /* CONFIG_X86_64 */ + #endif /* _ASM_X86_VGTOD_H */ diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h index 2a46ca720afc..6ba66ee79710 100644 --- a/arch/x86/include/asm/vsyscall.h +++ b/arch/x86/include/asm/vsyscall.h @@ -4,15 +4,7 @@ #include <linux/seqlock.h> #include <uapi/asm/vsyscall.h> -#define VGETCPU_RDTSCP 1 -#define VGETCPU_LSL 2 - -/* kernel space (writeable) */ -extern int vgetcpu_mode; -extern struct timezone sys_tz; - -#include <asm/vvar.h> - +#ifdef CONFIG_X86_VSYSCALL_EMULATION extern void map_vsyscall(void); /* @@ -20,25 +12,12 @@ extern void map_vsyscall(void); * Returns true if handled. 
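The __getcpu() helper moved into vgtod.h above reads a per-CPU cookie that the kernel plants in a GDT segment limit, which is why a plain LSL suffices and no RDTSCP capability check is needed any more. A sketch of how a vDSO-side caller would unpack it, assuming the historical encoding (CPU number in the low 12 bits, NUMA node in the bits above):

unsigned int p = __getcpu();
unsigned int cpu  = p & VGETCPU_CPU_MASK;	/* low 12 bits */
unsigned int node = p >> 12;			/* remaining bits */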
*/ extern bool emulate_vsyscall(struct pt_regs *regs, unsigned long address); - -#ifdef CONFIG_X86_64 - -#define VGETCPU_CPU_MASK 0xfff - -static inline unsigned int __getcpu(void) +#else +static inline void map_vsyscall(void) {} +static inline bool emulate_vsyscall(struct pt_regs *regs, unsigned long address) { - unsigned int p; - - if (VVAR(vgetcpu_mode) == VGETCPU_RDTSCP) { - /* Load per CPU data from RDTSCP */ - native_read_tscp(&p); - } else { - /* Load per CPU data from GDT */ - asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG)); - } - - return p; + return false; } -#endif /* CONFIG_X86_64 */ +#endif #endif /* _ASM_X86_VSYSCALL_H */ diff --git a/arch/x86/include/asm/vvar.h b/arch/x86/include/asm/vvar.h index 5d2b9ad2c6d2..3f32dfc2ab73 100644 --- a/arch/x86/include/asm/vvar.h +++ b/arch/x86/include/asm/vvar.h @@ -44,8 +44,6 @@ extern char __vvar_page; /* DECLARE_VVAR(offset, type, name) */ -DECLARE_VVAR(0, volatile unsigned long, jiffies) -DECLARE_VVAR(16, int, vgetcpu_mode) DECLARE_VVAR(128, struct vsyscall_gtod_data, vsyscall_gtod_data) #undef DECLARE_VVAR diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h index e45e4da96bf1..f58a9c7a3c86 100644 --- a/arch/x86/include/asm/x86_init.h +++ b/arch/x86/include/asm/x86_init.h @@ -172,7 +172,6 @@ struct x86_platform_ops { struct pci_dev; struct msi_msg; -struct msi_desc; struct x86_msi_ops { int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type); @@ -183,8 +182,6 @@ struct x86_msi_ops { void (*teardown_msi_irqs)(struct pci_dev *dev); void (*restore_msi_irqs)(struct pci_dev *dev); int (*setup_hpet_msi)(unsigned int irq, unsigned int id); - u32 (*msi_mask_irq)(struct msi_desc *desc, u32 mask, u32 flag); - u32 (*msix_mask_irq)(struct msi_desc *desc, u32 flag); }; struct IO_APIC_route_entry; diff --git a/arch/x86/include/asm/xen/cpuid.h b/arch/x86/include/asm/xen/cpuid.h new file mode 100644 index 000000000000..0d809e9fc975 --- /dev/null +++ b/arch/x86/include/asm/xen/cpuid.h @@ -0,0 +1,91 @@ +/****************************************************************************** + * arch-x86/cpuid.h + * + * CPUID interface to Xen. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Copyright (c) 2007 Citrix Systems, Inc. 
+ * + * Authors: + * Keir Fraser <keir@xen.org> + */ + +#ifndef __XEN_PUBLIC_ARCH_X86_CPUID_H__ +#define __XEN_PUBLIC_ARCH_X86_CPUID_H__ + +/* + * For compatibility with other hypervisor interfaces, the Xen cpuid leaves + * can be found at the first otherwise unused 0x100 aligned boundary starting + * from 0x40000000. + * + * e.g If viridian extensions are enabled for an HVM domain, the Xen cpuid + * leaves will start at 0x40000100 + */ + +#define XEN_CPUID_FIRST_LEAF 0x40000000 +#define XEN_CPUID_LEAF(i) (XEN_CPUID_FIRST_LEAF + (i)) + +/* + * Leaf 1 (0x40000x00) + * EAX: Largest Xen-information leaf. All leaves up to an including @EAX + * are supported by the Xen host. + * EBX-EDX: "XenVMMXenVMM" signature, allowing positive identification + * of a Xen host. + */ +#define XEN_CPUID_SIGNATURE_EBX 0x566e6558 /* "XenV" */ +#define XEN_CPUID_SIGNATURE_ECX 0x65584d4d /* "MMXe" */ +#define XEN_CPUID_SIGNATURE_EDX 0x4d4d566e /* "nVMM" */ + +/* + * Leaf 2 (0x40000x01) + * EAX[31:16]: Xen major version. + * EAX[15: 0]: Xen minor version. + * EBX-EDX: Reserved (currently all zeroes). + */ + +/* + * Leaf 3 (0x40000x02) + * EAX: Number of hypercall transfer pages. This register is always guaranteed + * to specify one hypercall page. + * EBX: Base address of Xen-specific MSRs. + * ECX: Features 1. Unused bits are set to zero. + * EDX: Features 2. Unused bits are set to zero. + */ + +/* Does the host support MMU_PT_UPDATE_PRESERVE_AD for this guest? */ +#define _XEN_CPUID_FEAT1_MMU_PT_UPDATE_PRESERVE_AD 0 +#define XEN_CPUID_FEAT1_MMU_PT_UPDATE_PRESERVE_AD (1u<<0) + +/* + * Leaf 5 (0x40000x04) + * HVM-specific features + */ + +/* EAX Features */ +/* Virtualized APIC registers */ +#define XEN_HVM_CPUID_APIC_ACCESS_VIRT (1u << 0) +/* Virtualized x2APIC accesses */ +#define XEN_HVM_CPUID_X2APIC_VIRT (1u << 1) +/* Memory mapped from other domains has valid IOMMU entries */ +#define XEN_HVM_CPUID_IOMMU_MAPPINGS (1u << 2) + +#define XEN_CPUID_MAX_NUM_LEAVES 4 + +#endif /* __XEN_PUBLIC_ARCH_X86_CPUID_H__ */ diff --git a/arch/x86/include/asm/xen/page-coherent.h b/arch/x86/include/asm/xen/page-coherent.h index 7f02fe4e2c7b..acd844c017d3 100644 --- a/arch/x86/include/asm/xen/page-coherent.h +++ b/arch/x86/include/asm/xen/page-coherent.h @@ -22,8 +22,8 @@ static inline void xen_free_coherent_pages(struct device *hwdev, size_t size, } static inline void xen_dma_map_page(struct device *hwdev, struct page *page, - unsigned long offset, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) { } + dma_addr_t dev_addr, unsigned long offset, size_t size, + enum dma_data_direction dir, struct dma_attrs *attrs) { } static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, size_t size, enum dma_data_direction dir, diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h index c949923a5668..5eea09915a15 100644 --- a/arch/x86/include/asm/xen/page.h +++ b/arch/x86/include/asm/xen/page.h @@ -41,10 +41,12 @@ typedef struct xpaddr { extern unsigned long *machine_to_phys_mapping; extern unsigned long machine_to_phys_nr; +extern unsigned long *xen_p2m_addr; +extern unsigned long xen_p2m_size; +extern unsigned long xen_max_p2m_pfn; extern unsigned long get_phys_to_machine(unsigned long pfn); extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn); -extern bool __init early_set_phys_to_machine(unsigned long pfn, unsigned long mfn); extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn); extern unsigned long set_phys_range_identity(unsigned long 
pfn_s, unsigned long pfn_e); @@ -52,17 +54,52 @@ extern unsigned long set_phys_range_identity(unsigned long pfn_s, extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops, struct gnttab_map_grant_ref *kmap_ops, struct page **pages, unsigned int count); -extern int m2p_add_override(unsigned long mfn, struct page *page, - struct gnttab_map_grant_ref *kmap_op); extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops, struct gnttab_map_grant_ref *kmap_ops, struct page **pages, unsigned int count); -extern int m2p_remove_override(struct page *page, - struct gnttab_map_grant_ref *kmap_op, - unsigned long mfn); -extern struct page *m2p_find_override(unsigned long mfn); extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn); +/* + * Helper functions to write or read unsigned long values to/from + * memory, when the access may fault. + */ +static inline int xen_safe_write_ulong(unsigned long *addr, unsigned long val) +{ + return __put_user(val, (unsigned long __user *)addr); +} + +static inline int xen_safe_read_ulong(unsigned long *addr, unsigned long *val) +{ + return __get_user(*val, (unsigned long __user *)addr); +} + +/* + * When to use pfn_to_mfn(), __pfn_to_mfn() or get_phys_to_machine(): + * - pfn_to_mfn() returns either INVALID_P2M_ENTRY or the mfn. No indicator + * bits (identity or foreign) are set. + * - __pfn_to_mfn() returns the found entry of the p2m table. A possibly set + * identity or foreign indicator will be still set. __pfn_to_mfn() is + * encapsulating get_phys_to_machine() which is called in special cases only. + * - get_phys_to_machine() is to be called by __pfn_to_mfn() only in special + * cases needing an extended handling. + */ +static inline unsigned long __pfn_to_mfn(unsigned long pfn) +{ + unsigned long mfn; + + if (pfn < xen_p2m_size) + mfn = xen_p2m_addr[pfn]; + else if (unlikely(pfn < xen_max_p2m_pfn)) + return get_phys_to_machine(pfn); + else + return IDENTITY_FRAME(pfn); + + if (unlikely(mfn == INVALID_P2M_ENTRY)) + return get_phys_to_machine(pfn); + + return mfn; +} + static inline unsigned long pfn_to_mfn(unsigned long pfn) { unsigned long mfn; @@ -70,7 +107,7 @@ static inline unsigned long pfn_to_mfn(unsigned long pfn) if (xen_feature(XENFEAT_auto_translated_physmap)) return pfn; - mfn = get_phys_to_machine(pfn); + mfn = __pfn_to_mfn(pfn); if (mfn != INVALID_P2M_ENTRY) mfn &= ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT); @@ -83,7 +120,7 @@ static inline int phys_to_machine_mapping_valid(unsigned long pfn) if (xen_feature(XENFEAT_auto_translated_physmap)) return 1; - return get_phys_to_machine(pfn) != INVALID_P2M_ENTRY; + return __pfn_to_mfn(pfn) != INVALID_P2M_ENTRY; } static inline unsigned long mfn_to_pfn_no_overrides(unsigned long mfn) @@ -102,7 +139,7 @@ static inline unsigned long mfn_to_pfn_no_overrides(unsigned long mfn) * In such cases it doesn't matter what we return (we return garbage), * but we must handle the fault without crashing! 
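The comment above explains why the machine-to-physical lookup has to survive a fault: the m2p entry for a foreign mfn may simply not be mapped. The hunk that follows switches the open-coded __get_user() to the new xen_safe_read_ulong() helper; the calling pattern is roughly:

/* Sketch of the fault-tolerant m2p lookup used below. */
unsigned long pfn;

if (xen_safe_read_ulong(&machine_to_phys_mapping[mfn], &pfn) < 0)
	pfn = ~0UL;	/* entry not mapped: report "no valid translation" */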
*/ - ret = __get_user(pfn, &machine_to_phys_mapping[mfn]); + ret = xen_safe_read_ulong(&machine_to_phys_mapping[mfn], &pfn); if (ret < 0) return ~0; @@ -117,7 +154,7 @@ static inline unsigned long mfn_to_pfn(unsigned long mfn) return mfn; pfn = mfn_to_pfn_no_overrides(mfn); - if (get_phys_to_machine(pfn) != mfn) { + if (__pfn_to_mfn(pfn) != mfn) { /* * If this appears to be a foreign mfn (because the pfn * doesn't map back to the mfn), then check the local override @@ -133,8 +170,7 @@ static inline unsigned long mfn_to_pfn(unsigned long mfn) * entry doesn't map back to the mfn and m2p_override doesn't have a * valid entry for it. */ - if (pfn == ~0 && - get_phys_to_machine(mfn) == IDENTITY_FRAME(mfn)) + if (pfn == ~0 && __pfn_to_mfn(mfn) == IDENTITY_FRAME(mfn)) pfn = mfn; return pfn; @@ -180,7 +216,7 @@ static inline unsigned long mfn_to_local_pfn(unsigned long mfn) return mfn; pfn = mfn_to_pfn(mfn); - if (get_phys_to_machine(pfn) != mfn) + if (__pfn_to_mfn(pfn) != mfn) return -1; /* force !pfn_valid() */ return pfn; } @@ -236,4 +272,11 @@ void make_lowmem_page_readwrite(void *vaddr); #define xen_remap(cookie, size) ioremap((cookie), (size)); #define xen_unmap(cookie) iounmap((cookie)) +static inline bool xen_arch_need_swiotlb(struct device *dev, + unsigned long pfn, + unsigned long mfn) +{ + return false; +} + #endif /* _ASM_X86_XEN_PAGE_H */ diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h index e21331ce368f..c8aa65d56027 100644 --- a/arch/x86/include/uapi/asm/msr-index.h +++ b/arch/x86/include/uapi/asm/msr-index.h @@ -152,6 +152,45 @@ #define MSR_CC6_DEMOTION_POLICY_CONFIG 0x00000668 #define MSR_MC6_DEMOTION_POLICY_CONFIG 0x00000669 +/* Hardware P state interface */ +#define MSR_PPERF 0x0000064e +#define MSR_PERF_LIMIT_REASONS 0x0000064f +#define MSR_PM_ENABLE 0x00000770 +#define MSR_HWP_CAPABILITIES 0x00000771 +#define MSR_HWP_REQUEST_PKG 0x00000772 +#define MSR_HWP_INTERRUPT 0x00000773 +#define MSR_HWP_REQUEST 0x00000774 +#define MSR_HWP_STATUS 0x00000777 + +/* CPUID.6.EAX */ +#define HWP_BASE_BIT (1<<7) +#define HWP_NOTIFICATIONS_BIT (1<<8) +#define HWP_ACTIVITY_WINDOW_BIT (1<<9) +#define HWP_ENERGY_PERF_PREFERENCE_BIT (1<<10) +#define HWP_PACKAGE_LEVEL_REQUEST_BIT (1<<11) + +/* IA32_HWP_CAPABILITIES */ +#define HWP_HIGHEST_PERF(x) (x & 0xff) +#define HWP_GUARANTEED_PERF(x) ((x & (0xff << 8)) >>8) +#define HWP_MOSTEFFICIENT_PERF(x) ((x & (0xff << 16)) >>16) +#define HWP_LOWEST_PERF(x) ((x & (0xff << 24)) >>24) + +/* IA32_HWP_REQUEST */ +#define HWP_MIN_PERF(x) (x & 0xff) +#define HWP_MAX_PERF(x) ((x & 0xff) << 8) +#define HWP_DESIRED_PERF(x) ((x & 0xff) << 16) +#define HWP_ENERGY_PERF_PREFERENCE(x) ((x & 0xff) << 24) +#define HWP_ACTIVITY_WINDOW(x) ((x & 0xff3) << 32) +#define HWP_PACKAGE_CONTROL(x) ((x & 0x1) << 42) + +/* IA32_HWP_STATUS */ +#define HWP_GUARANTEED_CHANGE(x) (x & 0x1) +#define HWP_EXCURSION_TO_MINIMUM(x) (x & 0x4) + +/* IA32_HWP_INTERRUPT */ +#define HWP_CHANGE_TO_GUARANTEED_INT(x) (x & 0x1) +#define HWP_EXCURSION_TO_MINIMUM_INT(x) (x & 0x2) + #define MSR_AMD64_MC0_MASK 0xc0010044 #define MSR_IA32_MCx_CTL(x) (MSR_IA32_MC0_CTL + 4*(x)) @@ -206,6 +245,7 @@ #define MSR_AMD64_IBSOP_REG_MASK ((1UL<<MSR_AMD64_IBSOP_REG_COUNT)-1) #define MSR_AMD64_IBSCTL 0xc001103a #define MSR_AMD64_IBSBRTARGET 0xc001103b +#define MSR_AMD64_IBSOPDATA4 0xc001103d #define MSR_AMD64_IBS_REG_COUNT_MAX 8 /* includes MSR_AMD64_IBSBRTARGET */ /* Fam 16h MSRs */ @@ -345,6 +385,8 @@ #define MSR_IA32_TEMPERATURE_TARGET 0x000001a2 +#define MSR_MISC_PWR_MGMT 
0x000001aa + #define MSR_IA32_ENERGY_PERF_BIAS 0x000001b0 #define ENERGY_PERF_BIAS_PERFORMANCE 0 #define ENERGY_PERF_BIAS_NORMAL 6 diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 8f1e77440b2b..5d4502c8b983 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -28,8 +28,7 @@ obj-$(CONFIG_X86_32) += i386_ksyms_32.o obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o obj-$(CONFIG_X86_64) += mcount_64.o obj-y += syscall_$(BITS).o vsyscall_gtod.o -obj-$(CONFIG_X86_64) += vsyscall_64.o -obj-$(CONFIG_X86_64) += vsyscall_emu_64.o +obj-$(CONFIG_X86_VSYSCALL_EMULATION) += vsyscall_64.o vsyscall_emu_64.o obj-$(CONFIG_X86_ESPFIX64) += espfix_64.o obj-$(CONFIG_SYSFS) += ksysfs.o obj-y += bootflag.o e820.o diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c index 4128b5fcb559..c2fd21fed002 100644 --- a/arch/x86/kernel/apic/apic_numachip.c +++ b/arch/x86/kernel/apic/apic_numachip.c @@ -40,7 +40,7 @@ static unsigned int get_apic_id(unsigned long x) unsigned int id; rdmsrl(MSR_FAM10H_NODE_ID, value); - id = ((x >> 24) & 0xffU) | ((value << 2) & 0x3f00U); + id = ((x >> 24) & 0xffU) | ((value << 2) & 0xff00U); return id; } @@ -145,7 +145,7 @@ static void numachip_send_IPI_all(int vector) static void numachip_send_IPI_self(int vector) { - __default_send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL); + apic_write(APIC_SELF_IPI, vector); } static int __init numachip_probe(void) @@ -153,20 +153,8 @@ static int __init numachip_probe(void) return apic == &apic_numachip; } -static void __init map_csrs(void) -{ - printk(KERN_INFO "NumaChip: Mapping local CSR space (%016llx - %016llx)\n", - NUMACHIP_LCSR_BASE, NUMACHIP_LCSR_BASE + NUMACHIP_LCSR_SIZE - 1); - init_extra_mapping_uc(NUMACHIP_LCSR_BASE, NUMACHIP_LCSR_SIZE); - - printk(KERN_INFO "NumaChip: Mapping global CSR space (%016llx - %016llx)\n", - NUMACHIP_GCSR_BASE, NUMACHIP_GCSR_BASE + NUMACHIP_GCSR_SIZE - 1); - init_extra_mapping_uc(NUMACHIP_GCSR_BASE, NUMACHIP_GCSR_SIZE); -} - static void fixup_cpu_id(struct cpuinfo_x86 *c, int node) { - if (c->phys_proc_id != node) { c->phys_proc_id = node; per_cpu(cpu_llc_id, smp_processor_id()) = node; @@ -175,19 +163,15 @@ static void fixup_cpu_id(struct cpuinfo_x86 *c, int node) static int __init numachip_system_init(void) { - unsigned int val; - if (!numachip_system) return 0; + init_extra_mapping_uc(NUMACHIP_LCSR_BASE, NUMACHIP_LCSR_SIZE); + init_extra_mapping_uc(NUMACHIP_GCSR_BASE, NUMACHIP_GCSR_SIZE); + x86_cpuinit.fixup_cpu_id = fixup_cpu_id; x86_init.pci.arch_init = pci_numachip_init; - map_csrs(); - - val = read_lcsr(CSR_G0_NODE_IDS); - printk(KERN_INFO "NumaChip: Local NodeID = %08x\n", val); - return 0; } early_initcall(numachip_system_init); diff --git a/arch/x86/kernel/apic/hw_nmi.c b/arch/x86/kernel/apic/hw_nmi.c index 6a1e71bde323..6873ab925d00 100644 --- a/arch/x86/kernel/apic/hw_nmi.c +++ b/arch/x86/kernel/apic/hw_nmi.c @@ -18,6 +18,7 @@ #include <linux/nmi.h> #include <linux/module.h> #include <linux/delay.h> +#include <linux/seq_buf.h> #ifdef CONFIG_HARDLOCKUP_DETECTOR u64 hw_nmi_get_sample_period(int watchdog_thresh) @@ -29,14 +30,35 @@ u64 hw_nmi_get_sample_period(int watchdog_thresh) #ifdef arch_trigger_all_cpu_backtrace /* For reliability, we're prepared to waste bits here. 
*/ static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly; +static cpumask_t printtrace_mask; + +#define NMI_BUF_SIZE 4096 + +struct nmi_seq_buf { + unsigned char buffer[NMI_BUF_SIZE]; + struct seq_buf seq; +}; + +/* Safe printing in NMI context */ +static DEFINE_PER_CPU(struct nmi_seq_buf, nmi_print_seq); /* "in progress" flag of arch_trigger_all_cpu_backtrace */ static unsigned long backtrace_flag; +static void print_seq_line(struct nmi_seq_buf *s, int start, int end) +{ + const char *buf = s->buffer + start; + + printk("%.*s", (end - start) + 1, buf); +} + void arch_trigger_all_cpu_backtrace(bool include_self) { + struct nmi_seq_buf *s; + int len; + int cpu; int i; - int cpu = get_cpu(); + int this_cpu = get_cpu(); if (test_and_set_bit(0, &backtrace_flag)) { /* @@ -49,7 +71,17 @@ void arch_trigger_all_cpu_backtrace(bool include_self) cpumask_copy(to_cpumask(backtrace_mask), cpu_online_mask); if (!include_self) - cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask)); + cpumask_clear_cpu(this_cpu, to_cpumask(backtrace_mask)); + + cpumask_copy(&printtrace_mask, to_cpumask(backtrace_mask)); + /* + * Set up per_cpu seq_buf buffers that the NMIs running on the other + * CPUs will write to. + */ + for_each_cpu(cpu, to_cpumask(backtrace_mask)) { + s = &per_cpu(nmi_print_seq, cpu); + seq_buf_init(&s->seq, s->buffer, NMI_BUF_SIZE); + } if (!cpumask_empty(to_cpumask(backtrace_mask))) { pr_info("sending NMI to %s CPUs:\n", @@ -65,11 +97,58 @@ void arch_trigger_all_cpu_backtrace(bool include_self) touch_softlockup_watchdog(); } + /* + * Now that all the NMIs have triggered, we can dump out their + * back traces safely to the console. + */ + for_each_cpu(cpu, &printtrace_mask) { + int last_i = 0; + + s = &per_cpu(nmi_print_seq, cpu); + len = seq_buf_used(&s->seq); + if (!len) + continue; + + /* Print line by line. */ + for (i = 0; i < len; i++) { + if (s->buffer[i] == '\n') { + print_seq_line(s, last_i, i); + last_i = i + 1; + } + } + /* Check if there was a partial line. */ + if (last_i < len) { + print_seq_line(s, last_i, len - 1); + pr_cont("\n"); + } + } + clear_bit(0, &backtrace_flag); smp_mb__after_atomic(); put_cpu(); } +/* + * It is not safe to call printk() directly from NMI handlers. + * It may be fine if the NMI detected a lock up and we have no choice + * but to do so, but doing a NMI on all other CPUs to get a back trace + * can be done with a sysrq-l. We don't want that to lock up, which + * can happen if the NMI interrupts a printk in progress. + * + * Instead, we redirect the vprintk() to this nmi_vprintk() that writes + * the content into a per cpu seq_buf buffer. Then when the NMIs are + * all done, we can safely dump the contents of the seq_buf to a printk() + * from a non NMI context. 
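The scheme described above works because seq_buf writes into a caller-supplied buffer and neither allocates nor takes locks, so it is safe to use from NMI context. A minimal seq_buf usage sketch (buffer size and message are arbitrary):

#include <linux/seq_buf.h>

char buf[128];
struct seq_buf s;
unsigned int len;

seq_buf_init(&s, buf, sizeof(buf));		/* bind seq_buf to a fixed buffer */
seq_buf_printf(&s, "cpu %d backtrace\n", 0);	/* accumulate text, no locking */
len = seq_buf_used(&s);				/* bytes written so far */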
+ */ +static int nmi_vprintk(const char *fmt, va_list args) +{ + struct nmi_seq_buf *s = this_cpu_ptr(&nmi_print_seq); + unsigned int len = seq_buf_used(&s->seq); + + seq_buf_vprintf(&s->seq, fmt, args); + return seq_buf_used(&s->seq) - len; +} + static int arch_trigger_all_cpu_backtrace_handler(unsigned int cmd, struct pt_regs *regs) { @@ -78,12 +157,14 @@ arch_trigger_all_cpu_backtrace_handler(unsigned int cmd, struct pt_regs *regs) cpu = smp_processor_id(); if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) { - static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED; + printk_func_t printk_func_save = this_cpu_read(printk_func); - arch_spin_lock(&lock); + /* Replace printk to write into the NMI seq */ + this_cpu_write(printk_func, nmi_vprintk); printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu); show_regs(regs); - arch_spin_unlock(&lock); + this_cpu_write(printk_func, printk_func_save); + cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask)); return NMI_HANDLED; } diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 1183d545da1e..7ffe0a2b870f 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -3158,7 +3158,7 @@ msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; msg.address_lo |= MSI_ADDR_DEST_ID(dest); - __write_msi_msg(data->msi_desc, &msg); + __pci_write_msi_msg(data->msi_desc, &msg); return IRQ_SET_MASK_OK_NOCOPY; } @@ -3169,8 +3169,8 @@ msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) */ static struct irq_chip msi_chip = { .name = "PCI-MSI", - .irq_unmask = unmask_msi_irq, - .irq_mask = mask_msi_irq, + .irq_unmask = pci_msi_unmask_irq, + .irq_mask = pci_msi_mask_irq, .irq_ack = ack_apic_edge, .irq_set_affinity = msi_set_affinity, .irq_retrigger = ioapic_retrigger_irq, @@ -3196,7 +3196,7 @@ int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, * MSI message denotes a contiguous group of IRQs, written for 0th IRQ. 
*/ if (!irq_offset) - write_msi_msg(irq, &msg); + pci_write_msi_msg(irq, &msg); setup_remapped_irq(irq, irq_cfg(irq), chip); diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c index 584874451414..927ec9235947 100644 --- a/arch/x86/kernel/apm_32.c +++ b/arch/x86/kernel/apm_32.c @@ -378,7 +378,6 @@ static struct cpuidle_driver apm_idle_driver = { { /* entry 1 is for APM idle */ .name = "APM", .desc = "APM idle", - .flags = CPUIDLE_FLAG_TIME_VALID, .exit_latency = 250, /* WAG */ .target_residency = 500, /* WAG */ .enter = &apm_cpu_idle diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c index d67c4be3e8b1..3b3b9d33ac1d 100644 --- a/arch/x86/kernel/asm-offsets_32.c +++ b/arch/x86/kernel/asm-offsets_32.c @@ -1,3 +1,7 @@ +#ifndef __LINUX_KBUILD_H +# error "Please do not build this file directly, build asm-offsets.c instead" +#endif + #include <asm/ucontext.h> #include <linux/lguest.h> diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c index e7c798b354fa..fdcbb4d27c9f 100644 --- a/arch/x86/kernel/asm-offsets_64.c +++ b/arch/x86/kernel/asm-offsets_64.c @@ -1,3 +1,7 @@ +#ifndef __LINUX_KBUILD_H +# error "Please do not build this file directly, build asm-offsets.c instead" +#endif + #include <asm/ia32.h> #define __SYSCALL_64(nr, sym, compat) [nr] = 1, @@ -48,7 +52,6 @@ int main(void) #define ENTRY(entry) OFFSET(pt_regs_ ## entry, pt_regs, entry) ENTRY(bx); - ENTRY(bx); ENTRY(cx); ENTRY(dx); ENTRY(sp); diff --git a/arch/x86/kernel/audit_64.c b/arch/x86/kernel/audit_64.c index 06d3e5a14d9d..f3672508b249 100644 --- a/arch/x86/kernel/audit_64.c +++ b/arch/x86/kernel/audit_64.c @@ -50,6 +50,7 @@ int audit_classify_syscall(int abi, unsigned syscall) case __NR_openat: return 3; case __NR_execve: + case __NR_execveat: return 5; default: return 0; diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 813d29d00a17..15c5df92f74e 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -566,6 +566,17 @@ static void init_amd_k8(struct cpuinfo_x86 *c) if (!c->x86_model_id[0]) strcpy(c->x86_model_id, "Hammer"); + +#ifdef CONFIG_SMP + /* + * Disable TLB flush filter by setting HWCR.FFDIS on K8 + * bit 6 of msr C001_0015 + * + * Errata 63 for SH-B3 steppings + * Errata 122 for all steppings (F+ have it disabled by default) + */ + msr_set_bit(MSR_K7_HWCR, 6); +#endif } static void init_amd_gh(struct cpuinfo_x86 *c) @@ -636,18 +647,6 @@ static void init_amd(struct cpuinfo_x86 *c) { u32 dummy; -#ifdef CONFIG_SMP - /* - * Disable TLB flush filter by setting HWCR.FFDIS on K8 - * bit 6 of msr C001_0015 - * - * Errata 63 for SH-B3 steppings - * Errata 122 for all steppings (F+ have it disabled by default) - */ - if (c->x86 == 0xf) - msr_set_bit(MSR_K7_HWCR, 6); -#endif - early_init_amd(c); /* diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index cfa9b5b2c27a..c6049650c093 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -958,14 +958,6 @@ static void identify_cpu(struct cpuinfo_x86 *c) } #ifdef CONFIG_X86_64 -static void vgetcpu_set_mode(void) -{ - if (cpu_has(&boot_cpu_data, X86_FEATURE_RDTSCP)) - vgetcpu_mode = VGETCPU_RDTSCP; - else - vgetcpu_mode = VGETCPU_LSL; -} - #ifdef CONFIG_IA32_EMULATION /* May not be __init: called during resume */ static void syscall32_cpu_init(void) @@ -1008,8 +1000,6 @@ void __init identify_boot_cpu(void) #ifdef CONFIG_X86_32 sysenter_setup(); enable_sep_cpu(); -#else - vgetcpu_set_mode(); #endif cpu_detect_tlb(&boot_cpu_data); } 
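Moving the HWCR.FFDIS workaround into init_amd_k8() (in the amd.c hunk above) scopes it to family 0xf without the explicit c->x86 check. msr_set_bit(MSR_K7_HWCR, 6) is shorthand for a read-modify-write of that MSR; the open-coded equivalent is roughly:

/* Roughly what msr_set_bit(MSR_K7_HWCR, 6) boils down to. */
u64 hwcr;

rdmsrl(MSR_K7_HWCR, hwcr);
if (!(hwcr & (1ULL << 6))) {		/* bit 6: FFDIS, disable TLB flush filter */
	hwcr |= 1ULL << 6;
	wrmsrl(MSR_K7_HWCR, hwcr);
}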
diff --git a/arch/x86/kernel/cpu/mcheck/mce-internal.h b/arch/x86/kernel/cpu/mcheck/mce-internal.h index 09edd0b65fef..10b46906767f 100644 --- a/arch/x86/kernel/cpu/mcheck/mce-internal.h +++ b/arch/x86/kernel/cpu/mcheck/mce-internal.h @@ -3,6 +3,8 @@ enum severity_level { MCE_NO_SEVERITY, + MCE_DEFERRED_SEVERITY, + MCE_UCNA_SEVERITY = MCE_DEFERRED_SEVERITY, MCE_KEEP_SEVERITY, MCE_SOME_SEVERITY, MCE_AO_SEVERITY, @@ -21,7 +23,7 @@ struct mce_bank { char attrname[ATTR_LEN]; /* attribute name */ }; -int mce_severity(struct mce *a, int tolerant, char **msg); +int mce_severity(struct mce *a, int tolerant, char **msg, bool is_excp); struct dentry *mce_get_debugfs_dir(void); extern struct mce_bank *mce_banks; diff --git a/arch/x86/kernel/cpu/mcheck/mce-severity.c b/arch/x86/kernel/cpu/mcheck/mce-severity.c index c370e1c4468b..8bb433043a7f 100644 --- a/arch/x86/kernel/cpu/mcheck/mce-severity.c +++ b/arch/x86/kernel/cpu/mcheck/mce-severity.c @@ -31,6 +31,7 @@ enum context { IN_KERNEL = 1, IN_USER = 2 }; enum ser { SER_REQUIRED = 1, NO_SER = 2 }; +enum exception { EXCP_CONTEXT = 1, NO_EXCP = 2 }; static struct severity { u64 mask; @@ -40,6 +41,7 @@ static struct severity { unsigned char mcgres; unsigned char ser; unsigned char context; + unsigned char excp; unsigned char covered; char *msg; } severities[] = { @@ -48,6 +50,8 @@ static struct severity { #define USER .context = IN_USER #define SER .ser = SER_REQUIRED #define NOSER .ser = NO_SER +#define EXCP .excp = EXCP_CONTEXT +#define NOEXCP .excp = NO_EXCP #define BITCLR(x) .mask = x, .result = 0 #define BITSET(x) .mask = x, .result = x #define MCGMASK(x, y) .mcgmask = x, .mcgres = y @@ -62,7 +66,7 @@ static struct severity { ), MCESEV( NO, "Not enabled", - BITCLR(MCI_STATUS_EN) + EXCP, BITCLR(MCI_STATUS_EN) ), MCESEV( PANIC, "Processor context corrupt", @@ -71,16 +75,20 @@ static struct severity { /* When MCIP is not set something is very confused */ MCESEV( PANIC, "MCIP not set in MCA handler", - MCGMASK(MCG_STATUS_MCIP, 0) + EXCP, MCGMASK(MCG_STATUS_MCIP, 0) ), /* Neither return not error IP -- no chance to recover -> PANIC */ MCESEV( PANIC, "Neither restart nor error IP", - MCGMASK(MCG_STATUS_RIPV|MCG_STATUS_EIPV, 0) + EXCP, MCGMASK(MCG_STATUS_RIPV|MCG_STATUS_EIPV, 0) ), MCESEV( PANIC, "In kernel and no restart IP", - KERNEL, MCGMASK(MCG_STATUS_RIPV, 0) + EXCP, KERNEL, MCGMASK(MCG_STATUS_RIPV, 0) + ), + MCESEV( + DEFERRED, "Deferred error", + NOSER, MASK(MCI_STATUS_UC|MCI_STATUS_DEFERRED|MCI_STATUS_POISON, MCI_STATUS_DEFERRED) ), MCESEV( KEEP, "Corrected error", @@ -89,7 +97,7 @@ static struct severity { /* ignore OVER for UCNA */ MCESEV( - KEEP, "Uncorrected no action required", + UCNA, "Uncorrected no action required", SER, MASK(MCI_UC_SAR, MCI_STATUS_UC) ), MCESEV( @@ -178,8 +186,9 @@ static int error_context(struct mce *m) return ((m->cs & 3) == 3) ? IN_USER : IN_KERNEL; } -int mce_severity(struct mce *m, int tolerant, char **msg) +int mce_severity(struct mce *m, int tolerant, char **msg, bool is_excp) { + enum exception excp = (is_excp ? 
EXCP_CONTEXT : NO_EXCP); enum context ctx = error_context(m); struct severity *s; @@ -194,6 +203,8 @@ int mce_severity(struct mce *m, int tolerant, char **msg) continue; if (s->context && ctx != s->context) continue; + if (s->excp && excp != s->excp) + continue; if (msg) *msg = s->msg; s->covered = 1; diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 61a9668cebfd..d2c611699cd9 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -292,10 +292,10 @@ static void print_mce(struct mce *m) #define PANIC_TIMEOUT 5 /* 5 seconds */ -static atomic_t mce_paniced; +static atomic_t mce_panicked; static int fake_panic; -static atomic_t mce_fake_paniced; +static atomic_t mce_fake_panicked; /* Panic in progress. Enable interrupts and wait for final IPI */ static void wait_for_panic(void) @@ -319,7 +319,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp) /* * Make sure only one CPU runs in machine check panic */ - if (atomic_inc_return(&mce_paniced) > 1) + if (atomic_inc_return(&mce_panicked) > 1) wait_for_panic(); barrier(); @@ -327,7 +327,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp) console_verbose(); } else { /* Don't log too much for fake panic */ - if (atomic_inc_return(&mce_fake_paniced) > 1) + if (atomic_inc_return(&mce_fake_panicked) > 1) return; } /* First print corrected ones that are still unlogged */ @@ -575,6 +575,37 @@ static void mce_read_aux(struct mce *m, int i) } } +static bool memory_error(struct mce *m) +{ + struct cpuinfo_x86 *c = &boot_cpu_data; + + if (c->x86_vendor == X86_VENDOR_AMD) { + /* + * coming soon + */ + return false; + } else if (c->x86_vendor == X86_VENDOR_INTEL) { + /* + * Intel SDM Volume 3B - 15.9.2 Compound Error Codes + * + * Bit 7 of the MCACOD field of IA32_MCi_STATUS is used for + * indicating a memory error. Bit 8 is used for indicating a + * cache hierarchy error. The combination of bit 2 and bit 3 + * is used for indicating a `generic' cache hierarchy error + * But we can't just blindly check the above bits, because if + * bit 11 is set, then it is a bus/interconnect error - and + * either way the above bits just gives more detail on what + * bus/interconnect error happened. Note that bit 12 can be + * ignored, as it's the "filter" bit. + */ + return (m->status & 0xef80) == BIT(7) || + (m->status & 0xef00) == BIT(8) || + (m->status & 0xeffc) == 0xc; + } + + return false; +} + DEFINE_PER_CPU(unsigned, mce_poll_count); /* @@ -595,6 +626,7 @@ DEFINE_PER_CPU(unsigned, mce_poll_count); void machine_check_poll(enum mcp_flags flags, mce_banks_t *b) { struct mce m; + int severity; int i; this_cpu_inc(mce_poll_count); @@ -630,6 +662,20 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b) if (!(flags & MCP_TIMESTAMP)) m.tsc = 0; + + severity = mce_severity(&m, mca_cfg.tolerant, NULL, false); + + /* + * In the cases where we don't have a valid address after all, + * do not add it into the ring buffer. + */ + if (severity == MCE_DEFERRED_SEVERITY && memory_error(&m)) { + if (m.status & MCI_STATUS_ADDRV) { + mce_ring_add(m.addr >> PAGE_SHIFT); + mce_schedule_work(); + } + } + /* * Don't get the IP here because it's unlikely to * have anything to do with the actual error location. 
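[Editor's note] The new memory_error() above classifies Intel compound error codes purely from MCi_STATUS bits. A standalone user-space sketch of the same mask tests (the sample MCACOD values are picked only for illustration) makes the masking easier to check by hand:

/*
 * Illustration of the compound-error-code tests used by memory_error()
 * (bit layout per Intel SDM Vol. 3B, 15.9.2). Not kernel code.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool is_intel_memory_error(uint64_t mcacod)
{
	if ((mcacod & 0xef80) == (1u << 7))	/* memory error, bus bit 11 clear */
		return true;
	if ((mcacod & 0xef00) == (1u << 8))	/* cache hierarchy error */
		return true;
	if ((mcacod & 0xeffc) == 0xc)		/* generic cache hierarchy error */
		return true;
	return false;
}

int main(void)
{
	printf("0x009f -> %d\n", is_intel_memory_error(0x009f)); /* memory error */
	printf("0x0150 -> %d\n", is_intel_memory_error(0x0150)); /* cache hierarchy */
	printf("0x0e0b -> %d\n", is_intel_memory_error(0x0e0b)); /* bus/interconnect: bit 11 set, excluded */
	return 0;
}
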
@@ -668,7 +714,8 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp, if (quirk_no_way_out) quirk_no_way_out(i, m, regs); } - if (mce_severity(m, mca_cfg.tolerant, msg) >= MCE_PANIC_SEVERITY) + if (mce_severity(m, mca_cfg.tolerant, msg, true) >= + MCE_PANIC_SEVERITY) ret = 1; } return ret; @@ -697,7 +744,7 @@ static int mce_timed_out(u64 *t) * might have been modified by someone else. */ rmb(); - if (atomic_read(&mce_paniced)) + if (atomic_read(&mce_panicked)) wait_for_panic(); if (!mca_cfg.monarch_timeout) goto out; @@ -754,7 +801,7 @@ static void mce_reign(void) for_each_possible_cpu(cpu) { int severity = mce_severity(&per_cpu(mces_seen, cpu), mca_cfg.tolerant, - &nmsg); + &nmsg, true); if (severity > global_worst) { msg = nmsg; global_worst = severity; @@ -1095,13 +1142,14 @@ void do_machine_check(struct pt_regs *regs, long error_code) */ add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE); - severity = mce_severity(&m, cfg->tolerant, NULL); + severity = mce_severity(&m, cfg->tolerant, NULL, true); /* - * When machine check was for corrected handler don't touch, - * unless we're panicing. + * When machine check was for corrected/deferred handler don't + * touch, unless we're panicing. */ - if (severity == MCE_KEEP_SEVERITY && !no_way_out) + if ((severity == MCE_KEEP_SEVERITY || + severity == MCE_UCNA_SEVERITY) && !no_way_out) continue; __set_bit(i, toclear); if (severity == MCE_NO_SEVERITY) { @@ -2520,7 +2568,7 @@ struct dentry *mce_get_debugfs_dir(void) static void mce_reset(void) { cpu_missing = 0; - atomic_set(&mce_fake_paniced, 0); + atomic_set(&mce_fake_panicked, 0); atomic_set(&mce_executing, 0); atomic_set(&mce_callin, 0); atomic_set(&global_nwo, 0); diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c index 5d4999f95aec..f1c3769bbd64 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c @@ -212,12 +212,12 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c) unsigned int cpu = smp_processor_id(); u32 low = 0, high = 0, address = 0; unsigned int bank, block; - int offset = -1; + int offset = -1, new; for (bank = 0; bank < mca_cfg.banks; ++bank) { for (block = 0; block < NR_BLOCKS; ++block) { if (block == 0) - address = MSR_IA32_MC0_MISC + bank * 4; + address = MSR_IA32_MCx_MISC(bank); else if (block == 1) { address = (low & MASK_BLKPTR_LO) >> 21; if (!address) @@ -247,13 +247,18 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c) b.address = address; b.interrupt_capable = lvt_interrupt_supported(bank, high); - if (b.interrupt_capable) { - int new = (high & MASK_LVTOFF_HI) >> 20; - offset = setup_APIC_mce(offset, new); - } + if (!b.interrupt_capable) + goto init; + + new = (high & MASK_LVTOFF_HI) >> 20; + offset = setup_APIC_mce(offset, new); + + if ((offset == new) && + (mce_threshold_vector != amd_threshold_interrupt)) + mce_threshold_vector = amd_threshold_interrupt; +init: mce_threshold_block_init(&b, offset); - mce_threshold_vector = amd_threshold_interrupt; } } } @@ -270,18 +275,17 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c) static void amd_threshold_interrupt(void) { u32 low = 0, high = 0, address = 0; + int cpu = smp_processor_id(); unsigned int bank, block; struct mce m; - mce_setup(&m); - /* assume first bank caused it */ for (bank = 0; bank < mca_cfg.banks; ++bank) { - if (!(per_cpu(bank_map, m.cpu) & (1 << bank))) + if (!(per_cpu(bank_map, cpu) & (1 << bank))) continue; for (block = 0; block < NR_BLOCKS; ++block) { if (block == 0) { - address = 
MSR_IA32_MC0_MISC + bank * 4; + address = MSR_IA32_MCx_MISC(bank); } else if (block == 1) { address = (low & MASK_BLKPTR_LO) >> 21; if (!address) @@ -309,21 +313,20 @@ static void amd_threshold_interrupt(void) * Log the machine check that caused the threshold * event. */ - machine_check_poll(MCP_TIMESTAMP, - this_cpu_ptr(&mce_poll_banks)); - - if (high & MASK_OVERFLOW_HI) { - rdmsrl(address, m.misc); - rdmsrl(MSR_IA32_MC0_STATUS + bank * 4, - m.status); - m.bank = K8_MCE_THRESHOLD_BASE - + bank * NR_BLOCKS - + block; - mce_log(&m); - return; - } + if (high & MASK_OVERFLOW_HI) + goto log; } } + return; + +log: + mce_setup(&m); + rdmsrl(MSR_IA32_MCx_STATUS(bank), m.status); + m.misc = ((u64)high << 32) | low; + m.bank = bank; + mce_log(&m); + + wrmsrl(MSR_IA32_MCx_STATUS(bank), 0); } /* @@ -617,8 +620,7 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank) } } - err = allocate_threshold_blocks(cpu, bank, 0, - MSR_IA32_MC0_MISC + bank * 4); + err = allocate_threshold_blocks(cpu, bank, 0, MSR_IA32_MCx_MISC(bank)); if (!err) goto out; diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index 8fffd845e22b..bfbbe6195e2d 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c @@ -376,7 +376,7 @@ static enum ucode_state __load_microcode_amd(u8 family, const u8 *data, return UCODE_OK; } -enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size) +enum ucode_state load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size) { enum ucode_state ret; @@ -390,8 +390,8 @@ enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size) #if defined(CONFIG_MICROCODE_AMD_EARLY) && defined(CONFIG_X86_32) /* save BSP's matching patch for early load */ - if (cpu_data(smp_processor_id()).cpu_index == boot_cpu_data.cpu_index) { - struct ucode_patch *p = find_patch(smp_processor_id()); + if (cpu_data(cpu).cpu_index == boot_cpu_data.cpu_index) { + struct ucode_patch *p = find_patch(cpu); if (p) { memset(amd_ucode_patch, 0, PATCH_MAX_SIZE); memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data), @@ -444,7 +444,7 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device, goto fw_release; } - ret = load_microcode_amd(c->x86, fw->data, fw->size); + ret = load_microcode_amd(cpu, c->x86, fw->data, fw->size); fw_release: release_firmware(fw); diff --git a/arch/x86/kernel/cpu/microcode/amd_early.c b/arch/x86/kernel/cpu/microcode/amd_early.c index 06674473b0e6..737737edbd1e 100644 --- a/arch/x86/kernel/cpu/microcode/amd_early.c +++ b/arch/x86/kernel/cpu/microcode/amd_early.c @@ -389,7 +389,7 @@ int __init save_microcode_in_initrd_amd(void) eax = cpuid_eax(0x00000001); eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff); - ret = load_microcode_amd(eax, container, container_size); + ret = load_microcode_amd(smp_processor_id(), eax, container, container_size); if (ret != UCODE_OK) retval = -EINVAL; @@ -402,3 +402,21 @@ int __init save_microcode_in_initrd_amd(void) return retval; } + +void reload_ucode_amd(void) +{ + struct microcode_amd *mc; + u32 rev, eax; + + rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax); + + mc = (struct microcode_amd *)amd_ucode_patch; + + if (mc && rev < mc->hdr.patch_id) { + if (!__apply_microcode_amd(mc)) { + ucode_new_rev = mc->hdr.patch_id; + pr_info("microcode: reload patch_level=0x%08x\n", + ucode_new_rev); + } + } +} diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index 08fe6e8a726e..15c29096136b 100644 --- 
a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -465,16 +465,8 @@ static void mc_bp_resume(void) if (uci->valid && uci->mc) microcode_ops->apply_microcode(cpu); -#ifdef CONFIG_X86_64 else if (!uci->mc) - /* - * We might resume and not have applied late microcode but still - * have a newer patch stashed from the early loader. We don't - * have it in uci->mc so we have to load it the same way we're - * applying patches early on the APs. - */ - load_ucode_ap(); -#endif + reload_early_microcode(); } static struct syscore_ops mc_syscore_ops = { @@ -559,7 +551,7 @@ static int __init microcode_init(void) struct cpuinfo_x86 *c = &cpu_data(0); int error; - if (dis_ucode_ldr) + if (paravirt_enabled() || dis_ucode_ldr) return 0; if (c->x86_vendor == X86_VENDOR_INTEL) diff --git a/arch/x86/kernel/cpu/microcode/core_early.c b/arch/x86/kernel/cpu/microcode/core_early.c index 2c017f242a78..d45df4bd16ab 100644 --- a/arch/x86/kernel/cpu/microcode/core_early.c +++ b/arch/x86/kernel/cpu/microcode/core_early.c @@ -176,3 +176,24 @@ int __init save_microcode_in_initrd(void) return 0; } + +void reload_early_microcode(void) +{ + int vendor, x86; + + vendor = x86_vendor(); + x86 = x86_family(); + + switch (vendor) { + case X86_VENDOR_INTEL: + if (x86 >= 6) + reload_ucode_intel(); + break; + case X86_VENDOR_AMD: + if (x86 >= 0x10) + reload_ucode_amd(); + break; + default: + break; + } +} diff --git a/arch/x86/kernel/cpu/microcode/intel_early.c b/arch/x86/kernel/cpu/microcode/intel_early.c index b88343f7a3b3..ec9df6f9cd47 100644 --- a/arch/x86/kernel/cpu/microcode/intel_early.c +++ b/arch/x86/kernel/cpu/microcode/intel_early.c @@ -650,8 +650,7 @@ static inline void print_ucode(struct ucode_cpu_info *uci) } #endif -static int apply_microcode_early(struct mc_saved_data *mc_saved_data, - struct ucode_cpu_info *uci) +static int apply_microcode_early(struct ucode_cpu_info *uci, bool early) { struct microcode_intel *mc_intel; unsigned int val[2]; @@ -680,7 +679,10 @@ static int apply_microcode_early(struct mc_saved_data *mc_saved_data, #endif uci->cpu_sig.rev = val[1]; - print_ucode(uci); + if (early) + print_ucode(uci); + else + print_ucode_info(uci, mc_intel->hdr.date); return 0; } @@ -715,12 +717,17 @@ _load_ucode_intel_bsp(struct mc_saved_data *mc_saved_data, unsigned long initrd_end_early, struct ucode_cpu_info *uci) { + enum ucode_state ret; + collect_cpu_info_early(uci); scan_microcode(initrd_start_early, initrd_end_early, mc_saved_data, mc_saved_in_initrd, uci); - load_microcode(mc_saved_data, mc_saved_in_initrd, - initrd_start_early, uci); - apply_microcode_early(mc_saved_data, uci); + + ret = load_microcode(mc_saved_data, mc_saved_in_initrd, + initrd_start_early, uci); + + if (ret == UCODE_OK) + apply_microcode_early(uci, true); } void __init @@ -749,7 +756,8 @@ load_ucode_intel_bsp(void) initrd_end_early = initrd_start_early + ramdisk_size; _load_ucode_intel_bsp(&mc_saved_data, mc_saved_in_initrd, - initrd_start_early, initrd_end_early, &uci); + initrd_start_early, initrd_end_early, + &uci); #endif } @@ -783,5 +791,23 @@ void load_ucode_intel_ap(void) collect_cpu_info_early(&uci); load_microcode(mc_saved_data_p, mc_saved_in_initrd_p, initrd_start_addr, &uci); - apply_microcode_early(mc_saved_data_p, &uci); + apply_microcode_early(&uci, true); +} + +void reload_ucode_intel(void) +{ + struct ucode_cpu_info uci; + enum ucode_state ret; + + if (!mc_saved_data.mc_saved_count) + return; + + collect_cpu_info_early(&uci); + + ret = 
generic_load_microcode_early(mc_saved_data.mc_saved, + mc_saved_data.mc_saved_count, &uci); + if (ret != UCODE_OK) + return; + + apply_microcode_early(&uci, false); } diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h index fc5eb390b368..4e6cdb0ddc70 100644 --- a/arch/x86/kernel/cpu/perf_event.h +++ b/arch/x86/kernel/cpu/perf_event.h @@ -253,6 +253,10 @@ struct cpu_hw_events { #define INTEL_UEVENT_CONSTRAINT(c, n) \ EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK) +/* Like UEVENT_CONSTRAINT, but match flags too */ +#define INTEL_FLAGS_UEVENT_CONSTRAINT(c, n) \ + EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS) + #define INTEL_PLD_CONSTRAINT(c, n) \ __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \ HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LDLAT) diff --git a/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/arch/x86/kernel/cpu/perf_event_amd_ibs.c index cbb1be3ed9e4..a61f5c6911da 100644 --- a/arch/x86/kernel/cpu/perf_event_amd_ibs.c +++ b/arch/x86/kernel/cpu/perf_event_amd_ibs.c @@ -565,6 +565,21 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs) perf_ibs->offset_max, offset + 1); } while (offset < offset_max); + if (event->attr.sample_type & PERF_SAMPLE_RAW) { + /* + * Read IbsBrTarget and IbsOpData4 separately + * depending on their availability. + * Can't add to offset_max as they are staggered + */ + if (ibs_caps & IBS_CAPS_BRNTRGT) { + rdmsrl(MSR_AMD64_IBSBRTARGET, *buf++); + size++; + } + if (ibs_caps & IBS_CAPS_OPDATA4) { + rdmsrl(MSR_AMD64_IBSOPDATA4, *buf++); + size++; + } + } ibs_data.size = sizeof(u64) * size; regs = *iregs; diff --git a/arch/x86/kernel/cpu/perf_event_amd_iommu.c b/arch/x86/kernel/cpu/perf_event_amd_iommu.c index 639d1289b1ba..97242a9242bd 100644 --- a/arch/x86/kernel/cpu/perf_event_amd_iommu.c +++ b/arch/x86/kernel/cpu/perf_event_amd_iommu.c @@ -130,10 +130,7 @@ static ssize_t _iommu_cpumask_show(struct device *dev, struct device_attribute *attr, char *buf) { - int n = cpulist_scnprintf(buf, PAGE_SIZE - 2, &iommu_cpumask); - buf[n++] = '\n'; - buf[n] = '\0'; - return n; + return cpumap_print_to_pagebuf(true, buf, &iommu_cpumask); } static DEVICE_ATTR(cpumask, S_IRUGO, _iommu_cpumask_show, NULL); diff --git a/arch/x86/kernel/cpu/perf_event_amd_uncore.c b/arch/x86/kernel/cpu/perf_event_amd_uncore.c index 30790d798e6b..cc6cedb8f25d 100644 --- a/arch/x86/kernel/cpu/perf_event_amd_uncore.c +++ b/arch/x86/kernel/cpu/perf_event_amd_uncore.c @@ -219,7 +219,6 @@ static ssize_t amd_uncore_attr_show_cpumask(struct device *dev, struct device_attribute *attr, char *buf) { - int n; cpumask_t *active_mask; struct pmu *pmu = dev_get_drvdata(dev); @@ -230,10 +229,7 @@ static ssize_t amd_uncore_attr_show_cpumask(struct device *dev, else return 0; - n = cpulist_scnprintf(buf, PAGE_SIZE - 2, active_mask); - buf[n++] = '\n'; - buf[n] = '\0'; - return n; + return cpumap_print_to_pagebuf(true, buf, active_mask); } static DEVICE_ATTR(cpumask, S_IRUGO, amd_uncore_attr_show_cpumask, NULL); diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c index 46211bcc813e..3c895d480cd7 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_ds.c +++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c @@ -552,18 +552,18 @@ int intel_pmu_drain_bts_buffer(void) * PEBS */ struct event_constraint intel_core2_pebs_event_constraints[] = { - INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */ - INTEL_UEVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */ - 
INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */ - INTEL_UEVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETURED.ANY */ - INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */ + INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */ + INTEL_FLAGS_UEVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */ + INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */ + INTEL_FLAGS_UEVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETURED.ANY */ + INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */ EVENT_CONSTRAINT_END }; struct event_constraint intel_atom_pebs_event_constraints[] = { - INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */ - INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */ - INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */ + INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */ + INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */ + INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */ EVENT_CONSTRAINT_END }; @@ -577,36 +577,36 @@ struct event_constraint intel_slm_pebs_event_constraints[] = { struct event_constraint intel_nehalem_pebs_event_constraints[] = { INTEL_PLD_CONSTRAINT(0x100b, 0xf), /* MEM_INST_RETIRED.* */ - INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */ - INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */ - INTEL_EVENT_CONSTRAINT(0xc0, 0xf), /* INST_RETIRED.ANY */ + INTEL_FLAGS_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */ + INTEL_FLAGS_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */ + INTEL_FLAGS_EVENT_CONSTRAINT(0xc0, 0xf), /* INST_RETIRED.ANY */ INTEL_EVENT_CONSTRAINT(0xc2, 0xf), /* UOPS_RETIRED.* */ - INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */ - INTEL_UEVENT_CONSTRAINT(0x02c5, 0xf), /* BR_MISP_RETIRED.NEAR_CALL */ - INTEL_EVENT_CONSTRAINT(0xc7, 0xf), /* SSEX_UOPS_RETIRED.* */ - INTEL_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */ - INTEL_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */ - INTEL_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */ + INTEL_FLAGS_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */ + INTEL_FLAGS_UEVENT_CONSTRAINT(0x02c5, 0xf), /* BR_MISP_RETIRED.NEAR_CALL */ + INTEL_FLAGS_EVENT_CONSTRAINT(0xc7, 0xf), /* SSEX_UOPS_RETIRED.* */ + INTEL_FLAGS_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */ + INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */ + INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */ EVENT_CONSTRAINT_END }; struct event_constraint intel_westmere_pebs_event_constraints[] = { INTEL_PLD_CONSTRAINT(0x100b, 0xf), /* MEM_INST_RETIRED.* */ - INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */ - INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */ - INTEL_EVENT_CONSTRAINT(0xc0, 0xf), /* INSTR_RETIRED.* */ + INTEL_FLAGS_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */ + INTEL_FLAGS_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */ + INTEL_FLAGS_EVENT_CONSTRAINT(0xc0, 0xf), /* INSTR_RETIRED.* */ INTEL_EVENT_CONSTRAINT(0xc2, 0xf), /* UOPS_RETIRED.* */ - INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */ - INTEL_EVENT_CONSTRAINT(0xc5, 0xf), /* BR_MISP_RETIRED.* */ - INTEL_EVENT_CONSTRAINT(0xc7, 0xf), /* SSEX_UOPS_RETIRED.* */ - INTEL_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */ - INTEL_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */ - INTEL_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */ + 
INTEL_FLAGS_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */ + INTEL_FLAGS_EVENT_CONSTRAINT(0xc5, 0xf), /* BR_MISP_RETIRED.* */ + INTEL_FLAGS_EVENT_CONSTRAINT(0xc7, 0xf), /* SSEX_UOPS_RETIRED.* */ + INTEL_FLAGS_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */ + INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */ + INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */ EVENT_CONSTRAINT_END }; struct event_constraint intel_snb_pebs_event_constraints[] = { - INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */ + INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */ INTEL_PLD_CONSTRAINT(0x01cd, 0x8), /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */ INTEL_PST_CONSTRAINT(0x02cd, 0x8), /* MEM_TRANS_RETIRED.PRECISE_STORES */ /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */ @@ -617,7 +617,7 @@ struct event_constraint intel_snb_pebs_event_constraints[] = { }; struct event_constraint intel_ivb_pebs_event_constraints[] = { - INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */ + INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */ INTEL_PLD_CONSTRAINT(0x01cd, 0x8), /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */ INTEL_PST_CONSTRAINT(0x02cd, 0x8), /* MEM_TRANS_RETIRED.PRECISE_STORES */ /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */ @@ -628,7 +628,7 @@ struct event_constraint intel_ivb_pebs_event_constraints[] = { }; struct event_constraint intel_hsw_pebs_event_constraints[] = { - INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */ + INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */ INTEL_PLD_CONSTRAINT(0x01cd, 0xf), /* MEM_TRANS_RETIRED.* */ /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */ INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf), @@ -724,6 +724,7 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs) unsigned long ip = regs->ip; int is_64bit = 0; void *kaddr; + int size; /* * We don't need to fixup if the PEBS assist is fault like @@ -758,11 +759,12 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs) return 1; } + size = ip - to; if (!kernel_ip(ip)) { - int size, bytes; + int bytes; u8 *buf = this_cpu_read(insn_buffer); - size = ip - to; /* Must fit our buffer, see above */ + /* 'size' must fit our buffer, see above */ bytes = copy_from_user_nmi(buf, (void __user *)to, size); if (bytes != 0) return 0; @@ -780,11 +782,20 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs) #ifdef CONFIG_X86_64 is_64bit = kernel_ip(to) || !test_thread_flag(TIF_IA32); #endif - insn_init(&insn, kaddr, is_64bit); + insn_init(&insn, kaddr, size, is_64bit); insn_get_length(&insn); + /* + * Make sure there was not a problem decoding the + * instruction and getting the length. This is + * doubly important because we have an infinite + * loop if insn.length=0. 
+ */ + if (!insn.length) + break; to += insn.length; kaddr += insn.length; + size -= insn.length; } while (to < ip); if (to == ip) { @@ -886,6 +897,29 @@ static void __intel_pmu_pebs_event(struct perf_event *event, regs.bp = pebs->bp; regs.sp = pebs->sp; + if (sample_type & PERF_SAMPLE_REGS_INTR) { + regs.ax = pebs->ax; + regs.bx = pebs->bx; + regs.cx = pebs->cx; + regs.dx = pebs->dx; + regs.si = pebs->si; + regs.di = pebs->di; + regs.bp = pebs->bp; + regs.sp = pebs->sp; + + regs.flags = pebs->flags; +#ifndef CONFIG_X86_32 + regs.r8 = pebs->r8; + regs.r9 = pebs->r9; + regs.r10 = pebs->r10; + regs.r11 = pebs->r11; + regs.r12 = pebs->r12; + regs.r13 = pebs->r13; + regs.r14 = pebs->r14; + regs.r15 = pebs->r15; +#endif + } + if (event->attr.precise_ip > 1 && x86_pmu.intel_cap.pebs_format >= 2) { regs.ip = pebs->real_ip; regs.flags |= PERF_EFLAGS_EXACT; diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c index 45fa730a5283..58f1a94beaf0 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c +++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c @@ -465,7 +465,7 @@ static int branch_type(unsigned long from, unsigned long to, int abort) { struct insn insn; void *addr; - int bytes, size = MAX_INSN_SIZE; + int bytes_read, bytes_left; int ret = X86_BR_NONE; int ext, to_plm, from_plm; u8 buf[MAX_INSN_SIZE]; @@ -493,8 +493,10 @@ static int branch_type(unsigned long from, unsigned long to, int abort) return X86_BR_NONE; /* may fail if text not present */ - bytes = copy_from_user_nmi(buf, (void __user *)from, size); - if (bytes != 0) + bytes_left = copy_from_user_nmi(buf, (void __user *)from, + MAX_INSN_SIZE); + bytes_read = MAX_INSN_SIZE - bytes_left; + if (!bytes_read) return X86_BR_NONE; addr = buf; @@ -505,10 +507,19 @@ static int branch_type(unsigned long from, unsigned long to, int abort) * Ensure we don't blindy read any address by validating it is * a known text address. */ - if (kernel_text_address(from)) + if (kernel_text_address(from)) { addr = (void *)from; - else + /* + * Assume we can get the maximum possible size + * when grabbing kernel data. This is not + * _strictly_ true since we could possibly be + * executing up next to a memory hole, but + * it is very unlikely to be a problem. 
+ */ + bytes_read = MAX_INSN_SIZE; + } else { return X86_BR_NONE; + } } /* @@ -518,8 +529,10 @@ static int branch_type(unsigned long from, unsigned long to, int abort) #ifdef CONFIG_X86_64 is64 = kernel_ip((unsigned long)addr) || !test_thread_flag(TIF_IA32); #endif - insn_init(&insn, addr, is64); + insn_init(&insn, addr, bytes_read, is64); insn_get_opcode(&insn); + if (!insn.opcode.got) + return X86_BR_ABORT; switch (insn.opcode.bytes[0]) { case 0xf: diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c index d64f275fe274..673f930c700f 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c +++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c @@ -365,11 +365,7 @@ static void rapl_pmu_event_read(struct perf_event *event) static ssize_t rapl_get_attr_cpumask(struct device *dev, struct device_attribute *attr, char *buf) { - int n = cpulist_scnprintf(buf, PAGE_SIZE - 2, &rapl_cpu_mask); - - buf[n++] = '\n'; - buf[n] = '\0'; - return n; + return cpumap_print_to_pagebuf(true, buf, &rapl_cpu_mask); } static DEVICE_ATTR(cpumask, S_IRUGO, rapl_get_attr_cpumask, NULL); diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c index 9762dbd9f3f7..08f3fed2b0f2 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c @@ -647,11 +647,7 @@ static int uncore_pmu_event_init(struct perf_event *event) static ssize_t uncore_get_attr_cpumask(struct device *dev, struct device_attribute *attr, char *buf) { - int n = cpulist_scnprintf(buf, PAGE_SIZE - 2, &uncore_cpu_mask); - - buf[n++] = '\n'; - buf[n] = '\0'; - return n; + return cpumap_print_to_pagebuf(true, buf, &uncore_cpu_mask); } static DEVICE_ATTR(cpumask, S_IRUGO, uncore_get_attr_cpumask, NULL); diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c index f9ed429d6e4f..745b158e9a65 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c @@ -449,7 +449,11 @@ static struct attribute *snbep_uncore_qpi_formats_attr[] = { static struct uncore_event_desc snbep_uncore_imc_events[] = { INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"), INTEL_UNCORE_EVENT_DESC(cas_count_read, "event=0x04,umask=0x03"), + INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"), + INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"), INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"), + INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"), + INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"), { /* end: all zeroes */ }, }; @@ -2036,7 +2040,11 @@ static struct intel_uncore_type hswep_uncore_ha = { static struct uncore_event_desc hswep_uncore_imc_events[] = { INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x00,umask=0x00"), INTEL_UNCORE_EVENT_DESC(cas_count_read, "event=0x04,umask=0x03"), + INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"), + INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"), INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"), + INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"), + INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"), { /* end: all zeroes */ }, }; diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c index 5433658e598d..e7d8c7608471 100644 --- a/arch/x86/kernel/cpu/proc.c +++ b/arch/x86/kernel/cpu/proc.c @@ -72,7 +72,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) if 
(c->x86_mask || c->cpuid_level >= 0) seq_printf(m, "stepping\t: %d\n", c->x86_mask); else - seq_printf(m, "stepping\t: unknown\n"); + seq_puts(m, "stepping\t: unknown\n"); if (c->microcode) seq_printf(m, "microcode\t: 0x%x\n", c->microcode); @@ -92,12 +92,12 @@ static int show_cpuinfo(struct seq_file *m, void *v) show_cpuinfo_core(m, c, cpu); show_cpuinfo_misc(m, c); - seq_printf(m, "flags\t\t:"); + seq_puts(m, "flags\t\t:"); for (i = 0; i < 32*NCAPINTS; i++) if (cpu_has(c, i) && x86_cap_flags[i] != NULL) seq_printf(m, " %s", x86_cap_flags[i]); - seq_printf(m, "\nbugs\t\t:"); + seq_puts(m, "\nbugs\t\t:"); for (i = 0; i < 32*NBUGINTS; i++) { unsigned int bug_bit = 32*NCAPINTS + i; @@ -118,7 +118,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n", c->x86_phys_bits, c->x86_virt_bits); - seq_printf(m, "power management:"); + seq_puts(m, "power management:"); for (i = 0; i < 32; i++) { if (c->x86_power & (1 << i)) { if (i < ARRAY_SIZE(x86_power_flags) && @@ -131,7 +131,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) } } - seq_printf(m, "\n\n"); + seq_puts(m, "\n\n"); return 0; } diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c index 4a8013d55947..60639093d536 100644 --- a/arch/x86/kernel/cpu/scattered.c +++ b/arch/x86/kernel/cpu/scattered.c @@ -36,6 +36,11 @@ void init_scattered_cpuid_features(struct cpuinfo_x86 *c) { X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006, 0 }, { X86_FEATURE_PLN, CR_EAX, 4, 0x00000006, 0 }, { X86_FEATURE_PTS, CR_EAX, 6, 0x00000006, 0 }, + { X86_FEATURE_HWP, CR_EAX, 7, 0x00000006, 0 }, + { X86_FEATURE_HWP_NOITFY, CR_EAX, 8, 0x00000006, 0 }, + { X86_FEATURE_HWP_ACT_WINDOW, CR_EAX, 9, 0x00000006, 0 }, + { X86_FEATURE_HWP_EPP, CR_EAX,10, 0x00000006, 0 }, + { X86_FEATURE_HWP_PKG_REQ, CR_EAX,11, 0x00000006, 0 }, { X86_FEATURE_APERFMPERF, CR_ECX, 0, 0x00000006, 0 }, { X86_FEATURE_EPB, CR_ECX, 3, 0x00000006, 0 }, { X86_FEATURE_HW_PSTATE, CR_EDX, 7, 0x80000007, 0 }, diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c index 3225ae6c5180..83741a71558f 100644 --- a/arch/x86/kernel/cpuid.c +++ b/arch/x86/kernel/cpuid.c @@ -143,7 +143,7 @@ static int cpuid_device_create(int cpu) dev = device_create(cpuid_class, NULL, MKDEV(CPUID_MAJOR, cpu), NULL, "cpu%d", cpu); - return IS_ERR(dev) ? 
PTR_ERR(dev) : 0; + return PTR_ERR_OR_ZERO(dev); } static void cpuid_device_destroy(int cpu) diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index 49f886481615..dd2f07ae9d0c 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c @@ -1114,8 +1114,8 @@ void __init memblock_find_dma_reserve(void) * at first, and assume boot_mem will not take below MAX_DMA_PFN */ for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) { - start_pfn = min_t(unsigned long, start_pfn, MAX_DMA_PFN); - end_pfn = min_t(unsigned long, end_pfn, MAX_DMA_PFN); + start_pfn = min(start_pfn, MAX_DMA_PFN); + end_pfn = min(end_pfn, MAX_DMA_PFN); nr_pages += end_pfn - start_pfn; } diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c index 2e1a6853e00c..fe9f0b79a18b 100644 --- a/arch/x86/kernel/early-quirks.c +++ b/arch/x86/kernel/early-quirks.c @@ -455,6 +455,23 @@ struct intel_stolen_funcs { u32 (*base)(int num, int slot, int func, size_t size); }; +static size_t __init gen9_stolen_size(int num, int slot, int func) +{ + u16 gmch_ctrl; + + gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL); + gmch_ctrl >>= BDW_GMCH_GMS_SHIFT; + gmch_ctrl &= BDW_GMCH_GMS_MASK; + + if (gmch_ctrl < 0xf0) + return gmch_ctrl << 25; /* 32 MB units */ + else + /* 4MB increments starting at 0xf0 for 4MB */ + return (gmch_ctrl - 0xf0 + 1) << 22; +} + +typedef size_t (*stolen_size_fn)(int num, int slot, int func); + static const struct intel_stolen_funcs i830_stolen_funcs __initconst = { .base = i830_stolen_base, .size = i830_stolen_size, @@ -490,6 +507,11 @@ static const struct intel_stolen_funcs gen8_stolen_funcs __initconst = { .size = gen8_stolen_size, }; +static const struct intel_stolen_funcs gen9_stolen_funcs __initconst = { + .base = intel_stolen_base, + .size = gen9_stolen_size, +}; + static const struct intel_stolen_funcs chv_stolen_funcs __initconst = { .base = intel_stolen_base, .size = chv_stolen_size, @@ -523,6 +545,7 @@ static const struct pci_device_id intel_stolen_ids[] __initconst = { INTEL_BDW_M_IDS(&gen8_stolen_funcs), INTEL_BDW_D_IDS(&gen8_stolen_funcs), INTEL_CHV_IDS(&chv_stolen_funcs), + INTEL_SKL_IDS(&gen9_stolen_funcs), }; static void __init intel_graphics_stolen(int num, int slot, int func) diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index 344b63f18d14..1cf7c97ff175 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S @@ -1191,10 +1191,10 @@ ENTRY(ftrace_graph_caller) pushl %eax pushl %ecx pushl %edx - movl 0xc(%esp), %edx - lea 0x4(%ebp), %eax + movl 0xc(%esp), %eax + lea 0x4(%ebp), %edx movl (%ebp), %ecx - subl $MCOUNT_INSN_SIZE, %edx + subl $MCOUNT_INSN_SIZE, %eax call prepare_ftrace_return popl %edx popl %ecx diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index c0226ab54106..90878aa38dbd 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -652,6 +652,20 @@ ENTRY(stub_execve) CFI_ENDPROC END(stub_execve) +ENTRY(stub_execveat) + CFI_STARTPROC + addq $8, %rsp + PARTIAL_FRAME 0 + SAVE_REST + FIXUP_TOP_OF_STACK %r11 + call sys_execveat + RESTORE_TOP_OF_STACK %r11 + movq %rax,RAX(%rsp) + RESTORE_REST + jmp int_ret_from_sys_call + CFI_ENDPROC +END(stub_execveat) + /* * sigreturn is special because it needs to restore all registers on return. * This cannot be done with SYSRET, so use the IRET return path instead. 
@@ -697,6 +711,20 @@ ENTRY(stub_x32_execve) CFI_ENDPROC END(stub_x32_execve) +ENTRY(stub_x32_execveat) + CFI_STARTPROC + addq $8, %rsp + PARTIAL_FRAME 0 + SAVE_REST + FIXUP_TOP_OF_STACK %r11 + call compat_sys_execveat + RESTORE_TOP_OF_STACK %r11 + movq %rax,RAX(%rsp) + RESTORE_REST + jmp int_ret_from_sys_call + CFI_ENDPROC +END(stub_x32_execveat) + #endif /* diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c index 94d857fb1033..f5d0730e7b08 100644 --- a/arch/x86/kernel/espfix_64.c +++ b/arch/x86/kernel/espfix_64.c @@ -122,9 +122,6 @@ static void init_espfix_random(void) void __init init_espfix_bsp(void) { pgd_t *pgd_p; - pteval_t ptemask; - - ptemask = __supported_pte_mask; /* Install the espfix pud into the kernel page directory */ pgd_p = &init_level4_pgt[pgd_index(ESPFIX_BASE_ADDR)]; diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index 3386dc9aa333..2142376dc8c6 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c @@ -17,6 +17,7 @@ #include <linux/ftrace.h> #include <linux/percpu.h> #include <linux/sched.h> +#include <linux/slab.h> #include <linux/init.h> #include <linux/list.h> #include <linux/module.h> @@ -47,7 +48,7 @@ int ftrace_arch_code_modify_post_process(void) union ftrace_code_union { char code[MCOUNT_INSN_SIZE]; struct { - char e8; + unsigned char e8; int offset; } __attribute__((packed)); }; @@ -582,7 +583,7 @@ void ftrace_replace_code(int enable) remove_breakpoints: pr_warn("Failed on %s (%d):\n", report, count); - ftrace_bug(ret, rec ? rec->ip : 0); + ftrace_bug(ret, rec); for_ftrace_rec_iter(iter) { rec = ftrace_rec_iter_record(iter); /* @@ -644,13 +645,8 @@ int __init ftrace_dyn_arch_init(void) { return 0; } -#endif - -#ifdef CONFIG_FUNCTION_GRAPH_TRACER - -#ifdef CONFIG_DYNAMIC_FTRACE -extern void ftrace_graph_call(void); +#if defined(CONFIG_X86_64) || defined(CONFIG_FUNCTION_GRAPH_TRACER) static unsigned char *ftrace_jmp_replace(unsigned long ip, unsigned long addr) { static union ftrace_code_union calc; @@ -664,6 +660,280 @@ static unsigned char *ftrace_jmp_replace(unsigned long ip, unsigned long addr) */ return calc.code; } +#endif + +/* Currently only x86_64 supports dynamic trampolines */ +#ifdef CONFIG_X86_64 + +#ifdef CONFIG_MODULES +#include <linux/moduleloader.h> +/* Module allocation simplifies allocating memory for code */ +static inline void *alloc_tramp(unsigned long size) +{ + return module_alloc(size); +} +static inline void tramp_free(void *tramp) +{ + module_free(NULL, tramp); +} +#else +/* Trampolines can only be created if modules are supported */ +static inline void *alloc_tramp(unsigned long size) +{ + return NULL; +} +static inline void tramp_free(void *tramp) { } +#endif + +/* Defined as markers to the end of the ftrace default trampolines */ +extern void ftrace_caller_end(void); +extern void ftrace_regs_caller_end(void); +extern void ftrace_return(void); +extern void ftrace_caller_op_ptr(void); +extern void ftrace_regs_caller_op_ptr(void); + +/* movq function_trace_op(%rip), %rdx */ +/* 0x48 0x8b 0x15 <offset-to-ftrace_trace_op (4 bytes)> */ +#define OP_REF_SIZE 7 + +/* + * The ftrace_ops is passed to the function callback. Since the + * trampoline only services a single ftrace_ops, we can pass in + * that ops directly. + * + * The ftrace_op_code_union is used to create a pointer to the + * ftrace_ops that will be passed to the callback function. 
+ */ +union ftrace_op_code_union { + char code[OP_REF_SIZE]; + struct { + char op[3]; + int offset; + } __attribute__((packed)); +}; + +static unsigned long +create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size) +{ + unsigned const char *jmp; + unsigned long start_offset; + unsigned long end_offset; + unsigned long op_offset; + unsigned long offset; + unsigned long size; + unsigned long ip; + unsigned long *ptr; + void *trampoline; + /* 48 8b 15 <offset> is movq <offset>(%rip), %rdx */ + unsigned const char op_ref[] = { 0x48, 0x8b, 0x15 }; + union ftrace_op_code_union op_ptr; + int ret; + + if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) { + start_offset = (unsigned long)ftrace_regs_caller; + end_offset = (unsigned long)ftrace_regs_caller_end; + op_offset = (unsigned long)ftrace_regs_caller_op_ptr; + } else { + start_offset = (unsigned long)ftrace_caller; + end_offset = (unsigned long)ftrace_caller_end; + op_offset = (unsigned long)ftrace_caller_op_ptr; + } + + size = end_offset - start_offset; + + /* + * Allocate enough size to store the ftrace_caller code, + * the jmp to ftrace_return, as well as the address of + * the ftrace_ops this trampoline is used for. + */ + trampoline = alloc_tramp(size + MCOUNT_INSN_SIZE + sizeof(void *)); + if (!trampoline) + return 0; + + *tramp_size = size + MCOUNT_INSN_SIZE + sizeof(void *); + + /* Copy ftrace_caller onto the trampoline memory */ + ret = probe_kernel_read(trampoline, (void *)start_offset, size); + if (WARN_ON(ret < 0)) { + tramp_free(trampoline); + return 0; + } + + ip = (unsigned long)trampoline + size; + + /* The trampoline ends with a jmp to ftrace_return */ + jmp = ftrace_jmp_replace(ip, (unsigned long)ftrace_return); + memcpy(trampoline + size, jmp, MCOUNT_INSN_SIZE); + + /* + * The address of the ftrace_ops that is used for this trampoline + * is stored at the end of the trampoline. This will be used to + * load the third parameter for the callback. Basically, that + * location at the end of the trampoline takes the place of + * the global function_trace_op variable. + */ + + ptr = (unsigned long *)(trampoline + size + MCOUNT_INSN_SIZE); + *ptr = (unsigned long)ops; + + op_offset -= start_offset; + memcpy(&op_ptr, trampoline + op_offset, OP_REF_SIZE); + + /* Are we pointing to the reference? */ + if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0)) { + tramp_free(trampoline); + return 0; + } + + /* Load the contents of ptr into the callback parameter */ + offset = (unsigned long)ptr; + offset -= (unsigned long)trampoline + op_offset + OP_REF_SIZE; + + op_ptr.offset = offset; + + /* put in the new offset to the ftrace_ops */ + memcpy(trampoline + op_offset, &op_ptr, OP_REF_SIZE); + + /* ALLOC_TRAMP flags lets us know we created it */ + ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP; + + return (unsigned long)trampoline; +} + +static unsigned long calc_trampoline_call_offset(bool save_regs) +{ + unsigned long start_offset; + unsigned long call_offset; + + if (save_regs) { + start_offset = (unsigned long)ftrace_regs_caller; + call_offset = (unsigned long)ftrace_regs_call; + } else { + start_offset = (unsigned long)ftrace_caller; + call_offset = (unsigned long)ftrace_call; + } + + return call_offset - start_offset; +} + +void arch_ftrace_update_trampoline(struct ftrace_ops *ops) +{ + ftrace_func_t func; + unsigned char *new; + unsigned long offset; + unsigned long ip; + unsigned int size; + int ret; + + if (ops->trampoline) { + /* + * The ftrace_ops caller may set up its own trampoline. + * In such a case, this code must not modify it. 
+ */ + if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP)) + return; + } else { + ops->trampoline = create_trampoline(ops, &size); + if (!ops->trampoline) + return; + ops->trampoline_size = size; + } + + offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS); + ip = ops->trampoline + offset; + + func = ftrace_ops_get_func(ops); + + /* Do a safe modify in case the trampoline is executing */ + new = ftrace_call_replace(ip, (unsigned long)func); + ret = update_ftrace_func(ip, new); + + /* The update should never fail */ + WARN_ON(ret); +} + +/* Return the address of the function the trampoline calls */ +static void *addr_from_call(void *ptr) +{ + union ftrace_code_union calc; + int ret; + + ret = probe_kernel_read(&calc, ptr, MCOUNT_INSN_SIZE); + if (WARN_ON_ONCE(ret < 0)) + return NULL; + + /* Make sure this is a call */ + if (WARN_ON_ONCE(calc.e8 != 0xe8)) { + pr_warn("Expected e8, got %x\n", calc.e8); + return NULL; + } + + return ptr + MCOUNT_INSN_SIZE + calc.offset; +} + +void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent, + unsigned long frame_pointer); + +/* + * If the ops->trampoline was not allocated, then it probably + * has a static trampoline func, or is the ftrace caller itself. + */ +static void *static_tramp_func(struct ftrace_ops *ops, struct dyn_ftrace *rec) +{ + unsigned long offset; + bool save_regs = rec->flags & FTRACE_FL_REGS_EN; + void *ptr; + + if (ops && ops->trampoline) { +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + /* + * We only know about function graph tracer setting as static + * trampoline. + */ + if (ops->trampoline == FTRACE_GRAPH_ADDR) + return (void *)prepare_ftrace_return; +#endif + return NULL; + } + + offset = calc_trampoline_call_offset(save_regs); + + if (save_regs) + ptr = (void *)FTRACE_REGS_ADDR + offset; + else + ptr = (void *)FTRACE_ADDR + offset; + + return addr_from_call(ptr); +} + +void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec) +{ + unsigned long offset; + + /* If we didn't allocate this trampoline, consider it static */ + if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP)) + return static_tramp_func(ops, rec); + + offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS); + return addr_from_call((void *)ops->trampoline + offset); +} + +void arch_ftrace_trampoline_free(struct ftrace_ops *ops) +{ + if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP)) + return; + + tramp_free((void *)ops->trampoline); + ops->trampoline = 0; +} + +#endif /* CONFIG_X86_64 */ +#endif /* CONFIG_DYNAMIC_FTRACE */ + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + +#ifdef CONFIG_DYNAMIC_FTRACE +extern void ftrace_graph_call(void); static int ftrace_mod_jmp(unsigned long ip, void *func) { @@ -694,7 +964,7 @@ int ftrace_disable_ftrace_graph_caller(void) * Hook the return address and push it in the stack of return addrs * in current thread info. 
*/ -void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, +void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent, unsigned long frame_pointer) { unsigned long old; diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 922d28581024..6307a0f0cf17 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -59,78 +59,78 @@ int arch_show_interrupts(struct seq_file *p, int prec) seq_printf(p, "%*s: ", prec, "NMI"); for_each_online_cpu(j) seq_printf(p, "%10u ", irq_stats(j)->__nmi_count); - seq_printf(p, " Non-maskable interrupts\n"); + seq_puts(p, " Non-maskable interrupts\n"); #ifdef CONFIG_X86_LOCAL_APIC seq_printf(p, "%*s: ", prec, "LOC"); for_each_online_cpu(j) seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs); - seq_printf(p, " Local timer interrupts\n"); + seq_puts(p, " Local timer interrupts\n"); seq_printf(p, "%*s: ", prec, "SPU"); for_each_online_cpu(j) seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count); - seq_printf(p, " Spurious interrupts\n"); + seq_puts(p, " Spurious interrupts\n"); seq_printf(p, "%*s: ", prec, "PMI"); for_each_online_cpu(j) seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs); - seq_printf(p, " Performance monitoring interrupts\n"); + seq_puts(p, " Performance monitoring interrupts\n"); seq_printf(p, "%*s: ", prec, "IWI"); for_each_online_cpu(j) seq_printf(p, "%10u ", irq_stats(j)->apic_irq_work_irqs); - seq_printf(p, " IRQ work interrupts\n"); + seq_puts(p, " IRQ work interrupts\n"); seq_printf(p, "%*s: ", prec, "RTR"); for_each_online_cpu(j) seq_printf(p, "%10u ", irq_stats(j)->icr_read_retry_count); - seq_printf(p, " APIC ICR read retries\n"); + seq_puts(p, " APIC ICR read retries\n"); #endif if (x86_platform_ipi_callback) { seq_printf(p, "%*s: ", prec, "PLT"); for_each_online_cpu(j) seq_printf(p, "%10u ", irq_stats(j)->x86_platform_ipis); - seq_printf(p, " Platform interrupts\n"); + seq_puts(p, " Platform interrupts\n"); } #ifdef CONFIG_SMP seq_printf(p, "%*s: ", prec, "RES"); for_each_online_cpu(j) seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count); - seq_printf(p, " Rescheduling interrupts\n"); + seq_puts(p, " Rescheduling interrupts\n"); seq_printf(p, "%*s: ", prec, "CAL"); for_each_online_cpu(j) seq_printf(p, "%10u ", irq_stats(j)->irq_call_count - irq_stats(j)->irq_tlb_count); - seq_printf(p, " Function call interrupts\n"); + seq_puts(p, " Function call interrupts\n"); seq_printf(p, "%*s: ", prec, "TLB"); for_each_online_cpu(j) seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count); - seq_printf(p, " TLB shootdowns\n"); + seq_puts(p, " TLB shootdowns\n"); #endif #ifdef CONFIG_X86_THERMAL_VECTOR seq_printf(p, "%*s: ", prec, "TRM"); for_each_online_cpu(j) seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count); - seq_printf(p, " Thermal event interrupts\n"); + seq_puts(p, " Thermal event interrupts\n"); #endif #ifdef CONFIG_X86_MCE_THRESHOLD seq_printf(p, "%*s: ", prec, "THR"); for_each_online_cpu(j) seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count); - seq_printf(p, " Threshold APIC interrupts\n"); + seq_puts(p, " Threshold APIC interrupts\n"); #endif #ifdef CONFIG_X86_MCE seq_printf(p, "%*s: ", prec, "MCE"); for_each_online_cpu(j) seq_printf(p, "%10u ", per_cpu(mce_exception_count, j)); - seq_printf(p, " Machine check exceptions\n"); + seq_puts(p, " Machine check exceptions\n"); seq_printf(p, "%*s: ", prec, "MCP"); for_each_online_cpu(j) seq_printf(p, "%10u ", per_cpu(mce_poll_count, j)); - seq_printf(p, " Machine check polls\n"); + seq_puts(p, " Machine check polls\n"); #endif 
#if IS_ENABLED(CONFIG_HYPERV) || defined(CONFIG_XEN) seq_printf(p, "%*s: ", prec, "THR"); for_each_online_cpu(j) seq_printf(p, "%10u ", irq_stats(j)->irq_hv_callback_count); - seq_printf(p, " Hypervisor callback interrupts\n"); + seq_puts(p, " Hypervisor callback interrupts\n"); #endif seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count)); #if defined(CONFIG_X86_IO_APIC) diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index 67e6d19ef1be..f7e3cd50ece0 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -285,7 +285,7 @@ static int can_probe(unsigned long paddr) * normally used, we just go through if there is no kprobe. */ __addr = recover_probed_instruction(buf, addr); - kernel_insn_init(&insn, (void *)__addr); + kernel_insn_init(&insn, (void *)__addr, MAX_INSN_SIZE); insn_get_length(&insn); /* @@ -330,8 +330,10 @@ int __copy_instruction(u8 *dest, u8 *src) { struct insn insn; kprobe_opcode_t buf[MAX_INSN_SIZE]; + unsigned long recovered_insn = + recover_probed_instruction(buf, (unsigned long)src); - kernel_insn_init(&insn, (void *)recover_probed_instruction(buf, (unsigned long)src)); + kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE); insn_get_length(&insn); /* Another subsystem puts a breakpoint, failed to recover */ if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) @@ -342,7 +344,7 @@ int __copy_instruction(u8 *dest, u8 *src) if (insn_rip_relative(&insn)) { s64 newdisp; u8 *disp; - kernel_insn_init(&insn, dest); + kernel_insn_init(&insn, dest, insn.length); insn_get_displacement(&insn); /* * The copied instruction uses the %rip-relative addressing diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c index 717b02a22e67..5f8f0b3cc674 100644 --- a/arch/x86/kernel/kprobes/ftrace.c +++ b/arch/x86/kernel/kprobes/ftrace.c @@ -27,7 +27,7 @@ static nokprobe_inline int __skip_singlestep(struct kprobe *p, struct pt_regs *regs, - struct kprobe_ctlblk *kcb) + struct kprobe_ctlblk *kcb, unsigned long orig_ip) { /* * Emulate singlestep (and also recover regs->ip) @@ -39,6 +39,8 @@ int __skip_singlestep(struct kprobe *p, struct pt_regs *regs, p->post_handler(p, regs, 0); } __this_cpu_write(current_kprobe, NULL); + if (orig_ip) + regs->ip = orig_ip; return 1; } @@ -46,7 +48,7 @@ int skip_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb) { if (kprobe_ftrace(p)) - return __skip_singlestep(p, regs, kcb); + return __skip_singlestep(p, regs, kcb, 0); else return 0; } @@ -71,13 +73,14 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, if (kprobe_running()) { kprobes_inc_nmissed_count(p); } else { + unsigned long orig_ip = regs->ip; /* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */ regs->ip = ip + sizeof(kprobe_opcode_t); __this_cpu_write(current_kprobe, p); kcb->kprobe_status = KPROBE_HIT_ACTIVE; if (!p->pre_handler || !p->pre_handler(p, regs)) - __skip_singlestep(p, regs, kcb); + __skip_singlestep(p, regs, kcb, orig_ip); /* * If pre_handler returns !0, it sets regs->ip and * resets current kprobe. 
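[Editor's note] The __skip_singlestep() change above matters for exactly this kind of user: a kprobe placed on a function entry is serviced through the ftrace path, and kprobe_ftrace_handler() temporarily rewrites regs->ip so the pre-handler sees the conventional "breakpoint hit" address; restoring orig_ip afterwards lets later ftrace callbacks and the traced function continue from the unmodified address. A minimal module sketch of such a probe (the target symbol is only an example):

#include <linux/kprobes.h>
#include <linux/module.h>

static int entry_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("hit %s, ip=%lx\n", p->symbol_name, regs->ip);
	return 0;	/* 0 = let the normal single-step/skip path run */
}

static struct kprobe kp = {
	.symbol_name = "do_fork",	/* example target; any ftrace-able entry works */
	.pre_handler = entry_pre_handler,
};

static int __init kp_init(void)
{
	return register_kprobe(&kp);
}

static void __exit kp_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(kp_init);
module_exit(kp_exit);
MODULE_LICENSE("GPL");
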
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c index f1314d0bcf0a..7c523bbf3dc8 100644 --- a/arch/x86/kernel/kprobes/opt.c +++ b/arch/x86/kernel/kprobes/opt.c @@ -251,13 +251,15 @@ static int can_optimize(unsigned long paddr) /* Decode instructions */ addr = paddr - offset; while (addr < paddr - offset + size) { /* Decode until function end */ + unsigned long recovered_insn; if (search_exception_tables(addr)) /* * Since some fixup code will jumps into this function, * we can't optimize kprobe in this function. */ return 0; - kernel_insn_init(&insn, (void *)recover_probed_instruction(buf, addr)); + recovered_insn = recover_probed_instruction(buf, addr); + kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE); insn_get_length(&insn); /* Another subsystem puts a breakpoint */ if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S index c73aecf10d34..94ea120fa21f 100644 --- a/arch/x86/kernel/mcount_64.S +++ b/arch/x86/kernel/mcount_64.S @@ -21,40 +21,159 @@ # define function_hook mcount #endif +/* All cases save the original rbp (8 bytes) */ +#ifdef CONFIG_FRAME_POINTER +# ifdef CC_USING_FENTRY +/* Save parent and function stack frames (rip and rbp) */ +# define MCOUNT_FRAME_SIZE (8+16*2) +# else +/* Save just function stack frame (rip and rbp) */ +# define MCOUNT_FRAME_SIZE (8+16) +# endif +#else +/* No need to save a stack frame */ +# define MCOUNT_FRAME_SIZE 8 +#endif /* CONFIG_FRAME_POINTER */ + +/* Size of stack used to save mcount regs in save_mcount_regs */ +#define MCOUNT_REG_SIZE (SS+8 + MCOUNT_FRAME_SIZE) + +/* + * gcc -pg option adds a call to 'mcount' in most functions. + * When -mfentry is used, the call is to 'fentry' and not 'mcount' + * and is done before the function's stack frame is set up. + * They both require a set of regs to be saved before calling + * any C code and restored before returning back to the function. + * + * On boot up, all these calls are converted into nops. When tracing + * is enabled, the call can jump to either ftrace_caller or + * ftrace_regs_caller. Callbacks (tracing functions) that require + * ftrace_regs_caller (like kprobes) need to have pt_regs passed to + * it. For this reason, the size of the pt_regs structure will be + * allocated on the stack and the required mcount registers will + * be saved in the locations that pt_regs has them in. + */ + +/* + * @added: the amount of stack added before calling this + * + * After this is called, the following registers contain: + * + * %rdi - holds the address that called the trampoline + * %rsi - holds the parent function (traced function's return address) + * %rdx - holds the original %rbp + */ +.macro save_mcount_regs added=0 + + /* Always save the original rbp */ + pushq %rbp + +#ifdef CONFIG_FRAME_POINTER + /* + * Stack traces will stop at the ftrace trampoline if the frame pointer + * is not set up properly. If fentry is used, we need to save a frame + * pointer for the parent as well as the function traced, because the + * fentry is called before the stack frame is set up, where as mcount + * is called afterward. 
+ */ +#ifdef CC_USING_FENTRY + /* Save the parent pointer (skip orig rbp and our return address) */ + pushq \added+8*2(%rsp) + pushq %rbp + movq %rsp, %rbp + /* Save the return address (now skip orig rbp, rbp and parent) */ + pushq \added+8*3(%rsp) +#else + /* Can't assume that rip is before this (unless added was zero) */ + pushq \added+8(%rsp) +#endif + pushq %rbp + movq %rsp, %rbp +#endif /* CONFIG_FRAME_POINTER */ + + /* + * We add enough stack to save all regs. + */ + subq $(MCOUNT_REG_SIZE - MCOUNT_FRAME_SIZE), %rsp + movq %rax, RAX(%rsp) + movq %rcx, RCX(%rsp) + movq %rdx, RDX(%rsp) + movq %rsi, RSI(%rsp) + movq %rdi, RDI(%rsp) + movq %r8, R8(%rsp) + movq %r9, R9(%rsp) + /* + * Save the original RBP. Even though the mcount ABI does not + * require this, it helps out callers. + */ + movq MCOUNT_REG_SIZE-8(%rsp), %rdx + movq %rdx, RBP(%rsp) + + /* Copy the parent address into %rsi (second parameter) */ +#ifdef CC_USING_FENTRY + movq MCOUNT_REG_SIZE+8+\added(%rsp), %rsi +#else + /* %rdx contains original %rbp */ + movq 8(%rdx), %rsi +#endif + + /* Move RIP to its proper location */ + movq MCOUNT_REG_SIZE+\added(%rsp), %rdi + movq %rdi, RIP(%rsp) + + /* + * Now %rdi (the first parameter) has the return address of + * where ftrace_call returns. But the callbacks expect the + * address of the call itself. + */ + subq $MCOUNT_INSN_SIZE, %rdi + .endm + +.macro restore_mcount_regs + movq R9(%rsp), %r9 + movq R8(%rsp), %r8 + movq RDI(%rsp), %rdi + movq RSI(%rsp), %rsi + movq RDX(%rsp), %rdx + movq RCX(%rsp), %rcx + movq RAX(%rsp), %rax + + /* ftrace_regs_caller can modify %rbp */ + movq RBP(%rsp), %rbp + + addq $MCOUNT_REG_SIZE, %rsp + + .endm + #ifdef CONFIG_DYNAMIC_FTRACE ENTRY(function_hook) retq END(function_hook) -/* skip is set if stack has been adjusted */ -.macro ftrace_caller_setup skip=0 - MCOUNT_SAVE_FRAME \skip +ENTRY(ftrace_caller) + /* save_mcount_regs fills in first two parameters */ + save_mcount_regs +GLOBAL(ftrace_caller_op_ptr) /* Load the ftrace_ops into the 3rd parameter */ movq function_trace_op(%rip), %rdx - /* Load ip into the first parameter */ - movq RIP(%rsp), %rdi - subq $MCOUNT_INSN_SIZE, %rdi - /* Load the parent_ip into the second parameter */ -#ifdef CC_USING_FENTRY - movq SS+16(%rsp), %rsi -#else - movq 8(%rbp), %rsi -#endif -.endm - -ENTRY(ftrace_caller) - ftrace_caller_setup /* regs go into 4th parameter (but make it NULL) */ movq $0, %rcx GLOBAL(ftrace_call) call ftrace_stub - MCOUNT_RESTORE_FRAME -ftrace_return: + restore_mcount_regs + + /* + * The copied trampoline must call ftrace_return as it + * still may need to call the function graph tracer. 
+ */ +GLOBAL(ftrace_caller_end) + +GLOBAL(ftrace_return) #ifdef CONFIG_FUNCTION_GRAPH_TRACER GLOBAL(ftrace_graph_call) @@ -66,11 +185,16 @@ GLOBAL(ftrace_stub) END(ftrace_caller) ENTRY(ftrace_regs_caller) - /* Save the current flags before compare (in SS location)*/ + /* Save the current flags before any operations that can change them */ pushfq - /* skip=8 to skip flags saved in SS */ - ftrace_caller_setup 8 + /* added 8 bytes to save flags */ + save_mcount_regs 8 + /* save_mcount_regs fills in first two parameters */ + +GLOBAL(ftrace_regs_caller_op_ptr) + /* Load the ftrace_ops into the 3rd parameter */ + movq function_trace_op(%rip), %rdx /* Save the rest of pt_regs */ movq %r15, R15(%rsp) @@ -79,18 +203,17 @@ ENTRY(ftrace_regs_caller) movq %r12, R12(%rsp) movq %r11, R11(%rsp) movq %r10, R10(%rsp) - movq %rbp, RBP(%rsp) movq %rbx, RBX(%rsp) /* Copy saved flags */ - movq SS(%rsp), %rcx + movq MCOUNT_REG_SIZE(%rsp), %rcx movq %rcx, EFLAGS(%rsp) /* Kernel segments */ movq $__KERNEL_DS, %rcx movq %rcx, SS(%rsp) movq $__KERNEL_CS, %rcx movq %rcx, CS(%rsp) - /* Stack - skipping return address */ - leaq SS+16(%rsp), %rcx + /* Stack - skipping return address and flags */ + leaq MCOUNT_REG_SIZE+8*2(%rsp), %rcx movq %rcx, RSP(%rsp) /* regs go into 4th parameter */ @@ -101,11 +224,11 @@ GLOBAL(ftrace_regs_call) /* Copy flags back to SS, to restore them */ movq EFLAGS(%rsp), %rax - movq %rax, SS(%rsp) + movq %rax, MCOUNT_REG_SIZE(%rsp) /* Handlers can change the RIP */ movq RIP(%rsp), %rax - movq %rax, SS+8(%rsp) + movq %rax, MCOUNT_REG_SIZE+8(%rsp) /* restore the rest of pt_regs */ movq R15(%rsp), %r15 @@ -113,19 +236,22 @@ GLOBAL(ftrace_regs_call) movq R13(%rsp), %r13 movq R12(%rsp), %r12 movq R10(%rsp), %r10 - movq RBP(%rsp), %rbp movq RBX(%rsp), %rbx - /* skip=8 to skip flags saved in SS */ - MCOUNT_RESTORE_FRAME 8 + restore_mcount_regs /* Restore flags */ popfq - jmp ftrace_return + /* + * As this jmp to ftrace_return can be a short jump + * it must not be copied into the trampoline. + * The trampoline will add the code to jump + * to the return. 
+ */ +GLOBAL(ftrace_regs_caller_end) - popfq - jmp ftrace_stub + jmp ftrace_return END(ftrace_regs_caller) @@ -136,6 +262,7 @@ ENTRY(function_hook) cmpq $ftrace_stub, ftrace_trace_function jnz trace +fgraph_trace: #ifdef CONFIG_FUNCTION_GRAPH_TRACER cmpq $ftrace_stub, ftrace_graph_return jnz ftrace_graph_caller @@ -148,42 +275,35 @@ GLOBAL(ftrace_stub) retq trace: - MCOUNT_SAVE_FRAME - - movq RIP(%rsp), %rdi -#ifdef CC_USING_FENTRY - movq SS+16(%rsp), %rsi -#else - movq 8(%rbp), %rsi -#endif - subq $MCOUNT_INSN_SIZE, %rdi + /* save_mcount_regs fills in first two parameters */ + save_mcount_regs call *ftrace_trace_function - MCOUNT_RESTORE_FRAME + restore_mcount_regs - jmp ftrace_stub + jmp fgraph_trace END(function_hook) #endif /* CONFIG_DYNAMIC_FTRACE */ #endif /* CONFIG_FUNCTION_TRACER */ #ifdef CONFIG_FUNCTION_GRAPH_TRACER ENTRY(ftrace_graph_caller) - MCOUNT_SAVE_FRAME + /* Saves rbp into %rdx and fills first parameter */ + save_mcount_regs #ifdef CC_USING_FENTRY - leaq SS+16(%rsp), %rdi + leaq MCOUNT_REG_SIZE+8(%rsp), %rsi movq $0, %rdx /* No framepointers needed */ #else - leaq 8(%rbp), %rdi - movq (%rbp), %rdx + /* Save address of the return address of traced function */ + leaq 8(%rdx), %rsi + /* ftrace does sanity checks against frame pointers */ + movq (%rdx), %rdx #endif - movq RIP(%rsp), %rsi - subq $MCOUNT_INSN_SIZE, %rsi - call prepare_ftrace_return - MCOUNT_RESTORE_FRAME + restore_mcount_regs retq END(ftrace_graph_caller) diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c index c9603ac80de5..113e70784854 100644 --- a/arch/x86/kernel/msr.c +++ b/arch/x86/kernel/msr.c @@ -22,6 +22,8 @@ * an SMP box will direct the access to CPU %d. */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/module.h> #include <linux/types.h> @@ -50,11 +52,11 @@ static loff_t msr_seek(struct file *file, loff_t offset, int orig) mutex_lock(&inode->i_mutex); switch (orig) { - case 0: + case SEEK_SET: file->f_pos = offset; ret = file->f_pos; break; - case 1: + case SEEK_CUR: file->f_pos += offset; ret = file->f_pos; break; @@ -206,7 +208,7 @@ static int msr_device_create(int cpu) dev = device_create(msr_class, NULL, MKDEV(MSR_MAJOR, cpu), NULL, "msr%d", cpu); - return IS_ERR(dev) ? PTR_ERR(dev) : 0; + return PTR_ERR_OR_ZERO(dev); } static void msr_device_destroy(int cpu) @@ -248,8 +250,7 @@ static int __init msr_init(void) i = 0; if (__register_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr", &msr_fops)) { - printk(KERN_ERR "msr: unable to get major %d for msr\n", - MSR_MAJOR); + pr_err("unable to get major %d for msr\n", MSR_MAJOR); err = -EBUSY; goto out; } diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 3ed4a68d4013..5a2c02913af3 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -283,24 +283,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) fpu = switch_fpu_prepare(prev_p, next_p, cpu); - /* - * Reload esp0, LDT and the page table pointer: - */ + /* Reload esp0 and ss1. */ load_sp0(tss, next); - /* - * Switch DS and ES. - * This won't pick up thread selector changes, but I guess that is ok. - */ - savesegment(es, prev->es); - if (unlikely(next->es | prev->es)) - loadsegment(es, next->es); - - savesegment(ds, prev->ds); - if (unlikely(next->ds | prev->ds)) - loadsegment(ds, next->ds); - - /* We must save %fs and %gs before load_TLS() because * %fs and %gs may be cleared by load_TLS(). 
 *
@@ -309,41 +294,101 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	savesegment(fs, fsindex);
 	savesegment(gs, gsindex);
 
+	/*
+	 * Load TLS before restoring any segments so that segment loads
+	 * reference the correct GDT entries.
+	 */
 	load_TLS(next, cpu);
 
 	/*
-	 * Leave lazy mode, flushing any hypercalls made here.
-	 * This must be done before restoring TLS segments so
-	 * the GDT and LDT are properly updated, and must be
-	 * done before math_state_restore, so the TS bit is up
-	 * to date.
+	 * Leave lazy mode, flushing any hypercalls made here. This
+	 * must be done after loading TLS entries in the GDT but before
+	 * loading segments that might reference them, and it must
+	 * be done before math_state_restore, so the TS bit is up to
+	 * date.
 	 */
 	arch_end_context_switch(next_p);
 
+	/* Switch DS and ES.
+	 *
+	 * Reading them only returns the selectors, but writing them (if
+	 * nonzero) loads the full descriptor from the GDT or LDT. The
+	 * LDT for next is loaded in switch_mm, and the GDT is loaded
+	 * above.
+	 *
+	 * We therefore need to write new values to the segment
+	 * registers on every context switch unless both the new and old
+	 * values are zero.
+	 *
+	 * Note that we don't need to do anything for CS and SS, as
+	 * those are saved and restored as part of pt_regs.
+	 */
+	savesegment(es, prev->es);
+	if (unlikely(next->es | prev->es))
+		loadsegment(es, next->es);
+
+	savesegment(ds, prev->ds);
+	if (unlikely(next->ds | prev->ds))
+		loadsegment(ds, next->ds);
+
 	/*
 	 * Switch FS and GS.
 	 *
-	 * Segment register != 0 always requires a reload. Also
-	 * reload when it has changed. When prev process used 64bit
-	 * base always reload to avoid an information leak.
+	 * These are even more complicated than DS and ES: they have
+	 * 64-bit bases that are controlled by arch_prctl. Those bases
+	 * only differ from the values in the GDT or LDT if the selector
+	 * is 0.
+	 *
+	 * Loading the segment register resets the hidden base part of
+	 * the register to 0 or the value from the GDT / LDT. If the
+	 * next base address is zero, writing 0 to the segment register
+	 * is much faster than using wrmsr to explicitly zero the base.
+	 *
+	 * The thread_struct.fs and thread_struct.gs values are 0
+	 * if the fs and gs bases respectively are not overridden
+	 * from the values implied by fsindex and gsindex. They
+	 * are nonzero, and store the nonzero base addresses, if
+	 * the bases are overridden.
+	 *
+	 * (fs != 0 && fsindex != 0) || (gs != 0 && gsindex != 0) should
+	 * be impossible.
+	 *
+	 * Therefore we need to reload the segment registers if either
+	 * the old or new selector is nonzero, and we need to override
+	 * the base address if next thread expects it to be overridden.
+	 *
+	 * This code is unnecessarily slow in the case where the old and
+	 * new indexes are zero and the new base is nonzero -- it will
+	 * unnecessarily write 0 to the selector before writing the new
+	 * base address.
+	 *
+	 * Note: This all depends on arch_prctl being the only way that
+	 * user code can override the segment base. Once wrfsbase and
+	 * wrgsbase are enabled, most of this code will need to change.
 	 */
 	if (unlikely(fsindex | next->fsindex | prev->fs)) {
 		loadsegment(fs, next->fsindex);
+
 		/*
-		 * Check if the user used a selector != 0; if yes
-		 *  clear 64bit base, since overloaded base is always
-		 *  mapped to the Null selector
+		 * If user code wrote a nonzero value to FS, then it also
+		 * cleared the overridden base address.
+		 *
+		 * XXX: if user code wrote 0 to FS and cleared the base
+		 * address itself, we won't notice and we'll incorrectly
+		 * restore the prior base address next time we reschedule
+		 * the process.
+		 */
 		if (fsindex)
 			prev->fs = 0;
 	}
-	/* when next process has a 64bit base use it */
 	if (next->fs)
 		wrmsrl(MSR_FS_BASE, next->fs);
 	prev->fsindex = fsindex;
 
 	if (unlikely(gsindex | next->gsindex | prev->gs)) {
 		load_gs_index(next->gsindex);
+
+		/* This works (and fails) the same way as fsindex above. */
 		if (gsindex)
 			prev->gs = 0;
 	}
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index ab08aa2276fb..ab4734e5411d 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -960,6 +960,8 @@ void __init setup_arch(char **cmdline_p)
 	init_mm.end_data = (unsigned long) _edata;
 	init_mm.brk = _brk_end;
 
+	mpx_mm_init(&init_mm);
+
 	code_resource.start = __pa_symbol(_text);
 	code_resource.end = __pa_symbol(_etext)-1;
 	data_resource.start = __pa_symbol(_etext);
@@ -1190,9 +1192,7 @@ void __init setup_arch(char **cmdline_p)
 
 	tboot_probe();
 
-#ifdef CONFIG_X86_64
 	map_vsyscall();
-#endif
 
 	generic_apic_probe();
 
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 5cdff0357746..e4fcb87ba7a6 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -30,7 +30,7 @@ EXPORT_PER_CPU_SYMBOL(cpu_number);
 #define BOOT_PERCPU_OFFSET 0
 #endif
 
-DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
+DEFINE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
 
 unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 668d8f2a8781..7a8f5845e8eb 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -99,7 +99,7 @@ EXPORT_PER_CPU_SYMBOL(cpu_core_map);
 DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
 
 /* Per CPU bogomips and other parameters */
-DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
+DEFINE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
 EXPORT_PER_CPU_SYMBOL(cpu_info);
 
 atomic_t init_deasserted;
diff --git a/arch/x86/kernel/sysfb.c b/arch/x86/kernel/sysfb.c
index 193ec2ce46c7..160386e9fc17 100644
--- a/arch/x86/kernel/sysfb.c
+++ b/arch/x86/kernel/sysfb.c
@@ -67,7 +67,7 @@ static __init int sysfb_init(void)
 	pd = platform_device_register_resndata(NULL, name, 0,
 					       NULL, 0, si, sizeof(*si));
 
-	return IS_ERR(pd) ?
PTR_ERR(pd) : 0; + return PTR_ERR_OR_ZERO(pd); } /* must execute after PCI subsystem for EFI quirks */ diff --git a/arch/x86/kernel/sysfb_simplefb.c b/arch/x86/kernel/sysfb_simplefb.c index 86179d409893..764a29f84de7 100644 --- a/arch/x86/kernel/sysfb_simplefb.c +++ b/arch/x86/kernel/sysfb_simplefb.c @@ -88,8 +88,5 @@ __init int create_simplefb(const struct screen_info *si, pd = platform_device_register_resndata(NULL, "simple-framebuffer", 0, &res, 1, mode, sizeof(*mode)); - if (IS_ERR(pd)) - return PTR_ERR(pd); - - return 0; + return PTR_ERR_OR_ZERO(pd); } diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c index 0fa29609b2c4..25adc0e16eaa 100644 --- a/arch/x86/kernel/time.c +++ b/arch/x86/kernel/time.c @@ -23,7 +23,7 @@ #include <asm/time.h> #ifdef CONFIG_X86_64 -__visible DEFINE_VVAR(volatile unsigned long, jiffies) = INITIAL_JIFFIES; +__visible volatile unsigned long jiffies __cacheline_aligned = INITIAL_JIFFIES; #endif unsigned long profile_pc(struct pt_regs *regs) diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c index f7fec09e3e3a..3e551eee87b9 100644 --- a/arch/x86/kernel/tls.c +++ b/arch/x86/kernel/tls.c @@ -27,6 +27,43 @@ static int get_free_idx(void) return -ESRCH; } +static bool tls_desc_okay(const struct user_desc *info) +{ + if (LDT_empty(info)) + return true; + + /* + * espfix is required for 16-bit data segments, but espfix + * only works for LDT segments. + */ + if (!info->seg_32bit) + return false; + + /* Only allow data segments in the TLS array. */ + if (info->contents > 1) + return false; + + /* + * Non-present segments with DPL 3 present an interesting attack + * surface. The kernel should handle such segments correctly, + * but TLS is very difficult to protect in a sandbox, so prevent + * such segments from being created. + * + * If userspace needs to remove a TLS entry, it can still delete + * it outright. + */ + if (info->seg_not_present) + return false; + +#ifdef CONFIG_X86_64 + /* The L bit makes no sense for data. 
*/
+	if (info->lm)
+		return false;
+#endif
+
+	return true;
+}
+
 static void set_tls_desc(struct task_struct *p, int idx,
 			 const struct user_desc *info, int n)
 {
@@ -66,6 +103,9 @@ int do_set_thread_area(struct task_struct *p, int idx,
 	if (copy_from_user(&info, u_info, sizeof(info)))
 		return -EFAULT;
 
+	if (!tls_desc_okay(&info))
+		return -EINVAL;
+
 	if (idx == -1)
 		idx = info.entry_number;
 
@@ -192,6 +232,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
 {
 	struct user_desc infobuf[GDT_ENTRY_TLS_ENTRIES];
 	const struct user_desc *info;
+	int i;
 
 	if (pos >= GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) ||
 	    (pos % sizeof(struct user_desc)) != 0 ||
@@ -205,6 +246,10 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
 	else
 		info = infobuf;
 
+	for (i = 0; i < count / sizeof(struct user_desc); i++)
+		if (!tls_desc_okay(info + i))
+			return -EINVAL;
+
 	set_tls_desc(target,
 		     GDT_ENTRY_TLS_MIN + (pos / sizeof(struct user_desc)),
 		     info, count / sizeof(struct user_desc));
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index de801f22128a..a9ae20579895 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -60,6 +60,7 @@
 #include <asm/fixmap.h>
 #include <asm/mach_traps.h>
 #include <asm/alternative.h>
+#include <asm/mpx.h>
 
 #ifdef CONFIG_X86_64
 #include <asm/x86_init.h>
@@ -228,7 +229,6 @@ dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \
 
 DO_ERROR(X86_TRAP_DE, SIGFPE, "divide error", divide_error)
 DO_ERROR(X86_TRAP_OF, SIGSEGV, "overflow", overflow)
-DO_ERROR(X86_TRAP_BR, SIGSEGV, "bounds", bounds)
 DO_ERROR(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op)
 DO_ERROR(X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun",coprocessor_segment_overrun)
 DO_ERROR(X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS)
@@ -286,6 +286,89 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
 }
 #endif
 
+dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
+{
+	struct task_struct *tsk = current;
+	struct xsave_struct *xsave_buf;
+	enum ctx_state prev_state;
+	struct bndcsr *bndcsr;
+	siginfo_t *info;
+
+	prev_state = exception_enter();
+	if (notify_die(DIE_TRAP, "bounds", regs, error_code,
+			X86_TRAP_BR, SIGSEGV) == NOTIFY_STOP)
+		goto exit;
+	conditional_sti(regs);
+
+	if (!user_mode(regs))
+		die("bounds", regs, error_code);
+
+	if (!cpu_feature_enabled(X86_FEATURE_MPX)) {
+		/* The exception is not from Intel MPX */
+		goto exit_trap;
+	}
+
+	/*
+	 * We need to look at BNDSTATUS to resolve this exception.
+	 * It is not directly accessible, though, so we need to
+	 * do an xsave and then pull it out of the xsave buffer.
+	 */
+	fpu_save_init(&tsk->thread.fpu);
+	xsave_buf = &(tsk->thread.fpu.state->xsave);
+	bndcsr = get_xsave_addr(xsave_buf, XSTATE_BNDCSR);
+	if (!bndcsr)
+		goto exit_trap;
+
+	/*
+	 * The error code field of the BNDSTATUS register communicates status
+	 * information of a bound range exception #BR or operation involving
+	 * bound directory.
+	 */
+	switch (bndcsr->bndstatus & MPX_BNDSTA_ERROR_CODE) {
+	case 2:	/* Bound directory has invalid entry. */
+		if (mpx_handle_bd_fault(xsave_buf))
+			goto exit_trap;
+		break; /* Success, it was handled */
+	case 1: /* Bound violation. */
+		info = mpx_generate_siginfo(regs, xsave_buf);
+		if (IS_ERR(info)) {
+			/*
+			 * We failed to decode the MPX instruction. Act as if
+			 * the exception was not caused by MPX.
+			 */
+			goto exit_trap;
+		}
+		/*
+		 * Success, we decoded the instruction and retrieved
+		 * an 'info' containing the address being accessed
+		 * which caused the exception. This information
+		 * allows an application to possibly handle the
+		 * #BR exception itself.
+		 */
+		do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, error_code, info);
+		kfree(info);
+		break;
+	case 0: /* No exception caused by Intel MPX operations. */
+		goto exit_trap;
+	default:
+		die("bounds", regs, error_code);
+	}
+
+exit:
+	exception_exit(prev_state);
+	return;
+exit_trap:
+	/*
+	 * This path out is for all the cases where we could not
+	 * handle the exception in some way (like allocating a
+	 * table or telling userspace about it). We will also end
+	 * up here if the kernel has MPX turned off at compile
+	 * time.
+	 */
+	do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, error_code, NULL);
+	exception_exit(prev_state);
+}
+
 dotraplinkage void
 do_general_protection(struct pt_regs *regs, long error_code)
 {
@@ -387,7 +470,7 @@ NOKPROBE_SYMBOL(do_int3);
 * for scheduling or signal handling. The actual stack switch is done in
 * entry.S
 */
-asmlinkage __visible struct pt_regs *sync_regs(struct pt_regs *eregs)
+asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs)
 {
 	struct pt_regs *regs = eregs;
 	/* Did already sync */
@@ -413,7 +496,7 @@ struct bad_iret_stack {
 	struct pt_regs regs;
 };
 
-asmlinkage __visible
+asmlinkage __visible notrace
 struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
 {
 	/*
@@ -436,6 +519,7 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
 	BUG_ON(!user_mode_vm(&new_stack->regs));
 	return new_stack;
 }
+NOKPROBE_SYMBOL(fixup_bad_iret);
 #endif
 
 /*
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index 5d1cbfe4ae58..8b96a947021f 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -219,7 +219,7 @@ static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn, bool
 {
 	u32 volatile *good_insns;
 
-	insn_init(insn, auprobe->insn, x86_64);
+	insn_init(insn, auprobe->insn, sizeof(auprobe->insn), x86_64);
 	/* has the side-effect of processing the entire instruction */
 	insn_get_length(insn);
 	if (WARN_ON_ONCE(!insn_complete(insn)))
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 49edf2dd3613..00bf300fd846 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -186,6 +186,8 @@ SECTIONS
 	 * start another segment - init.
 	 */
 	PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
+	ASSERT(SIZEOF(.data..percpu) < CONFIG_PHYSICAL_START,
+	       "per-CPU data too large - increase CONFIG_PHYSICAL_START")
 #endif
 
 	INIT_TEXT_SECTION(PAGE_SIZE)
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index 957779f4eb40..2dcc6ff6fdcc 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -1,59 +1,43 @@
 /*
+ * Copyright (c) 2012-2014 Andy Lutomirski <luto@amacapital.net>
+ *
+ * Based on the original implementation which is:
  * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
  * Copyright 2003 Andi Kleen, SuSE Labs.
  *
- * [ NOTE: this mechanism is now deprecated in favor of the vDSO. ]
+ * Parts of the original code have been moved to arch/x86/vdso/vma.c
+ *
+ * This file implements vsyscall emulation. vsyscalls are a legacy ABI:
+ * Userspace can request certain kernel services by calling fixed
+ * addresses. This concept is problematic:
 *
- * Thanks to hpa@transmeta.com for some useful hint.
- * Special thanks to Ingo Molnar for his early experience with - * a different vsyscall implementation for Linux/IA32 and for the name. + * - It interferes with ASLR. + * - It's awkward to write code that lives in kernel addresses but is + * callable by userspace at fixed addresses. + * - The whole concept is impossible for 32-bit compat userspace. + * - UML cannot easily virtualize a vsyscall. * - * vsyscall 1 is located at -10Mbyte, vsyscall 2 is located - * at virtual address -10Mbyte+1024bytes etc... There are at max 4 - * vsyscalls. One vsyscall can reserve more than 1 slot to avoid - * jumping out of line if necessary. We cannot add more with this - * mechanism because older kernels won't return -ENOSYS. + * As of mid-2014, I believe that there is no new userspace code that + * will use a vsyscall if the vDSO is present. I hope that there will + * soon be no new userspace code that will ever use a vsyscall. * - * Note: the concept clashes with user mode linux. UML users should - * use the vDSO. + * The code in this file emulates vsyscalls when notified of a page + * fault to a vsyscall address. */ -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/time.h> -#include <linux/init.h> #include <linux/kernel.h> #include <linux/timer.h> -#include <linux/seqlock.h> -#include <linux/jiffies.h> -#include <linux/sysctl.h> -#include <linux/topology.h> -#include <linux/timekeeper_internal.h> -#include <linux/getcpu.h> -#include <linux/cpu.h> -#include <linux/smp.h> -#include <linux/notifier.h> #include <linux/syscalls.h> #include <linux/ratelimit.h> #include <asm/vsyscall.h> -#include <asm/pgtable.h> -#include <asm/compat.h> -#include <asm/page.h> #include <asm/unistd.h> #include <asm/fixmap.h> -#include <asm/errno.h> -#include <asm/io.h> -#include <asm/segment.h> -#include <asm/desc.h> -#include <asm/topology.h> #include <asm/traps.h> #define CREATE_TRACE_POINTS #include "vsyscall_trace.h" -DEFINE_VVAR(int, vgetcpu_mode); - static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE; static int __init vsyscall_setup(char *str) @@ -222,6 +206,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address) "seccomp tried to change syscall nr or ip"); do_exit(SIGSYS); } + regs->orig_ax = -1; if (tmp) goto do_ret; /* skip requested */ @@ -284,46 +269,54 @@ sigsegv: } /* - * Assume __initcall executes before all user space. Hopefully kmod - * doesn't violate that. We'll find out if it does. + * A pseudo VMA to allow ptrace access for the vsyscall page. This only + * covers the 64bit vsyscall page now. 32bit has a real VMA now and does + * not need special handling anymore: */ -static void vsyscall_set_cpu(int cpu) +static const char *gate_vma_name(struct vm_area_struct *vma) { - unsigned long d; - unsigned long node = 0; -#ifdef CONFIG_NUMA - node = cpu_to_node(cpu); -#endif - if (cpu_has(&cpu_data(cpu), X86_FEATURE_RDTSCP)) - write_rdtscp_aux((node << 12) | cpu); - - /* - * Store cpu number in limit so that it can be loaded quickly - * in user space in vgetcpu. 
(12 bits for the CPU and 8 bits for the node) - */ - d = 0x0f40000000000ULL; - d |= cpu; - d |= (node & 0xf) << 12; - d |= (node >> 4) << 48; - - write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S); + return "[vsyscall]"; } - -static void cpu_vsyscall_init(void *arg) +static struct vm_operations_struct gate_vma_ops = { + .name = gate_vma_name, +}; +static struct vm_area_struct gate_vma = { + .vm_start = VSYSCALL_ADDR, + .vm_end = VSYSCALL_ADDR + PAGE_SIZE, + .vm_page_prot = PAGE_READONLY_EXEC, + .vm_flags = VM_READ | VM_EXEC, + .vm_ops = &gate_vma_ops, +}; + +struct vm_area_struct *get_gate_vma(struct mm_struct *mm) { - /* preemption should be already off */ - vsyscall_set_cpu(raw_smp_processor_id()); +#ifdef CONFIG_IA32_EMULATION + if (!mm || mm->context.ia32_compat) + return NULL; +#endif + if (vsyscall_mode == NONE) + return NULL; + return &gate_vma; } -static int -cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg) +int in_gate_area(struct mm_struct *mm, unsigned long addr) { - long cpu = (long)arg; + struct vm_area_struct *vma = get_gate_vma(mm); + + if (!vma) + return 0; - if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) - smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 1); + return (addr >= vma->vm_start) && (addr < vma->vm_end); +} - return NOTIFY_DONE; +/* + * Use this when you have no reliable mm, typically from interrupt + * context. It is less reliable than using a task's mm and may give + * false positives. + */ +int in_gate_area_no_mm(unsigned long addr) +{ + return vsyscall_mode != NONE && (addr & PAGE_MASK) == VSYSCALL_ADDR; } void __init map_vsyscall(void) @@ -331,24 +324,12 @@ void __init map_vsyscall(void) extern char __vsyscall_page; unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page); - __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall, - vsyscall_mode == NATIVE - ? PAGE_KERNEL_VSYSCALL - : PAGE_KERNEL_VVAR); + if (vsyscall_mode != NONE) + __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall, + vsyscall_mode == NATIVE + ? 
PAGE_KERNEL_VSYSCALL + : PAGE_KERNEL_VVAR); + BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) != (unsigned long)VSYSCALL_ADDR); } - -static int __init vsyscall_init(void) -{ - cpu_notifier_register_begin(); - - on_each_cpu(cpu_vsyscall_init, NULL, 1); - /* notifier priority > KVM */ - __hotcpu_notifier(cpu_vsyscall_notifier, 30); - - cpu_notifier_register_done(); - - return 0; -} -__initcall(vsyscall_init); diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c index e48b674639cc..234b0722de53 100644 --- a/arch/x86/kernel/x86_init.c +++ b/arch/x86/kernel/x86_init.c @@ -116,8 +116,6 @@ struct x86_msi_ops x86_msi = { .teardown_msi_irqs = default_teardown_msi_irqs, .restore_msi_irqs = default_restore_msi_irqs, .setup_hpet_msi = default_setup_hpet_msi, - .msi_mask_irq = default_msi_mask_irq, - .msix_mask_irq = default_msix_mask_irq, }; /* MSI arch specific hooks */ @@ -140,14 +138,6 @@ void arch_restore_msi_irqs(struct pci_dev *dev) { x86_msi.restore_msi_irqs(dev); } -u32 arch_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) -{ - return x86_msi.msi_mask_irq(desc, mask, flag); -} -u32 arch_msix_mask_irq(struct msi_desc *desc, u32 flag) -{ - return x86_msi.msix_mask_irq(desc, flag); -} #endif struct x86_io_apic_ops x86_io_apic_ops = { diff --git a/arch/x86/kvm/mmutrace.h b/arch/x86/kvm/mmutrace.h index 5aaf35641768..ce463a9cc8fb 100644 --- a/arch/x86/kvm/mmutrace.h +++ b/arch/x86/kvm/mmutrace.h @@ -22,7 +22,7 @@ __entry->unsync = sp->unsync; #define KVM_MMU_PAGE_PRINTK() ({ \ - const u32 saved_len = p->len; \ + const char *saved_ptr = trace_seq_buffer_ptr(p); \ static const char *access_str[] = { \ "---", "--x", "w--", "w-x", "-u-", "-ux", "wu-", "wux" \ }; \ @@ -41,7 +41,7 @@ role.nxe ? "" : "!", \ __entry->root_count, \ __entry->unsync ? "unsync" : "sync", 0); \ - p->buffer + saved_len; \ + saved_ptr; \ }) #define kvm_mmu_trace_pferr_flags \ diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile index db92793b7e23..1530afb07c85 100644 --- a/arch/x86/lib/Makefile +++ b/arch/x86/lib/Makefile @@ -23,7 +23,7 @@ lib-y += memcpy_$(BITS).o lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o -obj-y += msr.o msr-reg.o msr-reg-export.o hash.o +obj-y += msr.o msr-reg.o msr-reg-export.o ifeq ($(CONFIG_X86_32),y) obj-y += atomic64_32.o diff --git a/arch/x86/lib/hash.c b/arch/x86/lib/hash.c deleted file mode 100644 index ff4fa51a5b1f..000000000000 --- a/arch/x86/lib/hash.c +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Some portions derived from code covered by the following notice: - * - * Copyright (c) 2010-2013 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include <linux/hash.h> -#include <linux/init.h> - -#include <asm/processor.h> -#include <asm/cpufeature.h> -#include <asm/hash.h> - -static inline u32 crc32_u32(u32 crc, u32 val) -{ -#ifdef CONFIG_AS_CRC32 - asm ("crc32l %1,%0\n" : "+r" (crc) : "rm" (val)); -#else - asm (".byte 0xf2, 0x0f, 0x38, 0xf1, 0xc1" : "+a" (crc) : "c" (val)); -#endif - return crc; -} - -static u32 intel_crc4_2_hash(const void *data, u32 len, u32 seed) -{ - const u32 *p32 = (const u32 *) data; - u32 i, tmp = 0; - - for (i = 0; i < len / 4; i++) - seed = crc32_u32(seed, *p32++); - - switch (len & 3) { - case 3: - tmp |= *((const u8 *) p32 + 2) << 16; - /* fallthrough */ - case 2: - tmp |= *((const u8 *) p32 + 1) << 8; - /* fallthrough */ - case 1: - tmp |= *((const u8 *) p32); - seed = crc32_u32(seed, tmp); - break; - } - - return seed; -} - -static u32 intel_crc4_2_hash2(const u32 *data, u32 len, u32 seed) -{ - const u32 *p32 = (const u32 *) data; - u32 i; - - for (i = 0; i < len; i++) - seed = crc32_u32(seed, *p32++); - - return seed; -} - -void __init setup_arch_fast_hash(struct fast_hash_ops *ops) -{ - if (cpu_has_xmm4_2) { - ops->hash = intel_crc4_2_hash; - ops->hash2 = intel_crc4_2_hash2; - } -} diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c index 54fcffed28ed..2480978b31cc 100644 --- a/arch/x86/lib/insn.c +++ b/arch/x86/lib/insn.c @@ -28,7 +28,7 @@ /* Verify next sizeof(t) bytes can be on the same instruction */ #define validate_next(t, insn, n) \ - ((insn)->next_byte + sizeof(t) + n - (insn)->kaddr <= MAX_INSN_SIZE) + ((insn)->next_byte + sizeof(t) + n < (insn)->end_kaddr) #define __get_next(t, insn) \ ({ t r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; }) @@ -50,10 +50,11 @@ * @kaddr: address (in kernel memory) of instruction (or copy thereof) * @x86_64: !0 for 64-bit kernel or 64-bit app */ -void insn_init(struct insn *insn, const void *kaddr, int x86_64) +void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64) { memset(insn, 0, sizeof(*insn)); insn->kaddr = kaddr; + insn->end_kaddr = kaddr + buf_len; insn->next_byte = kaddr; insn->x86_64 = x86_64 ? 
1 : 0; insn->opnd_bytes = 4; diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile index 6a19ad9f370d..ecfdc46a024a 100644 --- a/arch/x86/mm/Makefile +++ b/arch/x86/mm/Makefile @@ -30,3 +30,5 @@ obj-$(CONFIG_ACPI_NUMA) += srat.o obj-$(CONFIG_NUMA_EMU) += numa_emulation.o obj-$(CONFIG_MEMTEST) += memtest.o + +obj-$(CONFIG_X86_INTEL_MPX) += mpx.o diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c index 95a427e57887..f0cedf3395af 100644 --- a/arch/x86/mm/dump_pagetables.c +++ b/arch/x86/mm/dump_pagetables.c @@ -76,6 +76,9 @@ static struct addr_marker address_markers[] = { # ifdef CONFIG_X86_ESPFIX64 { ESPFIX_BASE_ADDR, "ESPfix Area", 16 }, # endif +# ifdef CONFIG_EFI + { EFI_VA_END, "EFI Runtime Services" }, +# endif { __START_KERNEL_map, "High Kernel Mapping" }, { MODULES_VADDR, "Modules" }, { MODULES_END, "End Modules" }, @@ -126,7 +129,7 @@ static void printk_prot(struct seq_file *m, pgprot_t prot, int level, bool dmsg) if (!pgprot_val(prot)) { /* Not present */ - pt_dump_cont_printf(m, dmsg, " "); + pt_dump_cont_printf(m, dmsg, " "); } else { if (pr & _PAGE_USER) pt_dump_cont_printf(m, dmsg, "USR "); @@ -145,18 +148,16 @@ static void printk_prot(struct seq_file *m, pgprot_t prot, int level, bool dmsg) else pt_dump_cont_printf(m, dmsg, " "); - /* Bit 9 has a different meaning on level 3 vs 4 */ - if (level <= 3) { - if (pr & _PAGE_PSE) - pt_dump_cont_printf(m, dmsg, "PSE "); - else - pt_dump_cont_printf(m, dmsg, " "); - } else { - if (pr & _PAGE_PAT) - pt_dump_cont_printf(m, dmsg, "pat "); - else - pt_dump_cont_printf(m, dmsg, " "); - } + /* Bit 7 has a different meaning on level 3 vs 4 */ + if (level <= 3 && pr & _PAGE_PSE) + pt_dump_cont_printf(m, dmsg, "PSE "); + else + pt_dump_cont_printf(m, dmsg, " "); + if ((level == 4 && pr & _PAGE_PAT) || + ((level == 3 || level == 2) && pr & _PAGE_PAT_LARGE)) + pt_dump_cont_printf(m, dmsg, "pat "); + else + pt_dump_cont_printf(m, dmsg, " "); if (pr & _PAGE_GLOBAL) pt_dump_cont_printf(m, dmsg, "GLB "); else diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index d973e61e450d..b74a7e130b03 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -844,11 +844,8 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address, unsigned int fault) { struct task_struct *tsk = current; - struct mm_struct *mm = tsk->mm; int code = BUS_ADRERR; - up_read(&mm->mmap_sem); - /* Kernel mode? Handle exceptions or die: */ if (!(error_code & PF_USER)) { no_context(regs, error_code, address, SIGBUS, BUS_ADRERR); @@ -879,7 +876,6 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code, unsigned long address, unsigned int fault) { if (fatal_signal_pending(current) && !(error_code & PF_USER)) { - up_read(¤t->mm->mmap_sem); no_context(regs, error_code, address, 0, 0); return; } @@ -887,14 +883,11 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code, if (fault & VM_FAULT_OOM) { /* Kernel mode? 
Handle exceptions or die: */ if (!(error_code & PF_USER)) { - up_read(¤t->mm->mmap_sem); no_context(regs, error_code, address, SIGSEGV, SEGV_MAPERR); return; } - up_read(¤t->mm->mmap_sem); - /* * We ran out of memory, call the OOM killer, and return the * userspace (which will retry the fault, or kill us if we got @@ -1062,7 +1055,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code, struct vm_area_struct *vma; struct task_struct *tsk; struct mm_struct *mm; - int fault; + int fault, major = 0; unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; tsk = current; @@ -1237,47 +1230,50 @@ good_area: * we get VM_FAULT_RETRY back, the mmap_sem has been unlocked. */ fault = handle_mm_fault(mm, vma, address, flags); + major |= fault & VM_FAULT_MAJOR; /* - * If we need to retry but a fatal signal is pending, handle the - * signal first. We do not need to release the mmap_sem because it - * would already be released in __lock_page_or_retry in mm/filemap.c. + * If we need to retry the mmap_sem has already been released, + * and if there is a fatal signal pending there is no guarantee + * that we made any progress. Handle this case first. */ - if (unlikely((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))) + if (unlikely(fault & VM_FAULT_RETRY)) { + /* Retry at most once */ + if (flags & FAULT_FLAG_ALLOW_RETRY) { + flags &= ~FAULT_FLAG_ALLOW_RETRY; + flags |= FAULT_FLAG_TRIED; + if (!fatal_signal_pending(tsk)) + goto retry; + } + + /* User mode? Just return to handle the fatal exception */ + if (fault & FAULT_FLAG_USER) + return; + + /* Not returning to user mode? Handle exceptions or die: */ + no_context(regs, error_code, address, SIGBUS, BUS_ADRERR); return; + } + up_read(&mm->mmap_sem); if (unlikely(fault & VM_FAULT_ERROR)) { mm_fault_error(regs, error_code, address, fault); return; } /* - * Major/minor page fault accounting is only done on the - * initial attempt. If we go through a retry, it is extremely - * likely that the page will be found in page cache at that point. + * Major/minor page fault accounting. If any of the events + * returned VM_FAULT_MAJOR, we account it as a major fault. */ - if (flags & FAULT_FLAG_ALLOW_RETRY) { - if (fault & VM_FAULT_MAJOR) { - tsk->maj_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, - regs, address); - } else { - tsk->min_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, - regs, address); - } - if (fault & VM_FAULT_RETRY) { - /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk - * of starvation. */ - flags &= ~FAULT_FLAG_ALLOW_RETRY; - flags |= FAULT_FLAG_TRIED; - goto retry; - } + if (major) { + tsk->maj_flt++; + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address); + } else { + tsk->min_flt++; + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address); } check_v8086_mode(regs, address, tsk); - - up_read(&mm->mmap_sem); } NOKPROBE_SYMBOL(__do_page_fault); diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 66dba36f2343..a97ee0801475 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -27,6 +27,35 @@ #include "mm_internal.h" +/* + * Tables translating between page_cache_type_t and pte encoding. + * Minimal supported modes are defined statically, modified if more supported + * cache modes are available. + * Index into __cachemode2pte_tbl is the cachemode. + * Index into __pte2cachemode_tbl are the caching attribute bits of the pte + * (_PAGE_PWT, _PAGE_PCD, _PAGE_PAT) at index bit positions 0, 1, 2. 
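+ *
+ * For example, working from the tables below: _PAGE_CACHE_MODE_UC is
+ * encoded as _PAGE_PCD | _PAGE_PWT, so its pte caching bits select
+ * index 3 (binary 011) of __pte2cachemode_tbl.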
+ */ +uint16_t __cachemode2pte_tbl[_PAGE_CACHE_MODE_NUM] = { + [_PAGE_CACHE_MODE_WB] = 0, + [_PAGE_CACHE_MODE_WC] = _PAGE_PWT, + [_PAGE_CACHE_MODE_UC_MINUS] = _PAGE_PCD, + [_PAGE_CACHE_MODE_UC] = _PAGE_PCD | _PAGE_PWT, + [_PAGE_CACHE_MODE_WT] = _PAGE_PCD, + [_PAGE_CACHE_MODE_WP] = _PAGE_PCD, +}; +EXPORT_SYMBOL_GPL(__cachemode2pte_tbl); +uint8_t __pte2cachemode_tbl[8] = { + [__pte2cm_idx(0)] = _PAGE_CACHE_MODE_WB, + [__pte2cm_idx(_PAGE_PWT)] = _PAGE_CACHE_MODE_WC, + [__pte2cm_idx(_PAGE_PCD)] = _PAGE_CACHE_MODE_UC_MINUS, + [__pte2cm_idx(_PAGE_PWT | _PAGE_PCD)] = _PAGE_CACHE_MODE_UC, + [__pte2cm_idx(_PAGE_PAT)] = _PAGE_CACHE_MODE_WB, + [__pte2cm_idx(_PAGE_PWT | _PAGE_PAT)] = _PAGE_CACHE_MODE_WC, + [__pte2cm_idx(_PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC_MINUS, + [__pte2cm_idx(_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC, +}; +EXPORT_SYMBOL_GPL(__pte2cachemode_tbl); + static unsigned long __initdata pgt_buf_start; static unsigned long __initdata pgt_buf_end; static unsigned long __initdata pgt_buf_top; @@ -674,10 +703,10 @@ void __init zone_sizes_init(void) memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); #ifdef CONFIG_ZONE_DMA - max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN; + max_zone_pfns[ZONE_DMA] = min(MAX_DMA_PFN, max_low_pfn); #endif #ifdef CONFIG_ZONE_DMA32 - max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN; + max_zone_pfns[ZONE_DMA32] = min(MAX_DMA32_PFN, max_low_pfn); #endif max_zone_pfns[ZONE_NORMAL] = max_low_pfn; #ifdef CONFIG_HIGHMEM @@ -687,3 +716,11 @@ void __init zone_sizes_init(void) free_area_init_nodes(max_zone_pfns); } +void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache) +{ + /* entry 0 MUST be WB (hardwired to speed up translations) */ + BUG_ON(!entry && cache != _PAGE_CACHE_MODE_WB); + + __cachemode2pte_tbl[cache] = __cm_idx2pte(entry); + __pte2cachemode_tbl[entry] = cache; +} diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 4e5dfec750fc..30eb05ae7061 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -52,7 +52,6 @@ #include <asm/numa.h> #include <asm/cacheflush.h> #include <asm/init.h> -#include <asm/uv/uv.h> #include <asm/setup.h> #include "mm_internal.h" @@ -338,12 +337,15 @@ pte_t * __init populate_extra_pte(unsigned long vaddr) * Create large page table mappings for a range of physical addresses. */ static void __init __init_extra_mapping(unsigned long phys, unsigned long size, - pgprot_t prot) + enum page_cache_mode cache) { pgd_t *pgd; pud_t *pud; pmd_t *pmd; + pgprot_t prot; + pgprot_val(prot) = pgprot_val(PAGE_KERNEL_LARGE) | + pgprot_val(pgprot_4k_2_large(cachemode2pgprot(cache))); BUG_ON((phys & ~PMD_MASK) || (size & ~PMD_MASK)); for (; size; phys += PMD_SIZE, size -= PMD_SIZE) { pgd = pgd_offset_k((unsigned long)__va(phys)); @@ -366,12 +368,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size, void __init init_extra_mapping_wb(unsigned long phys, unsigned long size) { - __init_extra_mapping(phys, size, PAGE_KERNEL_LARGE); + __init_extra_mapping(phys, size, _PAGE_CACHE_MODE_WB); } void __init init_extra_mapping_uc(unsigned long phys, unsigned long size) { - __init_extra_mapping(phys, size, PAGE_KERNEL_LARGE_NOCACHE); + __init_extra_mapping(phys, size, _PAGE_CACHE_MODE_UC); } /* @@ -1202,66 +1204,15 @@ int kern_addr_valid(unsigned long addr) return pfn_valid(pte_pfn(*pte)); } -/* - * A pseudo VMA to allow ptrace access for the vsyscall page. This only - * covers the 64bit vsyscall page now. 
32bit has a real VMA now and does - * not need special handling anymore: - */ -static const char *gate_vma_name(struct vm_area_struct *vma) -{ - return "[vsyscall]"; -} -static struct vm_operations_struct gate_vma_ops = { - .name = gate_vma_name, -}; -static struct vm_area_struct gate_vma = { - .vm_start = VSYSCALL_ADDR, - .vm_end = VSYSCALL_ADDR + PAGE_SIZE, - .vm_page_prot = PAGE_READONLY_EXEC, - .vm_flags = VM_READ | VM_EXEC, - .vm_ops = &gate_vma_ops, -}; - -struct vm_area_struct *get_gate_vma(struct mm_struct *mm) -{ -#ifdef CONFIG_IA32_EMULATION - if (!mm || mm->context.ia32_compat) - return NULL; -#endif - return &gate_vma; -} - -int in_gate_area(struct mm_struct *mm, unsigned long addr) -{ - struct vm_area_struct *vma = get_gate_vma(mm); - - if (!vma) - return 0; - - return (addr >= vma->vm_start) && (addr < vma->vm_end); -} - -/* - * Use this when you have no reliable mm, typically from interrupt - * context. It is less reliable than using a task's mm and may give - * false positives. - */ -int in_gate_area_no_mm(unsigned long addr) -{ - return (addr & PAGE_MASK) == VSYSCALL_ADDR; -} - static unsigned long probe_memory_block_size(void) { /* start from 2g */ unsigned long bz = 1UL<<31; -#ifdef CONFIG_X86_UV - if (is_uv_system()) { - printk(KERN_INFO "UV: memory block size 2GB\n"); + if (totalram_pages >= (64ULL << (30 - PAGE_SHIFT))) { + pr_info("Using 2GB memory block size for large-memory system\n"); return 2UL * 1024 * 1024 * 1024; } -#endif /* less than 64g installed */ if ((max_pfn << PAGE_SHIFT) < (16UL << 32)) diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c index 7b179b499fa3..9ca35fc60cfe 100644 --- a/arch/x86/mm/iomap_32.c +++ b/arch/x86/mm/iomap_32.c @@ -33,17 +33,17 @@ static int is_io_mapping_possible(resource_size_t base, unsigned long size) int iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot) { - unsigned long flag = _PAGE_CACHE_WC; + enum page_cache_mode pcm = _PAGE_CACHE_MODE_WC; int ret; if (!is_io_mapping_possible(base, size)) return -EINVAL; - ret = io_reserve_memtype(base, base + size, &flag); + ret = io_reserve_memtype(base, base + size, &pcm); if (ret) return ret; - *prot = __pgprot(__PAGE_KERNEL | flag); + *prot = __pgprot(__PAGE_KERNEL | cachemode2protval(pcm)); return 0; } EXPORT_SYMBOL_GPL(iomap_create_wc); @@ -82,8 +82,10 @@ iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot) * MTRR is UC or WC. UC_MINUS gets the real intention, of the * user, which is "WC if the MTRR is WC, UC if you can't do that." */ - if (!pat_enabled && pgprot_val(prot) == pgprot_val(PAGE_KERNEL_WC)) - prot = PAGE_KERNEL_UC_MINUS; + if (!pat_enabled && pgprot_val(prot) == + (__PAGE_KERNEL | cachemode2protval(_PAGE_CACHE_MODE_WC))) + prot = __pgprot(__PAGE_KERNEL | + cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS)); return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, prot); } diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index b12f43c192cf..fdf617c00e2f 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -29,20 +29,20 @@ * conflicts. 
*/ int ioremap_change_attr(unsigned long vaddr, unsigned long size, - unsigned long prot_val) + enum page_cache_mode pcm) { unsigned long nrpages = size >> PAGE_SHIFT; int err; - switch (prot_val) { - case _PAGE_CACHE_UC: + switch (pcm) { + case _PAGE_CACHE_MODE_UC: default: err = _set_memory_uc(vaddr, nrpages); break; - case _PAGE_CACHE_WC: + case _PAGE_CACHE_MODE_WC: err = _set_memory_wc(vaddr, nrpages); break; - case _PAGE_CACHE_WB: + case _PAGE_CACHE_MODE_WB: err = _set_memory_wb(vaddr, nrpages); break; } @@ -75,14 +75,14 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages, * caller shouldn't need to know that small detail. */ static void __iomem *__ioremap_caller(resource_size_t phys_addr, - unsigned long size, unsigned long prot_val, void *caller) + unsigned long size, enum page_cache_mode pcm, void *caller) { unsigned long offset, vaddr; resource_size_t pfn, last_pfn, last_addr; const resource_size_t unaligned_phys_addr = phys_addr; const unsigned long unaligned_size = size; struct vm_struct *area; - unsigned long new_prot_val; + enum page_cache_mode new_pcm; pgprot_t prot; int retval; void __iomem *ret_addr; @@ -134,38 +134,40 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr, size = PAGE_ALIGN(last_addr+1) - phys_addr; retval = reserve_memtype(phys_addr, (u64)phys_addr + size, - prot_val, &new_prot_val); + pcm, &new_pcm); if (retval) { printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval); return NULL; } - if (prot_val != new_prot_val) { - if (!is_new_memtype_allowed(phys_addr, size, - prot_val, new_prot_val)) { + if (pcm != new_pcm) { + if (!is_new_memtype_allowed(phys_addr, size, pcm, new_pcm)) { printk(KERN_ERR - "ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n", + "ioremap error for 0x%llx-0x%llx, requested 0x%x, got 0x%x\n", (unsigned long long)phys_addr, (unsigned long long)(phys_addr + size), - prot_val, new_prot_val); + pcm, new_pcm); goto err_free_memtype; } - prot_val = new_prot_val; + pcm = new_pcm; } - switch (prot_val) { - case _PAGE_CACHE_UC: + prot = PAGE_KERNEL_IO; + switch (pcm) { + case _PAGE_CACHE_MODE_UC: default: - prot = PAGE_KERNEL_IO_NOCACHE; + prot = __pgprot(pgprot_val(prot) | + cachemode2protval(_PAGE_CACHE_MODE_UC)); break; - case _PAGE_CACHE_UC_MINUS: - prot = PAGE_KERNEL_IO_UC_MINUS; + case _PAGE_CACHE_MODE_UC_MINUS: + prot = __pgprot(pgprot_val(prot) | + cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS)); break; - case _PAGE_CACHE_WC: - prot = PAGE_KERNEL_IO_WC; + case _PAGE_CACHE_MODE_WC: + prot = __pgprot(pgprot_val(prot) | + cachemode2protval(_PAGE_CACHE_MODE_WC)); break; - case _PAGE_CACHE_WB: - prot = PAGE_KERNEL_IO; + case _PAGE_CACHE_MODE_WB: break; } @@ -178,7 +180,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr, area->phys_addr = phys_addr; vaddr = (unsigned long) area->addr; - if (kernel_map_sync_memtype(phys_addr, size, prot_val)) + if (kernel_map_sync_memtype(phys_addr, size, pcm)) goto err_free_area; if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) @@ -227,14 +229,14 @@ void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size) { /* * Ideally, this should be: - * pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS; + * pat_enabled ? _PAGE_CACHE_MODE_UC : _PAGE_CACHE_MODE_UC_MINUS; * * Till we fix all X drivers to use ioremap_wc(), we will use * UC MINUS. 
*/ - unsigned long val = _PAGE_CACHE_UC_MINUS; + enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC_MINUS; - return __ioremap_caller(phys_addr, size, val, + return __ioremap_caller(phys_addr, size, pcm, __builtin_return_address(0)); } EXPORT_SYMBOL(ioremap_nocache); @@ -252,7 +254,7 @@ EXPORT_SYMBOL(ioremap_nocache); void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size) { if (pat_enabled) - return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC, + return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC, __builtin_return_address(0)); else return ioremap_nocache(phys_addr, size); @@ -261,7 +263,7 @@ EXPORT_SYMBOL(ioremap_wc); void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size) { - return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB, + return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB, __builtin_return_address(0)); } EXPORT_SYMBOL(ioremap_cache); @@ -269,7 +271,8 @@ EXPORT_SYMBOL(ioremap_cache); void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size, unsigned long prot_val) { - return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK), + return __ioremap_caller(phys_addr, size, + pgprot2cachemode(__pgprot(prot_val)), __builtin_return_address(0)); } EXPORT_SYMBOL(ioremap_prot); diff --git a/arch/x86/mm/mm_internal.h b/arch/x86/mm/mm_internal.h index 6b563a118891..62474ba66c8e 100644 --- a/arch/x86/mm/mm_internal.h +++ b/arch/x86/mm/mm_internal.h @@ -16,4 +16,6 @@ void zone_sizes_init(void); extern int after_bootmem; +void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache); + #endif /* __X86_MM_INTERNAL_H */ diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c new file mode 100644 index 000000000000..67ebf5751222 --- /dev/null +++ b/arch/x86/mm/mpx.c @@ -0,0 +1,928 @@ +/* + * mpx.c - Memory Protection eXtensions + * + * Copyright (c) 2014, Intel Corporation. + * Qiaowei Ren <qiaowei.ren@intel.com> + * Dave Hansen <dave.hansen@intel.com> + */ +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/syscalls.h> +#include <linux/sched/sysctl.h> + +#include <asm/i387.h> +#include <asm/insn.h> +#include <asm/mman.h> +#include <asm/mmu_context.h> +#include <asm/mpx.h> +#include <asm/processor.h> +#include <asm/fpu-internal.h> + +static const char *mpx_mapping_name(struct vm_area_struct *vma) +{ + return "[mpx]"; +} + +static struct vm_operations_struct mpx_vma_ops = { + .name = mpx_mapping_name, +}; + +static int is_mpx_vma(struct vm_area_struct *vma) +{ + return (vma->vm_ops == &mpx_vma_ops); +} + +/* + * This is really a simplified "vm_mmap". it only handles MPX + * bounds tables (the bounds directory is user-allocated). + * + * Later on, we use the vma->vm_ops to uniquely identify these + * VMAs. + */ +static unsigned long mpx_mmap(unsigned long len) +{ + unsigned long ret; + unsigned long addr, pgoff; + struct mm_struct *mm = current->mm; + vm_flags_t vm_flags; + struct vm_area_struct *vma; + + /* Only bounds table and bounds directory can be allocated here */ + if (len != MPX_BD_SIZE_BYTES && len != MPX_BT_SIZE_BYTES) + return -EINVAL; + + down_write(&mm->mmap_sem); + + /* Too many mappings? */ + if (mm->map_count > sysctl_max_map_count) { + ret = -ENOMEM; + goto out; + } + + /* Obtain the address to map to. we verify (or select) it and ensure + * that it represents a valid section of the address space. 
+ */ + addr = get_unmapped_area(NULL, 0, len, 0, MAP_ANONYMOUS | MAP_PRIVATE); + if (addr & ~PAGE_MASK) { + ret = addr; + goto out; + } + + vm_flags = VM_READ | VM_WRITE | VM_MPX | + mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; + + /* Set pgoff according to addr for anon_vma */ + pgoff = addr >> PAGE_SHIFT; + + ret = mmap_region(NULL, addr, len, vm_flags, pgoff); + if (IS_ERR_VALUE(ret)) + goto out; + + vma = find_vma(mm, ret); + if (!vma) { + ret = -ENOMEM; + goto out; + } + vma->vm_ops = &mpx_vma_ops; + + if (vm_flags & VM_LOCKED) { + up_write(&mm->mmap_sem); + mm_populate(ret, len); + return ret; + } + +out: + up_write(&mm->mmap_sem); + return ret; +} + +enum reg_type { + REG_TYPE_RM = 0, + REG_TYPE_INDEX, + REG_TYPE_BASE, +}; + +static int get_reg_offset(struct insn *insn, struct pt_regs *regs, + enum reg_type type) +{ + int regno = 0; + + static const int regoff[] = { + offsetof(struct pt_regs, ax), + offsetof(struct pt_regs, cx), + offsetof(struct pt_regs, dx), + offsetof(struct pt_regs, bx), + offsetof(struct pt_regs, sp), + offsetof(struct pt_regs, bp), + offsetof(struct pt_regs, si), + offsetof(struct pt_regs, di), +#ifdef CONFIG_X86_64 + offsetof(struct pt_regs, r8), + offsetof(struct pt_regs, r9), + offsetof(struct pt_regs, r10), + offsetof(struct pt_regs, r11), + offsetof(struct pt_regs, r12), + offsetof(struct pt_regs, r13), + offsetof(struct pt_regs, r14), + offsetof(struct pt_regs, r15), +#endif + }; + int nr_registers = ARRAY_SIZE(regoff); + /* + * Don't possibly decode a 32-bit instructions as + * reading a 64-bit-only register. + */ + if (IS_ENABLED(CONFIG_X86_64) && !insn->x86_64) + nr_registers -= 8; + + switch (type) { + case REG_TYPE_RM: + regno = X86_MODRM_RM(insn->modrm.value); + if (X86_REX_B(insn->rex_prefix.value) == 1) + regno += 8; + break; + + case REG_TYPE_INDEX: + regno = X86_SIB_INDEX(insn->sib.value); + if (X86_REX_X(insn->rex_prefix.value) == 1) + regno += 8; + break; + + case REG_TYPE_BASE: + regno = X86_SIB_BASE(insn->sib.value); + if (X86_REX_B(insn->rex_prefix.value) == 1) + regno += 8; + break; + + default: + pr_err("invalid register type"); + BUG(); + break; + } + + if (regno > nr_registers) { + WARN_ONCE(1, "decoded an instruction with an invalid register"); + return -EINVAL; + } + return regoff[regno]; +} + +/* + * return the address being referenced be instruction + * for rm=3 returning the content of the rm reg + * for rm!=3 calculates the address using SIB and Disp + */ +static void __user *mpx_get_addr_ref(struct insn *insn, struct pt_regs *regs) +{ + unsigned long addr, base, indx; + int addr_offset, base_offset, indx_offset; + insn_byte_t sib; + + insn_get_modrm(insn); + insn_get_sib(insn); + sib = insn->sib.value; + + if (X86_MODRM_MOD(insn->modrm.value) == 3) { + addr_offset = get_reg_offset(insn, regs, REG_TYPE_RM); + if (addr_offset < 0) + goto out_err; + addr = regs_get_register(regs, addr_offset); + } else { + if (insn->sib.nbytes) { + base_offset = get_reg_offset(insn, regs, REG_TYPE_BASE); + if (base_offset < 0) + goto out_err; + + indx_offset = get_reg_offset(insn, regs, REG_TYPE_INDEX); + if (indx_offset < 0) + goto out_err; + + base = regs_get_register(regs, base_offset); + indx = regs_get_register(regs, indx_offset); + addr = base + indx * (1 << X86_SIB_SCALE(sib)); + } else { + addr_offset = get_reg_offset(insn, regs, REG_TYPE_RM); + if (addr_offset < 0) + goto out_err; + addr = regs_get_register(regs, addr_offset); + } + addr += insn->displacement.value; + } + return (void __user *)addr; +out_err: + return (void 
__user *)-1; +} + +static int mpx_insn_decode(struct insn *insn, + struct pt_regs *regs) +{ + unsigned char buf[MAX_INSN_SIZE]; + int x86_64 = !test_thread_flag(TIF_IA32); + int not_copied; + int nr_copied; + + not_copied = copy_from_user(buf, (void __user *)regs->ip, sizeof(buf)); + nr_copied = sizeof(buf) - not_copied; + /* + * The decoder _should_ fail nicely if we pass it a short buffer. + * But, let's not depend on that implementation detail. If we + * did not get anything, just error out now. + */ + if (!nr_copied) + return -EFAULT; + insn_init(insn, buf, nr_copied, x86_64); + insn_get_length(insn); + /* + * copy_from_user() tries to get as many bytes as we could see in + * the largest possible instruction. If the instruction we are + * after is shorter than that _and_ we attempt to copy from + * something unreadable, we might get a short read. This is OK + * as long as the read did not stop in the middle of the + * instruction. Check to see if we got a partial instruction. + */ + if (nr_copied < insn->length) + return -EFAULT; + + insn_get_opcode(insn); + /* + * We only _really_ need to decode bndcl/bndcn/bndcu + * Error out on anything else. + */ + if (insn->opcode.bytes[0] != 0x0f) + goto bad_opcode; + if ((insn->opcode.bytes[1] != 0x1a) && + (insn->opcode.bytes[1] != 0x1b)) + goto bad_opcode; + + return 0; +bad_opcode: + return -EINVAL; +} + +/* + * If a bounds overflow occurs then a #BR is generated. This + * function decodes MPX instructions to get violation address + * and set this address into extended struct siginfo. + * + * Note that this is not a super precise way of doing this. + * Userspace could have, by the time we get here, written + * anything it wants in to the instructions. We can not + * trust anything about it. They might not be valid + * instructions or might encode invalid registers, etc... + * + * The caller is expected to kfree() the returned siginfo_t. + */ +siginfo_t *mpx_generate_siginfo(struct pt_regs *regs, + struct xsave_struct *xsave_buf) +{ + struct bndreg *bndregs, *bndreg; + siginfo_t *info = NULL; + struct insn insn; + uint8_t bndregno; + int err; + + err = mpx_insn_decode(&insn, regs); + if (err) + goto err_out; + + /* + * We know at this point that we are only dealing with + * MPX instructions. + */ + insn_get_modrm(&insn); + bndregno = X86_MODRM_REG(insn.modrm.value); + if (bndregno > 3) { + err = -EINVAL; + goto err_out; + } + /* get the bndregs _area_ of the xsave structure */ + bndregs = get_xsave_addr(xsave_buf, XSTATE_BNDREGS); + if (!bndregs) { + err = -EINVAL; + goto err_out; + } + /* now go select the individual register in the set of 4 */ + bndreg = &bndregs[bndregno]; + + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) { + err = -ENOMEM; + goto err_out; + } + /* + * The registers are always 64-bit, but the upper 32 + * bits are ignored in 32-bit mode. Also, note that the + * upper bounds are architecturally represented in 1's + * complement form. + * + * The 'unsigned long' cast is because the compiler + * complains when casting from integers to different-size + * pointers. + */ + info->si_lower = (void __user *)(unsigned long)bndreg->lower_bound; + info->si_upper = (void __user *)(unsigned long)~bndreg->upper_bound; + info->si_addr_lsb = 0; + info->si_signo = SIGSEGV; + info->si_errno = 0; + info->si_code = SEGV_BNDERR; + info->si_addr = mpx_get_addr_ref(&insn, regs); + /* + * We were not able to extract an address from the instruction, + * probably because there was something invalid in it. 
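/*
 * A tiny sketch of why mpx_generate_siginfo() above reports
 * ~bndreg->upper_bound: the hardware stores the upper bound in one's
 * complement.  The bound values here are hypothetical, for illustration only.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t lower = 0x1000;        /* hypothetical lower bound           */
        uint64_t upper = 0x1fff;        /* hypothetical logical upper bound   */
        uint64_t bnd_upper = ~upper;    /* what the bound register holds      */

        /* decoding, as the siginfo code does */
        printf("si_lower=%#llx si_upper=%#llx\n",
               (unsigned long long)lower, (unsigned long long)~bnd_upper);
        return 0;
}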
+ */ + if (info->si_addr == (void *)-1) { + err = -EINVAL; + goto err_out; + } + return info; +err_out: + /* info might be NULL, but kfree() handles that */ + kfree(info); + return ERR_PTR(err); +} + +static __user void *task_get_bounds_dir(struct task_struct *tsk) +{ + struct bndcsr *bndcsr; + + if (!cpu_feature_enabled(X86_FEATURE_MPX)) + return MPX_INVALID_BOUNDS_DIR; + + /* + * The bounds directory pointer is stored in a register + * only accessible if we first do an xsave. + */ + fpu_save_init(&tsk->thread.fpu); + bndcsr = get_xsave_addr(&tsk->thread.fpu.state->xsave, XSTATE_BNDCSR); + if (!bndcsr) + return MPX_INVALID_BOUNDS_DIR; + + /* + * Make sure the register looks valid by checking the + * enable bit. + */ + if (!(bndcsr->bndcfgu & MPX_BNDCFG_ENABLE_FLAG)) + return MPX_INVALID_BOUNDS_DIR; + + /* + * Lastly, mask off the low bits used for configuration + * flags, and return the address of the bounds table. + */ + return (void __user *)(unsigned long) + (bndcsr->bndcfgu & MPX_BNDCFG_ADDR_MASK); +} + +int mpx_enable_management(struct task_struct *tsk) +{ + void __user *bd_base = MPX_INVALID_BOUNDS_DIR; + struct mm_struct *mm = tsk->mm; + int ret = 0; + + /* + * runtime in the userspace will be responsible for allocation of + * the bounds directory. Then, it will save the base of the bounds + * directory into XSAVE/XRSTOR Save Area and enable MPX through + * XRSTOR instruction. + * + * fpu_xsave() is expected to be very expensive. Storing the bounds + * directory here means that we do not have to do xsave in the unmap + * path; we can just use mm->bd_addr instead. + */ + bd_base = task_get_bounds_dir(tsk); + down_write(&mm->mmap_sem); + mm->bd_addr = bd_base; + if (mm->bd_addr == MPX_INVALID_BOUNDS_DIR) + ret = -ENXIO; + + up_write(&mm->mmap_sem); + return ret; +} + +int mpx_disable_management(struct task_struct *tsk) +{ + struct mm_struct *mm = current->mm; + + if (!cpu_feature_enabled(X86_FEATURE_MPX)) + return -ENXIO; + + down_write(&mm->mmap_sem); + mm->bd_addr = MPX_INVALID_BOUNDS_DIR; + up_write(&mm->mmap_sem); + return 0; +} + +/* + * With 32-bit mode, MPX_BT_SIZE_BYTES is 4MB, and the size of each + * bounds table is 16KB. With 64-bit mode, MPX_BT_SIZE_BYTES is 2GB, + * and the size of each bounds table is 4MB. + */ +static int allocate_bt(long __user *bd_entry) +{ + unsigned long expected_old_val = 0; + unsigned long actual_old_val = 0; + unsigned long bt_addr; + int ret = 0; + + /* + * Carve the virtual space out of userspace for the new + * bounds table: + */ + bt_addr = mpx_mmap(MPX_BT_SIZE_BYTES); + if (IS_ERR((void *)bt_addr)) + return PTR_ERR((void *)bt_addr); + /* + * Set the valid flag (kinda like _PAGE_PRESENT in a pte) + */ + bt_addr = bt_addr | MPX_BD_ENTRY_VALID_FLAG; + + /* + * Go poke the address of the new bounds table in to the + * bounds directory entry out in userspace memory. Note: + * we may race with another CPU instantiating the same table. + * In that case the cmpxchg will see an unexpected + * 'actual_old_val'. + * + * This can fault, but that's OK because we do not hold + * mmap_sem at this point, unlike some of the other part + * of the MPX code that have to pagefault_disable(). + */ + ret = user_atomic_cmpxchg_inatomic(&actual_old_val, bd_entry, + expected_old_val, bt_addr); + if (ret) + goto out_unmap; + + /* + * The user_atomic_cmpxchg_inatomic() will only return nonzero + * for faults, *not* if the cmpxchg itself fails. Now we must + * verify that the cmpxchg itself completed successfully. 
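/*
 * The directory-entry install in allocate_bt() amounts to a compare-and-swap
 * of 0 -> (table | valid) followed by a look at the old value, as the checks
 * just below show.  A userspace sketch with C11 atomics; the flag value and
 * the -22 (-EINVAL) stand-in are assumptions, not the real MPX constants.
 */
#include <stdio.h>
#include <stdatomic.h>

#define VALID_FLAG 0x1UL

static int install_bt(_Atomic unsigned long *bd_entry, unsigned long bt_addr)
{
        unsigned long expected = 0;

        if (atomic_compare_exchange_strong(bd_entry, &expected,
                                           bt_addr | VALID_FLAG))
                return 0;               /* we installed the table             */
        if (expected & VALID_FLAG)
                return 0;               /* raced with another thread: fine    */
        return -22;                     /* junk in the entry: error (-EINVAL) */
}

int main(void)
{
        _Atomic unsigned long entry = 0;

        printf("first  install: %d\n", install_bt(&entry, 0x100000UL));
        printf("racing install: %d\n", install_bt(&entry, 0x200000UL));
        return 0;
}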
+ */ + /* + * We expected an empty 'expected_old_val', but instead found + * an apparently valid entry. Assume we raced with another + * thread to instantiate this table and desclare succecss. + */ + if (actual_old_val & MPX_BD_ENTRY_VALID_FLAG) { + ret = 0; + goto out_unmap; + } + /* + * We found a non-empty bd_entry but it did not have the + * VALID_FLAG set. Return an error which will result in + * a SEGV since this probably means that somebody scribbled + * some invalid data in to a bounds table. + */ + if (expected_old_val != actual_old_val) { + ret = -EINVAL; + goto out_unmap; + } + return 0; +out_unmap: + vm_munmap(bt_addr & MPX_BT_ADDR_MASK, MPX_BT_SIZE_BYTES); + return ret; +} + +/* + * When a BNDSTX instruction attempts to save bounds to a bounds + * table, it will first attempt to look up the table in the + * first-level bounds directory. If it does not find a table in + * the directory, a #BR is generated and we get here in order to + * allocate a new table. + * + * With 32-bit mode, the size of BD is 4MB, and the size of each + * bound table is 16KB. With 64-bit mode, the size of BD is 2GB, + * and the size of each bound table is 4MB. + */ +static int do_mpx_bt_fault(struct xsave_struct *xsave_buf) +{ + unsigned long bd_entry, bd_base; + struct bndcsr *bndcsr; + + bndcsr = get_xsave_addr(xsave_buf, XSTATE_BNDCSR); + if (!bndcsr) + return -EINVAL; + /* + * Mask off the preserve and enable bits + */ + bd_base = bndcsr->bndcfgu & MPX_BNDCFG_ADDR_MASK; + /* + * The hardware provides the address of the missing or invalid + * entry via BNDSTATUS, so we don't have to go look it up. + */ + bd_entry = bndcsr->bndstatus & MPX_BNDSTA_ADDR_MASK; + /* + * Make sure the directory entry is within where we think + * the directory is. + */ + if ((bd_entry < bd_base) || + (bd_entry >= bd_base + MPX_BD_SIZE_BYTES)) + return -EINVAL; + + return allocate_bt((long __user *)bd_entry); +} + +int mpx_handle_bd_fault(struct xsave_struct *xsave_buf) +{ + /* + * Userspace never asked us to manage the bounds tables, + * so refuse to help. + */ + if (!kernel_managing_mpx_tables(current->mm)) + return -EINVAL; + + if (do_mpx_bt_fault(xsave_buf)) { + force_sig(SIGSEGV, current); + /* + * The force_sig() is essentially "handling" this + * exception, so we do not pass up the error + * from do_mpx_bt_fault(). + */ + } + return 0; +} + +/* + * A thin wrapper around get_user_pages(). Returns 0 if the + * fault was resolved or -errno if not. + */ +static int mpx_resolve_fault(long __user *addr, int write) +{ + long gup_ret; + int nr_pages = 1; + int force = 0; + + gup_ret = get_user_pages(current, current->mm, (unsigned long)addr, + nr_pages, write, force, NULL, NULL); + /* + * get_user_pages() returns number of pages gotten. + * 0 means we failed to fault in and get anything, + * probably because 'addr' is bad. + */ + if (!gup_ret) + return -EFAULT; + /* Other error, return it */ + if (gup_ret < 0) + return gup_ret; + /* must have gup'd a page and gup_ret>0, success */ + return 0; +} + +/* + * Get the base of bounds tables pointed by specific bounds + * directory entry. 
+ */ +static int get_bt_addr(struct mm_struct *mm, + long __user *bd_entry, unsigned long *bt_addr) +{ + int ret; + int valid_bit; + + if (!access_ok(VERIFY_READ, (bd_entry), sizeof(*bd_entry))) + return -EFAULT; + + while (1) { + int need_write = 0; + + pagefault_disable(); + ret = get_user(*bt_addr, bd_entry); + pagefault_enable(); + if (!ret) + break; + if (ret == -EFAULT) + ret = mpx_resolve_fault(bd_entry, need_write); + /* + * If we could not resolve the fault, consider it + * userspace's fault and error out. + */ + if (ret) + return ret; + } + + valid_bit = *bt_addr & MPX_BD_ENTRY_VALID_FLAG; + *bt_addr &= MPX_BT_ADDR_MASK; + + /* + * When the kernel is managing bounds tables, a bounds directory + * entry will either have a valid address (plus the valid bit) + * *OR* be completely empty. If we see a !valid entry *and* some + * data in the address field, we know something is wrong. This + * -EINVAL return will cause a SIGSEGV. + */ + if (!valid_bit && *bt_addr) + return -EINVAL; + /* + * Do we have an completely zeroed bt entry? That is OK. It + * just means there was no bounds table for this memory. Make + * sure to distinguish this from -EINVAL, which will cause + * a SEGV. + */ + if (!valid_bit) + return -ENOENT; + + return 0; +} + +/* + * Free the backing physical pages of bounds table 'bt_addr'. + * Assume start...end is within that bounds table. + */ +static int zap_bt_entries(struct mm_struct *mm, + unsigned long bt_addr, + unsigned long start, unsigned long end) +{ + struct vm_area_struct *vma; + unsigned long addr, len; + + /* + * Find the first overlapping vma. If vma->vm_start > start, there + * will be a hole in the bounds table. This -EINVAL return will + * cause a SIGSEGV. + */ + vma = find_vma(mm, start); + if (!vma || vma->vm_start > start) + return -EINVAL; + + /* + * A NUMA policy on a VM_MPX VMA could cause this bouds table to + * be split. So we need to look across the entire 'start -> end' + * range of this bounds table, find all of the VM_MPX VMAs, and + * zap only those. + */ + addr = start; + while (vma && vma->vm_start < end) { + /* + * We followed a bounds directory entry down + * here. If we find a non-MPX VMA, that's bad, + * so stop immediately and return an error. This + * probably results in a SIGSEGV. + */ + if (!is_mpx_vma(vma)) + return -EINVAL; + + len = min(vma->vm_end, end) - addr; + zap_page_range(vma, addr, len, NULL); + + vma = vma->vm_next; + addr = vma->vm_start; + } + + return 0; +} + +static int unmap_single_bt(struct mm_struct *mm, + long __user *bd_entry, unsigned long bt_addr) +{ + unsigned long expected_old_val = bt_addr | MPX_BD_ENTRY_VALID_FLAG; + unsigned long actual_old_val = 0; + int ret; + + while (1) { + int need_write = 1; + + pagefault_disable(); + ret = user_atomic_cmpxchg_inatomic(&actual_old_val, bd_entry, + expected_old_val, 0); + pagefault_enable(); + if (!ret) + break; + if (ret == -EFAULT) + ret = mpx_resolve_fault(bd_entry, need_write); + /* + * If we could not resolve the fault, consider it + * userspace's fault and error out. + */ + if (ret) + return ret; + } + /* + * The cmpxchg was performed, check the results. + */ + if (actual_old_val != expected_old_val) { + /* + * Someone else raced with us to unmap the table. + * There was no bounds table pointed to by the + * directory, so declare success. Somebody freed + * it. + */ + if (!actual_old_val) + return 0; + /* + * Something messed with the bounds directory + * entry. 
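/*
 * A compact sketch of the three-way decode done by get_bt_addr() above.
 * The mask, flag value, and error numbers are placeholders, not the real
 * MPX_* constants or errno macros.
 */
#include <stdio.h>

#define BD_VALID        0x1UL
#define BT_ADDR_MASK    (~0xfffUL)

static int decode_bd_entry(unsigned long entry, unsigned long *bt_addr)
{
        int valid = entry & BD_VALID;

        *bt_addr = entry & BT_ADDR_MASK;
        if (!valid && *bt_addr)
                return -22;     /* garbage entry -> -EINVAL -> SIGSEGV        */
        if (!valid)
                return -2;      /* empty entry   -> -ENOENT, nothing to do    */
        return 0;               /* valid entry, *bt_addr is the table base    */
}

int main(void)
{
        unsigned long bt;

        printf("%d\n", decode_bd_entry(0x0UL, &bt));            /* -2  */
        printf("%d\n", decode_bd_entry(0x7f0000401UL, &bt));    /* 0   */
        printf("%d\n", decode_bd_entry(0x2000UL, &bt));         /* -22 */
        return 0;
}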
We hold mmap_sem for read or write + * here, so it could not be a _new_ bounds table + * that someone just allocated. Something is + * wrong, so pass up the error and SIGSEGV. + */ + return -EINVAL; + } + + /* + * Note, we are likely being called under do_munmap() already. To + * avoid recursion, do_munmap() will check whether it comes + * from one bounds table through VM_MPX flag. + */ + return do_munmap(mm, bt_addr, MPX_BT_SIZE_BYTES); +} + +/* + * If the bounds table pointed by bounds directory 'bd_entry' is + * not shared, unmap this whole bounds table. Otherwise, only free + * those backing physical pages of bounds table entries covered + * in this virtual address region start...end. + */ +static int unmap_shared_bt(struct mm_struct *mm, + long __user *bd_entry, unsigned long start, + unsigned long end, bool prev_shared, bool next_shared) +{ + unsigned long bt_addr; + int ret; + + ret = get_bt_addr(mm, bd_entry, &bt_addr); + /* + * We could see an "error" ret for not-present bounds + * tables (not really an error), or actual errors, but + * stop unmapping either way. + */ + if (ret) + return ret; + + if (prev_shared && next_shared) + ret = zap_bt_entries(mm, bt_addr, + bt_addr+MPX_GET_BT_ENTRY_OFFSET(start), + bt_addr+MPX_GET_BT_ENTRY_OFFSET(end)); + else if (prev_shared) + ret = zap_bt_entries(mm, bt_addr, + bt_addr+MPX_GET_BT_ENTRY_OFFSET(start), + bt_addr+MPX_BT_SIZE_BYTES); + else if (next_shared) + ret = zap_bt_entries(mm, bt_addr, bt_addr, + bt_addr+MPX_GET_BT_ENTRY_OFFSET(end)); + else + ret = unmap_single_bt(mm, bd_entry, bt_addr); + + return ret; +} + +/* + * A virtual address region being munmap()ed might share bounds table + * with adjacent VMAs. We only need to free the backing physical + * memory of these shared bounds tables entries covered in this virtual + * address region. + */ +static int unmap_edge_bts(struct mm_struct *mm, + unsigned long start, unsigned long end) +{ + int ret; + long __user *bde_start, *bde_end; + struct vm_area_struct *prev, *next; + bool prev_shared = false, next_shared = false; + + bde_start = mm->bd_addr + MPX_GET_BD_ENTRY_OFFSET(start); + bde_end = mm->bd_addr + MPX_GET_BD_ENTRY_OFFSET(end-1); + + /* + * Check whether bde_start and bde_end are shared with adjacent + * VMAs. + * + * We already unliked the VMAs from the mm's rbtree so 'start' + * is guaranteed to be in a hole. This gets us the first VMA + * before the hole in to 'prev' and the next VMA after the hole + * in to 'next'. + */ + next = find_vma_prev(mm, start, &prev); + if (prev && (mm->bd_addr + MPX_GET_BD_ENTRY_OFFSET(prev->vm_end-1)) + == bde_start) + prev_shared = true; + if (next && (mm->bd_addr + MPX_GET_BD_ENTRY_OFFSET(next->vm_start)) + == bde_end) + next_shared = true; + + /* + * This virtual address region being munmap()ed is only + * covered by one bounds table. + * + * In this case, if this table is also shared with adjacent + * VMAs, only part of the backing physical memory of the bounds + * table need be freeed. Otherwise the whole bounds table need + * be unmapped. + */ + if (bde_start == bde_end) { + return unmap_shared_bt(mm, bde_start, start, end, + prev_shared, next_shared); + } + + /* + * If more than one bounds tables are covered in this virtual + * address region being munmap()ed, we need to separately check + * whether bde_start and bde_end are shared with adjacent VMAs. 
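/*
 * Two mappings share a bounds table exactly when they land in the same
 * bounds-directory slot, which is what the prev_shared/next_shared test in
 * unmap_edge_bts() above checks.  A sketch of that test for the 64-bit
 * layout, assuming the SDM address split (bits 47:20 pick the directory
 * entry); the bit positions are not spelled out in this diff.
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t bd_index(uint64_t addr)
{
        return (addr >> 20) & ((1ULL << 28) - 1);
}

int main(void)
{
        uint64_t prev_end   = 0x7f1234540000ULL;        /* hypothetical VMA end   */
        uint64_t next_start = 0x7f1234580000ULL;        /* hypothetical VMA start */

        printf("prev slot: %llu\n", (unsigned long long)bd_index(prev_end - 1));
        printf("next slot: %llu\n", (unsigned long long)bd_index(next_start));
        printf("shared table: %s\n",
               bd_index(prev_end - 1) == bd_index(next_start) ? "yes" : "no");
        return 0;
}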
+ */ + ret = unmap_shared_bt(mm, bde_start, start, end, prev_shared, false); + if (ret) + return ret; + ret = unmap_shared_bt(mm, bde_end, start, end, false, next_shared); + if (ret) + return ret; + + return 0; +} + +static int mpx_unmap_tables(struct mm_struct *mm, + unsigned long start, unsigned long end) +{ + int ret; + long __user *bd_entry, *bde_start, *bde_end; + unsigned long bt_addr; + + /* + * "Edge" bounds tables are those which are being used by the region + * (start -> end), but that may be shared with adjacent areas. If they + * turn out to be completely unshared, they will be freed. If they are + * shared, we will free the backing store (like an MADV_DONTNEED) for + * areas used by this region. + */ + ret = unmap_edge_bts(mm, start, end); + switch (ret) { + /* non-present tables are OK */ + case 0: + case -ENOENT: + /* Success, or no tables to unmap */ + break; + case -EINVAL: + case -EFAULT: + default: + return ret; + } + + /* + * Only unmap the bounds table that are + * 1. fully covered + * 2. not at the edges of the mapping, even if full aligned + */ + bde_start = mm->bd_addr + MPX_GET_BD_ENTRY_OFFSET(start); + bde_end = mm->bd_addr + MPX_GET_BD_ENTRY_OFFSET(end-1); + for (bd_entry = bde_start + 1; bd_entry < bde_end; bd_entry++) { + ret = get_bt_addr(mm, bd_entry, &bt_addr); + switch (ret) { + case 0: + break; + case -ENOENT: + /* No table here, try the next one */ + continue; + case -EINVAL: + case -EFAULT: + default: + /* + * Note: we are being strict here. + * Any time we run in to an issue + * unmapping tables, we stop and + * SIGSEGV. + */ + return ret; + } + + ret = unmap_single_bt(mm, bd_entry, bt_addr); + if (ret) + return ret; + } + + return 0; +} + +/* + * Free unused bounds tables covered in a virtual address region being + * munmap()ed. Assume end > start. + * + * This function will be called by do_munmap(), and the VMAs covering + * the virtual address region start...end have already been split if + * necessary, and the 'vma' is the first vma in this range (start -> end). + */ +void mpx_notify_unmap(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long start, unsigned long end) +{ + int ret; + + /* + * Refuse to do anything unless userspace has asked + * the kernel to help manage the bounds tables, + */ + if (!kernel_managing_mpx_tables(current->mm)) + return; + /* + * This will look across the entire 'start -> end' range, + * and find all of the non-VM_MPX VMAs. + * + * To avoid recursion, if a VM_MPX vma is found in the range + * (start->end), we will not continue follow-up work. This + * recursion represents having bounds tables for bounds tables, + * which should not occur normally. Being strict about it here + * helps ensure that we do not have an exploitable stack overflow. + */ + do { + if (vma->vm_flags & VM_MPX) + return; + vma = vma->vm_next; + } while (vma && vma->vm_start < end); + + ret = mpx_unmap_tables(mm, start, end); + if (ret) + force_sig(SIGSEGV, current); +} diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 36de293caf25..536ea2fb6e33 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -384,6 +384,26 @@ static pte_t *_lookup_address_cpa(struct cpa_data *cpa, unsigned long address, } /* + * Lookup the PMD entry for a virtual address. Return a pointer to the entry + * or NULL if not present. 
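/*
 * mpx_unmap_tables() above frees whole tables only for the "interior"
 * directory slots of the munmap range; the two edge slots were already
 * handled by unmap_edge_bts().  A sketch of that slot enumeration, using the
 * same assumed 64-bit bit split as the previous sketch.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t start = 0x7f1234500000ULL;     /* hypothetical munmap range */
        uint64_t end   = 0x7f1234900000ULL;
        uint64_t bde_start = start >> 20;       /* first covered dir slot    */
        uint64_t bde_end   = (end - 1) >> 20;   /* last covered dir slot     */
        uint64_t slot;

        printf("edge slots: %llu and %llu\n",
               (unsigned long long)bde_start, (unsigned long long)bde_end);
        for (slot = bde_start + 1; slot < bde_end; slot++)
                printf("interior slot %llu: unmap whole table\n",
                       (unsigned long long)slot);
        return 0;
}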
+ */ +pmd_t *lookup_pmd_address(unsigned long address) +{ + pgd_t *pgd; + pud_t *pud; + + pgd = pgd_offset_k(address); + if (pgd_none(*pgd)) + return NULL; + + pud = pud_offset(pgd, address); + if (pud_none(*pud) || pud_large(*pud) || !pud_present(*pud)) + return NULL; + + return pmd_offset(pud, address); +} + +/* * This is necessary because __pa() does not work on some * kinds of memory, like vmalloc() or the alloc_remap() * areas on 32-bit NUMA systems. The percpu areas can @@ -485,14 +505,23 @@ try_preserve_large_page(pte_t *kpte, unsigned long address, /* * We are safe now. Check whether the new pgprot is the same: + * Convert protection attributes to 4k-format, as cpa->mask* are set + * up accordingly. */ old_pte = *kpte; - old_prot = req_prot = pte_pgprot(old_pte); + old_prot = req_prot = pgprot_large_2_4k(pte_pgprot(old_pte)); pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr); pgprot_val(req_prot) |= pgprot_val(cpa->mask_set); /* + * req_prot is in format of 4k pages. It must be converted to large + * page format: the caching mode includes the PAT bit located at + * different bit positions in the two formats. + */ + req_prot = pgprot_4k_2_large(req_prot); + + /* * Set the PSE and GLOBAL flags only if the PRESENT flag is * set otherwise pmd_present/pmd_huge will return true even on * a non present pmd. The canon_pgprot will clear _PAGE_GLOBAL @@ -585,13 +614,10 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address, paravirt_alloc_pte(&init_mm, page_to_pfn(base)); ref_prot = pte_pgprot(pte_clrhuge(*kpte)); - /* - * If we ever want to utilize the PAT bit, we need to - * update this function to make sure it's converted from - * bit 12 to bit 7 when we cross from the 2MB level to - * the 4K level: - */ - WARN_ON_ONCE(pgprot_val(ref_prot) & _PAGE_PAT_LARGE); + + /* promote PAT bit to correct position */ + if (level == PG_LEVEL_2M) + ref_prot = pgprot_large_2_4k(ref_prot); #ifdef CONFIG_X86_64 if (level == PG_LEVEL_1G) { @@ -879,6 +905,7 @@ static int populate_pmd(struct cpa_data *cpa, { unsigned int cur_pages = 0; pmd_t *pmd; + pgprot_t pmd_pgprot; /* * Not on a 2M boundary? 
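/*
 * The pgprot_large_2_4k() / pgprot_4k_2_large() conversions used above exist
 * because the PAT selector sits at bit 7 in a 4k PTE but at bit 12 in a
 * 2M/1G entry (bit 7 is PSE there).  A sketch of that bit move; the bit
 * numbers follow the x86 page-table layout and are not quoted from this hunk.
 */
#include <stdio.h>

#define PAGE_PAT        (1UL << 7)      /* 4k PTE PAT bit      */
#define PAGE_PAT_LARGE  (1UL << 12)     /* 2M/1G entry PAT bit */

static unsigned long pat_large_to_4k(unsigned long prot)
{
        if (prot & PAGE_PAT_LARGE)
                prot = (prot & ~PAGE_PAT_LARGE) | PAGE_PAT;
        return prot;
}

static unsigned long pat_4k_to_large(unsigned long prot)
{
        if (prot & PAGE_PAT)
                prot = (prot & ~PAGE_PAT) | PAGE_PAT_LARGE;
        return prot;
}

int main(void)
{
        unsigned long large = PAGE_PAT_LARGE | 0x1;     /* PAT + present */

        printf("large %#lx -> 4k %#lx -> large %#lx\n",
               large, pat_large_to_4k(large),
               pat_4k_to_large(pat_large_to_4k(large)));
        return 0;
}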
@@ -910,6 +937,8 @@ static int populate_pmd(struct cpa_data *cpa, if (num_pages == cur_pages) return cur_pages; + pmd_pgprot = pgprot_4k_2_large(pgprot); + while (end - start >= PMD_SIZE) { /* @@ -921,7 +950,8 @@ static int populate_pmd(struct cpa_data *cpa, pmd = pmd_offset(pud, start); - set_pmd(pmd, __pmd(cpa->pfn | _PAGE_PSE | massage_pgprot(pgprot))); + set_pmd(pmd, __pmd(cpa->pfn | _PAGE_PSE | + massage_pgprot(pmd_pgprot))); start += PMD_SIZE; cpa->pfn += PMD_SIZE; @@ -949,6 +979,7 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd, pud_t *pud; unsigned long end; int cur_pages = 0; + pgprot_t pud_pgprot; end = start + (cpa->numpages << PAGE_SHIFT); @@ -986,12 +1017,14 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd, return cur_pages; pud = pud_offset(pgd, start); + pud_pgprot = pgprot_4k_2_large(pgprot); /* * Map everything starting from the Gb boundary, possibly with 1G pages */ while (end - start >= PUD_SIZE) { - set_pud(pud, __pud(cpa->pfn | _PAGE_PSE | massage_pgprot(pgprot))); + set_pud(pud, __pud(cpa->pfn | _PAGE_PSE | + massage_pgprot(pud_pgprot))); start += PUD_SIZE; cpa->pfn += PUD_SIZE; @@ -1304,12 +1337,6 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias) return 0; } -static inline int cache_attr(pgprot_t attr) -{ - return pgprot_val(attr) & - (_PAGE_PAT | _PAGE_PAT_LARGE | _PAGE_PWT | _PAGE_PCD); -} - static int change_page_attr_set_clr(unsigned long *addr, int numpages, pgprot_t mask_set, pgprot_t mask_clr, int force_split, int in_flag, @@ -1390,7 +1417,7 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages, * No need to flush, when we did not set any of the caching * attributes: */ - cache = cache_attr(mask_set); + cache = !!pgprot2cachemode(mask_set); /* * On success we use CLFLUSH, when the CPU supports it to @@ -1445,7 +1472,8 @@ int _set_memory_uc(unsigned long addr, int numpages) * for now UC MINUS. see comments in ioremap_nocache() */ return change_page_attr_set(&addr, numpages, - __pgprot(_PAGE_CACHE_UC_MINUS), 0); + cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS), + 0); } int set_memory_uc(unsigned long addr, int numpages) @@ -1456,7 +1484,7 @@ int set_memory_uc(unsigned long addr, int numpages) * for now UC MINUS. 
see comments in ioremap_nocache() */ ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE, - _PAGE_CACHE_UC_MINUS, NULL); + _PAGE_CACHE_MODE_UC_MINUS, NULL); if (ret) goto out_err; @@ -1474,7 +1502,7 @@ out_err: EXPORT_SYMBOL(set_memory_uc); static int _set_memory_array(unsigned long *addr, int addrinarray, - unsigned long new_type) + enum page_cache_mode new_type) { int i, j; int ret; @@ -1490,11 +1518,13 @@ static int _set_memory_array(unsigned long *addr, int addrinarray, } ret = change_page_attr_set(addr, addrinarray, - __pgprot(_PAGE_CACHE_UC_MINUS), 1); + cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS), + 1); - if (!ret && new_type == _PAGE_CACHE_WC) + if (!ret && new_type == _PAGE_CACHE_MODE_WC) ret = change_page_attr_set_clr(addr, addrinarray, - __pgprot(_PAGE_CACHE_WC), + cachemode2pgprot( + _PAGE_CACHE_MODE_WC), __pgprot(_PAGE_CACHE_MASK), 0, CPA_ARRAY, NULL); if (ret) @@ -1511,13 +1541,13 @@ out_free: int set_memory_array_uc(unsigned long *addr, int addrinarray) { - return _set_memory_array(addr, addrinarray, _PAGE_CACHE_UC_MINUS); + return _set_memory_array(addr, addrinarray, _PAGE_CACHE_MODE_UC_MINUS); } EXPORT_SYMBOL(set_memory_array_uc); int set_memory_array_wc(unsigned long *addr, int addrinarray) { - return _set_memory_array(addr, addrinarray, _PAGE_CACHE_WC); + return _set_memory_array(addr, addrinarray, _PAGE_CACHE_MODE_WC); } EXPORT_SYMBOL(set_memory_array_wc); @@ -1527,10 +1557,12 @@ int _set_memory_wc(unsigned long addr, int numpages) unsigned long addr_copy = addr; ret = change_page_attr_set(&addr, numpages, - __pgprot(_PAGE_CACHE_UC_MINUS), 0); + cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS), + 0); if (!ret) { ret = change_page_attr_set_clr(&addr_copy, numpages, - __pgprot(_PAGE_CACHE_WC), + cachemode2pgprot( + _PAGE_CACHE_MODE_WC), __pgprot(_PAGE_CACHE_MASK), 0, 0, NULL); } @@ -1545,7 +1577,7 @@ int set_memory_wc(unsigned long addr, int numpages) return set_memory_uc(addr, numpages); ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE, - _PAGE_CACHE_WC, NULL); + _PAGE_CACHE_MODE_WC, NULL); if (ret) goto out_err; @@ -1564,6 +1596,7 @@ EXPORT_SYMBOL(set_memory_wc); int _set_memory_wb(unsigned long addr, int numpages) { + /* WB cache mode is hard wired to all cache attribute bits being 0 */ return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_CACHE_MASK), 0); } @@ -1586,6 +1619,7 @@ int set_memory_array_wb(unsigned long *addr, int addrinarray) int i; int ret; + /* WB cache mode is hard wired to all cache attribute bits being 0 */ ret = change_page_attr_clear(addr, addrinarray, __pgprot(_PAGE_CACHE_MASK), 1); if (ret) @@ -1648,7 +1682,7 @@ int set_pages_uc(struct page *page, int numpages) EXPORT_SYMBOL(set_pages_uc); static int _set_pages_array(struct page **pages, int addrinarray, - unsigned long new_type) + enum page_cache_mode new_type) { unsigned long start; unsigned long end; @@ -1666,10 +1700,11 @@ static int _set_pages_array(struct page **pages, int addrinarray, } ret = cpa_set_pages_array(pages, addrinarray, - __pgprot(_PAGE_CACHE_UC_MINUS)); - if (!ret && new_type == _PAGE_CACHE_WC) + cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS)); + if (!ret && new_type == _PAGE_CACHE_MODE_WC) ret = change_page_attr_set_clr(NULL, addrinarray, - __pgprot(_PAGE_CACHE_WC), + cachemode2pgprot( + _PAGE_CACHE_MODE_WC), __pgprot(_PAGE_CACHE_MASK), 0, CPA_PAGES_ARRAY, pages); if (ret) @@ -1689,13 +1724,13 @@ err_out: int set_pages_array_uc(struct page **pages, int addrinarray) { - return _set_pages_array(pages, addrinarray, _PAGE_CACHE_UC_MINUS); + 
return _set_pages_array(pages, addrinarray, _PAGE_CACHE_MODE_UC_MINUS); } EXPORT_SYMBOL(set_pages_array_uc); int set_pages_array_wc(struct page **pages, int addrinarray) { - return _set_pages_array(pages, addrinarray, _PAGE_CACHE_WC); + return _set_pages_array(pages, addrinarray, _PAGE_CACHE_MODE_WC); } EXPORT_SYMBOL(set_pages_array_wc); @@ -1714,6 +1749,7 @@ int set_pages_array_wb(struct page **pages, int addrinarray) unsigned long end; int i; + /* WB cache mode is hard wired to all cache attribute bits being 0 */ retval = cpa_clear_pages_array(pages, addrinarray, __pgprot(_PAGE_CACHE_MASK)); if (retval) @@ -1801,7 +1837,7 @@ static int __set_pages_np(struct page *page, int numpages) return __change_page_attr_set_clr(&cpa, 0); } -void kernel_map_pages(struct page *page, int numpages, int enable) +void __kernel_map_pages(struct page *page, int numpages, int enable) { if (PageHighMem(page)) return; diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c index 657438858e83..edf299c8ff6c 100644 --- a/arch/x86/mm/pat.c +++ b/arch/x86/mm/pat.c @@ -31,6 +31,7 @@ #include <asm/io.h> #include "pat_internal.h" +#include "mm_internal.h" #ifdef CONFIG_X86_PAT int __read_mostly pat_enabled = 1; @@ -66,6 +67,75 @@ __setup("debugpat", pat_debug_setup); static u64 __read_mostly boot_pat_state; +#ifdef CONFIG_X86_PAT +/* + * X86 PAT uses page flags WC and Uncached together to keep track of + * memory type of pages that have backing page struct. X86 PAT supports 3 + * different memory types, _PAGE_CACHE_MODE_WB, _PAGE_CACHE_MODE_WC and + * _PAGE_CACHE_MODE_UC_MINUS and fourth state where page's memory type has not + * been changed from its default (value of -1 used to denote this). + * Note we do not support _PAGE_CACHE_MODE_UC here. + */ + +#define _PGMT_DEFAULT 0 +#define _PGMT_WC (1UL << PG_arch_1) +#define _PGMT_UC_MINUS (1UL << PG_uncached) +#define _PGMT_WB (1UL << PG_uncached | 1UL << PG_arch_1) +#define _PGMT_MASK (1UL << PG_uncached | 1UL << PG_arch_1) +#define _PGMT_CLEAR_MASK (~_PGMT_MASK) + +static inline enum page_cache_mode get_page_memtype(struct page *pg) +{ + unsigned long pg_flags = pg->flags & _PGMT_MASK; + + if (pg_flags == _PGMT_DEFAULT) + return -1; + else if (pg_flags == _PGMT_WC) + return _PAGE_CACHE_MODE_WC; + else if (pg_flags == _PGMT_UC_MINUS) + return _PAGE_CACHE_MODE_UC_MINUS; + else + return _PAGE_CACHE_MODE_WB; +} + +static inline void set_page_memtype(struct page *pg, + enum page_cache_mode memtype) +{ + unsigned long memtype_flags; + unsigned long old_flags; + unsigned long new_flags; + + switch (memtype) { + case _PAGE_CACHE_MODE_WC: + memtype_flags = _PGMT_WC; + break; + case _PAGE_CACHE_MODE_UC_MINUS: + memtype_flags = _PGMT_UC_MINUS; + break; + case _PAGE_CACHE_MODE_WB: + memtype_flags = _PGMT_WB; + break; + default: + memtype_flags = _PGMT_DEFAULT; + break; + } + + do { + old_flags = pg->flags; + new_flags = (old_flags & _PGMT_CLEAR_MASK) | memtype_flags; + } while (cmpxchg(&pg->flags, old_flags, new_flags) != old_flags); +} +#else +static inline enum page_cache_mode get_page_memtype(struct page *pg) +{ + return -1; +} +static inline void set_page_memtype(struct page *pg, + enum page_cache_mode memtype) +{ +} +#endif + enum { PAT_UC = 0, /* uncached */ PAT_WC = 1, /* Write combining */ @@ -75,6 +145,52 @@ enum { PAT_UC_MINUS = 7, /* UC, but can be overriden by MTRR */ }; +#define CM(c) (_PAGE_CACHE_MODE_ ## c) + +static enum page_cache_mode pat_get_cache_mode(unsigned pat_val, char *msg) +{ + enum page_cache_mode cache; + char *cache_mode; + + switch (pat_val) { + case 
PAT_UC: cache = CM(UC); cache_mode = "UC "; break; + case PAT_WC: cache = CM(WC); cache_mode = "WC "; break; + case PAT_WT: cache = CM(WT); cache_mode = "WT "; break; + case PAT_WP: cache = CM(WP); cache_mode = "WP "; break; + case PAT_WB: cache = CM(WB); cache_mode = "WB "; break; + case PAT_UC_MINUS: cache = CM(UC_MINUS); cache_mode = "UC- "; break; + default: cache = CM(WB); cache_mode = "WB "; break; + } + + memcpy(msg, cache_mode, 4); + + return cache; +} + +#undef CM + +/* + * Update the cache mode to pgprot translation tables according to PAT + * configuration. + * Using lower indices is preferred, so we start with highest index. + */ +void pat_init_cache_modes(void) +{ + int i; + enum page_cache_mode cache; + char pat_msg[33]; + u64 pat; + + rdmsrl(MSR_IA32_CR_PAT, pat); + pat_msg[32] = 0; + for (i = 7; i >= 0; i--) { + cache = pat_get_cache_mode((pat >> (i * 8)) & 7, + pat_msg + 4 * i); + update_cache_mode_entry(i, cache); + } + pr_info("PAT configuration [0-7]: %s\n", pat_msg); +} + #define PAT(x, y) ((u64)PAT_ ## y << ((x)*8)) void pat_init(void) @@ -124,8 +240,7 @@ void pat_init(void) wrmsrl(MSR_IA32_CR_PAT, pat); if (boot_cpu) - printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n", - smp_processor_id(), boot_pat_state, pat); + pat_init_cache_modes(); } #undef PAT @@ -139,20 +254,21 @@ static DEFINE_SPINLOCK(memtype_lock); /* protects memtype accesses */ * The intersection is based on "Effective Memory Type" tables in IA-32 * SDM vol 3a */ -static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type) +static unsigned long pat_x_mtrr_type(u64 start, u64 end, + enum page_cache_mode req_type) { /* * Look for MTRR hint to get the effective type in case where PAT * request is for WB. */ - if (req_type == _PAGE_CACHE_WB) { + if (req_type == _PAGE_CACHE_MODE_WB) { u8 mtrr_type; mtrr_type = mtrr_type_lookup(start, end); if (mtrr_type != MTRR_TYPE_WRBACK) - return _PAGE_CACHE_UC_MINUS; + return _PAGE_CACHE_MODE_UC_MINUS; - return _PAGE_CACHE_WB; + return _PAGE_CACHE_MODE_WB; } return req_type; @@ -207,25 +323,26 @@ static int pat_pagerange_is_ram(resource_size_t start, resource_size_t end) * - Find the memtype of all the pages in the range, look for any conflicts * - In case of no conflicts, set the new memtype for pages in the range */ -static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type, - unsigned long *new_type) +static int reserve_ram_pages_type(u64 start, u64 end, + enum page_cache_mode req_type, + enum page_cache_mode *new_type) { struct page *page; u64 pfn; - if (req_type == _PAGE_CACHE_UC) { + if (req_type == _PAGE_CACHE_MODE_UC) { /* We do not support strong UC */ WARN_ON_ONCE(1); - req_type = _PAGE_CACHE_UC_MINUS; + req_type = _PAGE_CACHE_MODE_UC_MINUS; } for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) { - unsigned long type; + enum page_cache_mode type; page = pfn_to_page(pfn); type = get_page_memtype(page); if (type != -1) { - printk(KERN_INFO "reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%lx, req 0x%lx\n", + pr_info("reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%x, req 0x%x\n", start, end - 1, type, req_type); if (new_type) *new_type = type; @@ -258,21 +375,21 @@ static int free_ram_pages_type(u64 start, u64 end) /* * req_type typically has one of the: - * - _PAGE_CACHE_WB - * - _PAGE_CACHE_WC - * - _PAGE_CACHE_UC_MINUS - * - _PAGE_CACHE_UC + * - _PAGE_CACHE_MODE_WB + * - _PAGE_CACHE_MODE_WC + * - _PAGE_CACHE_MODE_UC_MINUS + * - _PAGE_CACHE_MODE_UC * * If new_type 
is NULL, function will return an error if it cannot reserve the * region with req_type. If new_type is non-NULL, function will return * available type in new_type in case of no error. In case of any error * it will return a negative return value. */ -int reserve_memtype(u64 start, u64 end, unsigned long req_type, - unsigned long *new_type) +int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_type, + enum page_cache_mode *new_type) { struct memtype *new; - unsigned long actual_type; + enum page_cache_mode actual_type; int is_range_ram; int err = 0; @@ -281,10 +398,10 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type, if (!pat_enabled) { /* This is identical to page table setting without PAT */ if (new_type) { - if (req_type == _PAGE_CACHE_WC) - *new_type = _PAGE_CACHE_UC_MINUS; + if (req_type == _PAGE_CACHE_MODE_WC) + *new_type = _PAGE_CACHE_MODE_UC_MINUS; else - *new_type = req_type & _PAGE_CACHE_MASK; + *new_type = req_type; } return 0; } @@ -292,7 +409,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type, /* Low ISA region is always mapped WB in page table. No need to track */ if (x86_platform.is_untracked_pat_range(start, end)) { if (new_type) - *new_type = _PAGE_CACHE_WB; + *new_type = _PAGE_CACHE_MODE_WB; return 0; } @@ -302,7 +419,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type, * tools and ACPI tools). Use WB request for WB memory and use * UC_MINUS otherwise. */ - actual_type = pat_x_mtrr_type(start, end, req_type & _PAGE_CACHE_MASK); + actual_type = pat_x_mtrr_type(start, end, req_type); if (new_type) *new_type = actual_type; @@ -394,12 +511,12 @@ int free_memtype(u64 start, u64 end) * * Only to be called when PAT is enabled * - * Returns _PAGE_CACHE_WB, _PAGE_CACHE_WC, _PAGE_CACHE_UC_MINUS or - * _PAGE_CACHE_UC + * Returns _PAGE_CACHE_MODE_WB, _PAGE_CACHE_MODE_WC, _PAGE_CACHE_MODE_UC_MINUS + * or _PAGE_CACHE_MODE_UC */ -static unsigned long lookup_memtype(u64 paddr) +static enum page_cache_mode lookup_memtype(u64 paddr) { - int rettype = _PAGE_CACHE_WB; + enum page_cache_mode rettype = _PAGE_CACHE_MODE_WB; struct memtype *entry; if (x86_platform.is_untracked_pat_range(paddr, paddr + PAGE_SIZE)) @@ -414,7 +531,7 @@ static unsigned long lookup_memtype(u64 paddr) * default state and not reserved, and hence of type WB */ if (rettype == -1) - rettype = _PAGE_CACHE_WB; + rettype = _PAGE_CACHE_MODE_WB; return rettype; } @@ -425,7 +542,7 @@ static unsigned long lookup_memtype(u64 paddr) if (entry != NULL) rettype = entry->type; else - rettype = _PAGE_CACHE_UC_MINUS; + rettype = _PAGE_CACHE_MODE_UC_MINUS; spin_unlock(&memtype_lock); return rettype; @@ -442,11 +559,11 @@ static unsigned long lookup_memtype(u64 paddr) * On failure, returns non-zero */ int io_reserve_memtype(resource_size_t start, resource_size_t end, - unsigned long *type) + enum page_cache_mode *type) { resource_size_t size = end - start; - unsigned long req_type = *type; - unsigned long new_type; + enum page_cache_mode req_type = *type; + enum page_cache_mode new_type; int ret; WARN_ON_ONCE(iomem_map_sanity_check(start, size)); @@ -520,13 +637,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size) int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, unsigned long size, pgprot_t *vma_prot) { - unsigned long flags = _PAGE_CACHE_WB; + enum page_cache_mode pcm = _PAGE_CACHE_MODE_WB; if (!range_is_allowed(pfn, size)) return 0; if (file->f_flags & O_DSYNC) - flags = _PAGE_CACHE_UC_MINUS; + pcm = _PAGE_CACHE_MODE_UC_MINUS; 
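/*
 * A userspace sketch of the per-entry decode that pat_init_cache_modes()
 * earlier in this file performs on MSR_IA32_CR_PAT, run against the power-on
 * default PAT value.  The MSR value and the 0/1/4/5/6/7 encodings are taken
 * from the Intel SDM, not from this diff.
 */
#include <stdio.h>
#include <stdint.h>

static const char *pat_name(unsigned int v)
{
        switch (v) {
        case 0: return "UC";
        case 1: return "WC";
        case 4: return "WT";
        case 5: return "WP";
        case 6: return "WB";
        case 7: return "UC-";
        default: return "??";
        }
}

int main(void)
{
        uint64_t pat = 0x0007040600070406ULL;   /* boot default: WB WT UC- UC, twice */
        int i;

        for (i = 0; i < 8; i++)
                printf("PAT%d = %s\n", i, pat_name((pat >> (i * 8)) & 7));
        return 0;
}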
#ifdef CONFIG_X86_32 /* @@ -543,12 +660,12 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, boot_cpu_has(X86_FEATURE_CYRIX_ARR) || boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) && (pfn << PAGE_SHIFT) >= __pa(high_memory)) { - flags = _PAGE_CACHE_UC; + pcm = _PAGE_CACHE_MODE_UC; } #endif *vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) | - flags); + cachemode2protval(pcm)); return 1; } @@ -556,7 +673,8 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, * Change the memory type for the physial address range in kernel identity * mapping space if that range is a part of identity map. */ -int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags) +int kernel_map_sync_memtype(u64 base, unsigned long size, + enum page_cache_mode pcm) { unsigned long id_sz; @@ -574,11 +692,11 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags) __pa(high_memory) - base : size; - if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) { + if (ioremap_change_attr((unsigned long)__va(base), id_sz, pcm) < 0) { printk(KERN_INFO "%s:%d ioremap_change_attr failed %s " "for [mem %#010Lx-%#010Lx]\n", current->comm, current->pid, - cattr_name(flags), + cattr_name(pcm), base, (unsigned long long)(base + size-1)); return -EINVAL; } @@ -595,8 +713,8 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot, { int is_ram = 0; int ret; - unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK); - unsigned long flags = want_flags; + enum page_cache_mode want_pcm = pgprot2cachemode(*vma_prot); + enum page_cache_mode pcm = want_pcm; is_ram = pat_pagerange_is_ram(paddr, paddr + size); @@ -609,36 +727,36 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot, if (!pat_enabled) return 0; - flags = lookup_memtype(paddr); - if (want_flags != flags) { + pcm = lookup_memtype(paddr); + if (want_pcm != pcm) { printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n", current->comm, current->pid, - cattr_name(want_flags), + cattr_name(want_pcm), (unsigned long long)paddr, (unsigned long long)(paddr + size - 1), - cattr_name(flags)); + cattr_name(pcm)); *vma_prot = __pgprot((pgprot_val(*vma_prot) & - (~_PAGE_CACHE_MASK)) | - flags); + (~_PAGE_CACHE_MASK)) | + cachemode2protval(pcm)); } return 0; } - ret = reserve_memtype(paddr, paddr + size, want_flags, &flags); + ret = reserve_memtype(paddr, paddr + size, want_pcm, &pcm); if (ret) return ret; - if (flags != want_flags) { + if (pcm != want_pcm) { if (strict_prot || - !is_new_memtype_allowed(paddr, size, want_flags, flags)) { + !is_new_memtype_allowed(paddr, size, want_pcm, pcm)) { free_memtype(paddr, paddr + size); printk(KERN_ERR "%s:%d map pfn expected mapping type %s" " for [mem %#010Lx-%#010Lx], got %s\n", current->comm, current->pid, - cattr_name(want_flags), + cattr_name(want_pcm), (unsigned long long)paddr, (unsigned long long)(paddr + size - 1), - cattr_name(flags)); + cattr_name(pcm)); return -EINVAL; } /* @@ -647,10 +765,10 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot, */ *vma_prot = __pgprot((pgprot_val(*vma_prot) & (~_PAGE_CACHE_MASK)) | - flags); + cachemode2protval(pcm)); } - if (kernel_map_sync_memtype(paddr, size, flags) < 0) { + if (kernel_map_sync_memtype(paddr, size, pcm) < 0) { free_memtype(paddr, paddr + size); return -EINVAL; } @@ -709,7 +827,7 @@ int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot, unsigned long 
pfn, unsigned long addr, unsigned long size) { resource_size_t paddr = (resource_size_t)pfn << PAGE_SHIFT; - unsigned long flags; + enum page_cache_mode pcm; /* reserve the whole chunk starting from paddr */ if (addr == vma->vm_start && size == (vma->vm_end - vma->vm_start)) { @@ -728,18 +846,18 @@ int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot, * For anything smaller than the vma size we set prot based on the * lookup. */ - flags = lookup_memtype(paddr); + pcm = lookup_memtype(paddr); /* Check memtype for the remaining pages */ while (size > PAGE_SIZE) { size -= PAGE_SIZE; paddr += PAGE_SIZE; - if (flags != lookup_memtype(paddr)) + if (pcm != lookup_memtype(paddr)) return -EINVAL; } *prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) | - flags); + cachemode2protval(pcm)); return 0; } @@ -747,15 +865,15 @@ int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot, int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot, unsigned long pfn) { - unsigned long flags; + enum page_cache_mode pcm; if (!pat_enabled) return 0; /* Set prot based on lookup */ - flags = lookup_memtype((resource_size_t)pfn << PAGE_SHIFT); + pcm = lookup_memtype((resource_size_t)pfn << PAGE_SHIFT); *prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) | - flags); + cachemode2protval(pcm)); return 0; } @@ -791,7 +909,8 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn, pgprot_t pgprot_writecombine(pgprot_t prot) { if (pat_enabled) - return __pgprot(pgprot_val(prot) | _PAGE_CACHE_WC); + return __pgprot(pgprot_val(prot) | + cachemode2protval(_PAGE_CACHE_MODE_WC)); else return pgprot_noncached(prot); } @@ -824,7 +943,7 @@ static void *memtype_seq_start(struct seq_file *seq, loff_t *pos) { if (*pos == 0) { ++*pos; - seq_printf(seq, "PAT memtype list:\n"); + seq_puts(seq, "PAT memtype list:\n"); } return memtype_get_idx(*pos); diff --git a/arch/x86/mm/pat_internal.h b/arch/x86/mm/pat_internal.h index 77e5ba153fac..f6411620305d 100644 --- a/arch/x86/mm/pat_internal.h +++ b/arch/x86/mm/pat_internal.h @@ -10,30 +10,32 @@ struct memtype { u64 start; u64 end; u64 subtree_max_end; - unsigned long type; + enum page_cache_mode type; struct rb_node rb; }; -static inline char *cattr_name(unsigned long flags) +static inline char *cattr_name(enum page_cache_mode pcm) { - switch (flags & _PAGE_CACHE_MASK) { - case _PAGE_CACHE_UC: return "uncached"; - case _PAGE_CACHE_UC_MINUS: return "uncached-minus"; - case _PAGE_CACHE_WB: return "write-back"; - case _PAGE_CACHE_WC: return "write-combining"; - default: return "broken"; + switch (pcm) { + case _PAGE_CACHE_MODE_UC: return "uncached"; + case _PAGE_CACHE_MODE_UC_MINUS: return "uncached-minus"; + case _PAGE_CACHE_MODE_WB: return "write-back"; + case _PAGE_CACHE_MODE_WC: return "write-combining"; + case _PAGE_CACHE_MODE_WT: return "write-through"; + case _PAGE_CACHE_MODE_WP: return "write-protected"; + default: return "broken"; } } #ifdef CONFIG_X86_PAT extern int rbt_memtype_check_insert(struct memtype *new, - unsigned long *new_type); + enum page_cache_mode *new_type); extern struct memtype *rbt_memtype_erase(u64 start, u64 end); extern struct memtype *rbt_memtype_lookup(u64 addr); extern int rbt_memtype_copy_nth_element(struct memtype *out, loff_t pos); #else static inline int rbt_memtype_check_insert(struct memtype *new, - unsigned long *new_type) + enum page_cache_mode *new_type) { return 0; } static inline struct memtype *rbt_memtype_erase(u64 start, u64 end) { return NULL; } diff --git 
a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c index 415f6c4ced36..6582adcc8bd9 100644 --- a/arch/x86/mm/pat_rbtree.c +++ b/arch/x86/mm/pat_rbtree.c @@ -122,11 +122,12 @@ static struct memtype *memtype_rb_exact_match(struct rb_root *root, static int memtype_rb_check_conflict(struct rb_root *root, u64 start, u64 end, - unsigned long reqtype, unsigned long *newtype) + enum page_cache_mode reqtype, + enum page_cache_mode *newtype) { struct rb_node *node; struct memtype *match; - int found_type = reqtype; + enum page_cache_mode found_type = reqtype; match = memtype_rb_lowest_match(&memtype_rbroot, start, end); if (match == NULL) @@ -187,7 +188,8 @@ static void memtype_rb_insert(struct rb_root *root, struct memtype *newdata) rb_insert_augmented(&newdata->rb, root, &memtype_rb_augment_cb); } -int rbt_memtype_check_insert(struct memtype *new, unsigned long *ret_type) +int rbt_memtype_check_insert(struct memtype *new, + enum page_cache_mode *ret_type) { int err = 0; diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 3f627345d51c..987514396c1e 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -24,7 +24,7 @@ extern u8 sk_load_byte_positive_offset[]; extern u8 sk_load_word_negative_offset[], sk_load_half_negative_offset[]; extern u8 sk_load_byte_negative_offset[]; -static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len) +static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len) { if (len == 1) *ptr = bytes; @@ -52,12 +52,12 @@ static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len) #define EMIT4_off32(b1, b2, b3, b4, off) \ do {EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0) -static inline bool is_imm8(int value) +static bool is_imm8(int value) { return value <= 127 && value >= -128; } -static inline bool is_simm32(s64 value) +static bool is_simm32(s64 value) { return value == (s64) (s32) value; } @@ -94,7 +94,7 @@ static int bpf_size_to_x86_bytes(int bpf_size) #define X86_JGE 0x7D #define X86_JG 0x7F -static inline void bpf_flush_icache(void *start, void *end) +static void bpf_flush_icache(void *start, void *end) { mm_segment_t old_fs = get_fs(); @@ -133,24 +133,24 @@ static const int reg2hex[] = { * which need extra byte of encoding. 
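/*
 * The is_ereg() rewrite just below replaces a chain of comparisons with a
 * single bitmask test.  A sketch of the idiom with made-up register numbers;
 * the real BPF_REG_* and AUX_REG values are not shown in this hunk.
 */
#include <stdio.h>
#include <stdbool.h>

#define BIT(n) (1U << (n))

enum { REG_5 = 5, REG_7 = 7, REG_8 = 8, REG_9 = 9, AUX_REG = 10 };

static bool is_ereg_chain(unsigned int reg)
{
        return reg == REG_5 || reg == AUX_REG ||
               (reg >= REG_7 && reg <= REG_9);
}

static bool is_ereg_mask(unsigned int reg)
{
        return (1U << reg) & (BIT(REG_5) | BIT(AUX_REG) |
                              BIT(REG_7) | BIT(REG_8) | BIT(REG_9));
}

int main(void)
{
        unsigned int r;

        for (r = 0; r < 12; r++)
                if (is_ereg_chain(r) != is_ereg_mask(r))
                        printf("mismatch at %u\n", r);
        printf("chain and mask forms agree\n");
        return 0;
}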
* rax,rcx,...,rbp have simpler encoding */ -static inline bool is_ereg(u32 reg) +static bool is_ereg(u32 reg) { - if (reg == BPF_REG_5 || reg == AUX_REG || - (reg >= BPF_REG_7 && reg <= BPF_REG_9)) - return true; - else - return false; + return (1 << reg) & (BIT(BPF_REG_5) | + BIT(AUX_REG) | + BIT(BPF_REG_7) | + BIT(BPF_REG_8) | + BIT(BPF_REG_9)); } /* add modifiers if 'reg' maps to x64 registers r8..r15 */ -static inline u8 add_1mod(u8 byte, u32 reg) +static u8 add_1mod(u8 byte, u32 reg) { if (is_ereg(reg)) byte |= 1; return byte; } -static inline u8 add_2mod(u8 byte, u32 r1, u32 r2) +static u8 add_2mod(u8 byte, u32 r1, u32 r2) { if (is_ereg(r1)) byte |= 1; @@ -160,13 +160,13 @@ static inline u8 add_2mod(u8 byte, u32 r1, u32 r2) } /* encode 'dst_reg' register into x64 opcode 'byte' */ -static inline u8 add_1reg(u8 byte, u32 dst_reg) +static u8 add_1reg(u8 byte, u32 dst_reg) { return byte + reg2hex[dst_reg]; } /* encode 'dst_reg' and 'src_reg' registers into x64 opcode 'byte' */ -static inline u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg) +static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg) { return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3); } @@ -178,7 +178,7 @@ static void jit_fill_hole(void *area, unsigned int size) } struct jit_context { - unsigned int cleanup_addr; /* epilogue code offset */ + int cleanup_addr; /* epilogue code offset */ bool seen_ld_abs; }; @@ -192,6 +192,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, struct bpf_insn *insn = bpf_prog->insnsi; int insn_cnt = bpf_prog->len; bool seen_ld_abs = ctx->seen_ld_abs | (oldproglen == 0); + bool seen_exit = false; u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY]; int i; int proglen = 0; @@ -854,10 +855,11 @@ common_load: goto common_load; case BPF_JMP | BPF_EXIT: - if (i != insn_cnt - 1) { + if (seen_exit) { jmp_offset = ctx->cleanup_addr - addrs[i]; goto emit_jmp; } + seen_exit = true; /* update cleanup_addr */ ctx->cleanup_addr = proglen; /* mov rbx, qword ptr [rbp-X] */ diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c index 37c1435889ce..9b18ef315a55 100644 --- a/arch/x86/pci/i386.c +++ b/arch/x86/pci/i386.c @@ -433,14 +433,14 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, return -EINVAL; if (pat_enabled && write_combine) - prot |= _PAGE_CACHE_WC; + prot |= cachemode2protval(_PAGE_CACHE_MODE_WC); else if (pat_enabled || boot_cpu_data.x86 > 3) /* * ioremap() and ioremap_nocache() defaults to UC MINUS for now. * To avoid attribute conflicts, request UC MINUS here * as well. 
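/*
 * This pci_mmap_page_range() hunk, like the rest of the series, stops OR-ing
 * raw _PAGE_CACHE_* values into the pgprot and instead translates an
 * enum page_cache_mode through cachemode2protval().  A sketch of that
 * table-based translation; the PWT/PCD bit positions are the usual x86 ones
 * and the mode-to-bits mapping assumes the PAT layout Linux programs,
 * neither is quoted from this diff.
 */
#include <stdio.h>

enum page_cache_mode { CM_WB, CM_WC, CM_UC_MINUS, CM_UC, CM_NUM };

#define PWT (1UL << 3)
#define PCD (1UL << 4)

/* what pat_init_cache_modes() would keep in sync with the PAT MSR */
static unsigned long cm2prot[CM_NUM] = {
        [CM_WB]       = 0,
        [CM_WC]       = PWT,
        [CM_UC_MINUS] = PCD,
        [CM_UC]       = PCD | PWT,
};

static unsigned long cachemode2protval(enum page_cache_mode cm)
{
        return cm2prot[cm];
}

int main(void)
{
        unsigned long prot = 0x1;               /* pretend _PAGE_PRESENT     */

        prot |= cachemode2protval(CM_WC);       /* instead of _PAGE_CACHE_WC */
        printf("prot = %#lx\n", prot);
        return 0;
}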
*/ - prot |= _PAGE_CACHE_UC_MINUS; + prot |= cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS); vma->vm_page_prot = __pgprot(prot); diff --git a/arch/x86/pci/numachip.c b/arch/x86/pci/numachip.c index 7307d9d12d15..2e565e65c893 100644 --- a/arch/x86/pci/numachip.c +++ b/arch/x86/pci/numachip.c @@ -103,7 +103,7 @@ static int pci_mmcfg_write_numachip(unsigned int seg, unsigned int bus, return 0; } -const struct pci_raw_ops pci_mmcfg_numachip = { +static const struct pci_raw_ops pci_mmcfg_numachip = { .read = pci_mmcfg_read_numachip, .write = pci_mmcfg_write_numachip, }; diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c index 093f5f4272d3..c489ef2c1a39 100644 --- a/arch/x86/pci/xen.c +++ b/arch/x86/pci/xen.c @@ -23,6 +23,8 @@ #include <xen/features.h> #include <xen/events.h> #include <asm/xen/pci.h> +#include <asm/xen/cpuid.h> +#include <asm/apic.h> #include <asm/i8259.h> static int xen_pcifront_enable_irq(struct pci_dev *dev) @@ -229,7 +231,7 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) return 1; list_for_each_entry(msidesc, &dev->msi_list, list) { - __read_msi_msg(msidesc, &msg); + __pci_read_msi_msg(msidesc, &msg); pirq = MSI_ADDR_EXT_DEST_ID(msg.address_hi) | ((msg.address_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xff); if (msg.data != XEN_PIRQ_MSI_DATA || @@ -240,7 +242,7 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) goto error; } xen_msi_compose_msg(dev, pirq, &msg); - __write_msi_msg(msidesc, &msg); + __pci_write_msi_msg(msidesc, &msg); dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq); } else { dev_dbg(&dev->dev, @@ -394,14 +396,7 @@ static void xen_teardown_msi_irq(unsigned int irq) { xen_destroy_irq(irq); } -static u32 xen_nop_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) -{ - return 0; -} -static u32 xen_nop_msix_mask_irq(struct msi_desc *desc, u32 flag) -{ - return 0; -} + #endif int __init pci_xen_init(void) @@ -425,12 +420,33 @@ int __init pci_xen_init(void) x86_msi.setup_msi_irqs = xen_setup_msi_irqs; x86_msi.teardown_msi_irq = xen_teardown_msi_irq; x86_msi.teardown_msi_irqs = xen_teardown_msi_irqs; - x86_msi.msi_mask_irq = xen_nop_msi_mask_irq; - x86_msi.msix_mask_irq = xen_nop_msix_mask_irq; + pci_msi_ignore_mask = 1; #endif return 0; } +#ifdef CONFIG_PCI_MSI +void __init xen_msi_init(void) +{ + if (!disable_apic) { + /* + * If hardware supports (x2)APIC virtualization (as indicated + * by hypervisor's leaf 4) then we don't need to use pirqs/ + * event channels for MSI handling and instead use regular + * APIC processing + */ + uint32_t eax = cpuid_eax(xen_cpuid_base() + 4); + + if (((eax & XEN_HVM_CPUID_X2APIC_VIRT) && x2apic_mode) || + ((eax & XEN_HVM_CPUID_APIC_ACCESS_VIRT) && cpu_has_apic)) + return; + } + + x86_msi.setup_msi_irqs = xen_hvm_setup_msi_irqs; + x86_msi.teardown_msi_irq = xen_teardown_msi_irq; +} +#endif + int __init pci_xen_hvm_init(void) { if (!xen_have_vector_callback || !xen_feature(XENFEAT_hvm_pirqs)) @@ -445,8 +461,11 @@ int __init pci_xen_hvm_init(void) #endif #ifdef CONFIG_PCI_MSI - x86_msi.setup_msi_irqs = xen_hvm_setup_msi_irqs; - x86_msi.teardown_msi_irq = xen_teardown_msi_irq; + /* + * We need to wait until after x2apic is initialized + * before we can set MSI IRQ ops. 
+ */ + x86_platform.apic_post_init = xen_msi_init; #endif return 0; } @@ -506,8 +525,7 @@ int __init pci_xen_initial_domain(void) x86_msi.setup_msi_irqs = xen_initdom_setup_msi_irqs; x86_msi.teardown_msi_irq = xen_teardown_msi_irq; x86_msi.restore_msi_irqs = xen_initdom_restore_msi_irqs; - x86_msi.msi_mask_irq = xen_nop_msi_mask_irq; - x86_msi.msix_mask_irq = xen_nop_msix_mask_irq; + pci_msi_ignore_mask = 1; #endif xen_setup_acpi_sci(); __acpi_register_gsi = acpi_register_gsi_xen; diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index 35aecb6042fb..17e80d829df0 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c @@ -48,8 +48,7 @@ static unsigned long efi_flags __initdata; * We allocate runtime services regions bottom-up, starting from -4G, i.e. * 0xffff_ffff_0000_0000 and limit EFI VA mapping space to 64G. */ -static u64 efi_va = -4 * (1UL << 30); -#define EFI_VA_END (-68 * (1UL << 30)) +static u64 efi_va = EFI_VA_START; /* * Scratch space used for switching the pagetable in the EFI stub diff --git a/arch/x86/platform/iris/iris.c b/arch/x86/platform/iris/iris.c index 4d171e8640ef..735ba21efe91 100644 --- a/arch/x86/platform/iris/iris.c +++ b/arch/x86/platform/iris/iris.c @@ -86,7 +86,6 @@ static int iris_remove(struct platform_device *pdev) static struct platform_driver iris_driver = { .driver = { .name = "iris", - .owner = THIS_MODULE, }, .probe = iris_probe, .remove = iris_remove, diff --git a/arch/x86/platform/olpc/olpc-xo1-pm.c b/arch/x86/platform/olpc/olpc-xo1-pm.c index a9acde72d4ed..c5350fd27d70 100644 --- a/arch/x86/platform/olpc/olpc-xo1-pm.c +++ b/arch/x86/platform/olpc/olpc-xo1-pm.c @@ -170,7 +170,6 @@ static int xo1_pm_remove(struct platform_device *pdev) static struct platform_driver cs5535_pms_driver = { .driver = { .name = "cs5535-pms", - .owner = THIS_MODULE, }, .probe = xo1_pm_probe, .remove = xo1_pm_remove, @@ -179,7 +178,6 @@ static struct platform_driver cs5535_pms_driver = { static struct platform_driver cs5535_acpi_driver = { .driver = { .name = "olpc-xo1-pm-acpi", - .owner = THIS_MODULE, }, .probe = xo1_pm_probe, .remove = xo1_pm_remove, diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c index 3968d67d366b..994798548b1a 100644 --- a/arch/x86/platform/uv/tlb_uv.c +++ b/arch/x86/platform/uv/tlb_uv.c @@ -1367,23 +1367,25 @@ static int ptc_seq_show(struct seq_file *file, void *data) cpu = *(loff_t *)data; if (!cpu) { - seq_printf(file, - "# cpu bauoff sent stime self locals remotes ncpus localhub "); - seq_printf(file, - "remotehub numuvhubs numuvhubs16 numuvhubs8 "); - seq_printf(file, - "numuvhubs4 numuvhubs2 numuvhubs1 dto snacks retries "); - seq_printf(file, - "rok resetp resett giveup sto bz throt disable "); - seq_printf(file, - "enable wars warshw warwaits enters ipidis plugged "); - seq_printf(file, - "ipiover glim cong swack recv rtime all one mult "); - seq_printf(file, - "none retry canc nocan reset rcan\n"); + seq_puts(file, + "# cpu bauoff sent stime self locals remotes ncpus localhub "); + seq_puts(file, "remotehub numuvhubs numuvhubs16 numuvhubs8 "); + seq_puts(file, + "numuvhubs4 numuvhubs2 numuvhubs1 dto snacks retries "); + seq_puts(file, + "rok resetp resett giveup sto bz throt disable "); + seq_puts(file, + "enable wars warshw warwaits enters ipidis plugged "); + seq_puts(file, + "ipiover glim cong swack recv rtime all one mult "); + seq_puts(file, "none retry canc nocan reset rcan\n"); } if (cpu < num_possible_cpus() && cpu_online(cpu)) { bcp = &per_cpu(bau_control, 
cpu); + if (bcp->nobau) { + seq_printf(file, "cpu %d bau disabled\n", cpu); + return 0; + } stat = bcp->statp; /* source side statistics */ seq_printf(file, diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile index f52e033557c9..2c835e356349 100644 --- a/arch/x86/purgatory/Makefile +++ b/arch/x86/purgatory/Makefile @@ -24,6 +24,7 @@ quiet_cmd_bin2c = BIN2C $@ $(obj)/kexec-purgatory.c: $(obj)/purgatory.ro FORCE $(call if_changed,bin2c) + @: obj-$(CONFIG_KEXEC_FILE) += kexec-purgatory.o diff --git a/arch/x86/syscalls/syscall_32.tbl b/arch/x86/syscalls/syscall_32.tbl index 9fe1b5d002f0..b3560ece1c9f 100644 --- a/arch/x86/syscalls/syscall_32.tbl +++ b/arch/x86/syscalls/syscall_32.tbl @@ -364,3 +364,4 @@ 355 i386 getrandom sys_getrandom 356 i386 memfd_create sys_memfd_create 357 i386 bpf sys_bpf +358 i386 execveat sys_execveat stub32_execveat diff --git a/arch/x86/syscalls/syscall_64.tbl b/arch/x86/syscalls/syscall_64.tbl index 281150b539a2..8d656fbb57aa 100644 --- a/arch/x86/syscalls/syscall_64.tbl +++ b/arch/x86/syscalls/syscall_64.tbl @@ -328,6 +328,7 @@ 319 common memfd_create sys_memfd_create 320 common kexec_file_load sys_kexec_file_load 321 common bpf sys_bpf +322 64 execveat stub_execveat # # x32-specific system call numbers start at 512 to avoid cache impact @@ -366,3 +367,4 @@ 542 x32 getsockopt compat_sys_getsockopt 543 x32 io_setup compat_sys_io_setup 544 x32 io_submit compat_sys_io_submit +545 x32 execveat stub_x32_execveat diff --git a/arch/x86/tools/insn_sanity.c b/arch/x86/tools/insn_sanity.c index 872eb60e7806..ba70ff232917 100644 --- a/arch/x86/tools/insn_sanity.c +++ b/arch/x86/tools/insn_sanity.c @@ -254,7 +254,7 @@ int main(int argc, char **argv) continue; /* Decode an instruction */ - insn_init(&insn, insn_buf, x86_64); + insn_init(&insn, insn_buf, sizeof(insn_buf), x86_64); insn_get_length(&insn); if (insn.next_byte <= insn.kaddr || diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c index a5efb21d5228..0c2fae8d929d 100644 --- a/arch/x86/tools/relocs.c +++ b/arch/x86/tools/relocs.c @@ -20,7 +20,10 @@ struct relocs { static struct relocs relocs16; static struct relocs relocs32; +#if ELF_BITS == 64 +static struct relocs relocs32neg; static struct relocs relocs64; +#endif struct section { Elf_Shdr shdr; @@ -762,11 +765,16 @@ static int do_reloc64(struct section *sec, Elf_Rel *rel, ElfW(Sym) *sym, switch (r_type) { case R_X86_64_NONE: + /* NONE can be ignored. */ + break; + case R_X86_64_PC32: /* - * NONE can be ignored and PC relative relocations don't - * need to be adjusted. + * PC relative relocations don't need to be adjusted unless + * referencing a percpu symbol. 
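The do_reloc64() change above rests on a simple invariant: when the whole image is shifted by some delta, absolute 32-bit references must be patched, while PC-relative ones stay valid because the referencing site and its target move together. Per-CPU symbols are the exception, since they are addressed from a separate per-CPU base, which is why they now land on the new relocs32neg list. A small, self-contained illustration of the arithmetic; all names are invented for the example.

#include <stdint.h>
#include <stdio.h>

/* An absolute 32-bit field holds the target address and must be patched. */
static uint32_t demo_fix_abs32(uint32_t value, uint32_t delta)
{
        return value + delta;
}

/* A PC-relative field holds (target - site); both move by delta, so the
 * encoded difference is unchanged. */
static uint32_t demo_fix_pcrel32(uint32_t value, uint32_t delta)
{
        (void)delta;
        return value;
}

int main(void)
{
        uint32_t site = 0x1000, target = 0x2000, delta = 0x200000;

        printf("abs:   %#x -> %#x\n", target, demo_fix_abs32(target, delta));
        printf("pcrel: %#x -> %#x\n", target - site,
               demo_fix_pcrel32(target - site, delta));
        return 0;
}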
*/ + if (is_percpu_sym(sym, symname)) + add_reloc(&relocs32neg, offset); break; case R_X86_64_32: @@ -986,7 +994,10 @@ static void emit_relocs(int as_text, int use_real_mode) /* Order the relocations for more efficient processing */ sort_relocs(&relocs16); sort_relocs(&relocs32); +#if ELF_BITS == 64 + sort_relocs(&relocs32neg); sort_relocs(&relocs64); +#endif /* Print the relocations */ if (as_text) { @@ -1007,14 +1018,21 @@ static void emit_relocs(int as_text, int use_real_mode) for (i = 0; i < relocs32.count; i++) write_reloc(relocs32.offset[i], stdout); } else { - if (ELF_BITS == 64) { - /* Print a stop */ - write_reloc(0, stdout); +#if ELF_BITS == 64 + /* Print a stop */ + write_reloc(0, stdout); - /* Now print each relocation */ - for (i = 0; i < relocs64.count; i++) - write_reloc(relocs64.offset[i], stdout); - } + /* Now print each relocation */ + for (i = 0; i < relocs64.count; i++) + write_reloc(relocs64.offset[i], stdout); + + /* Print a stop */ + write_reloc(0, stdout); + + /* Now print each inverse 32-bit relocation */ + for (i = 0; i < relocs32neg.count; i++) + write_reloc(relocs32neg.offset[i], stdout); +#endif /* Print a stop */ write_reloc(0, stdout); diff --git a/arch/x86/tools/test_get_len.c b/arch/x86/tools/test_get_len.c index 13403fc95a96..56f04db0c9c0 100644 --- a/arch/x86/tools/test_get_len.c +++ b/arch/x86/tools/test_get_len.c @@ -149,7 +149,7 @@ int main(int argc, char **argv) break; } /* Decode an instruction */ - insn_init(&insn, insn_buf, x86_64); + insn_init(&insn, insn_buf, sizeof(insn_buf), x86_64); insn_get_length(&insn); if (insn.length != nb) { warnings++; diff --git a/arch/x86/um/asm/barrier.h b/arch/x86/um/asm/barrier.h index cc04e67bfd05..2d7d9a1f5b53 100644 --- a/arch/x86/um/asm/barrier.h +++ b/arch/x86/um/asm/barrier.h @@ -29,20 +29,18 @@ #endif /* CONFIG_X86_32 */ -#define read_barrier_depends() do { } while (0) - -#ifdef CONFIG_SMP - -#define smp_mb() mb() #ifdef CONFIG_X86_PPRO_FENCE -#define smp_rmb() rmb() +#define dma_rmb() rmb() #else /* CONFIG_X86_PPRO_FENCE */ -#define smp_rmb() barrier() +#define dma_rmb() barrier() #endif /* CONFIG_X86_PPRO_FENCE */ +#define dma_wmb() barrier() -#define smp_wmb() barrier() +#ifdef CONFIG_SMP -#define smp_read_barrier_depends() read_barrier_depends() +#define smp_mb() mb() +#define smp_rmb() dma_rmb() +#define smp_wmb() barrier() #define set_mb(var, value) do { (void)xchg(&var, value); } while (0) #else /* CONFIG_SMP */ @@ -50,11 +48,13 @@ #define smp_mb() barrier() #define smp_rmb() barrier() #define smp_wmb() barrier() -#define smp_read_barrier_depends() do { } while (0) #define set_mb(var, value) do { var = value; barrier(); } while (0) #endif /* CONFIG_SMP */ +#define read_barrier_depends() do { } while (0) +#define smp_read_barrier_depends() do { } while (0) + /* * Stop RDTSC speculation. 
This is needed when you need to use RDTSC * (or get_cycles or vread that possibly accesses the TSC) in a defined diff --git a/arch/x86/um/sys_call_table_64.c b/arch/x86/um/sys_call_table_64.c index f2f0723070ca..20c3649d0691 100644 --- a/arch/x86/um/sys_call_table_64.c +++ b/arch/x86/um/sys_call_table_64.c @@ -31,6 +31,7 @@ #define stub_fork sys_fork #define stub_vfork sys_vfork #define stub_execve sys_execve +#define stub_execveat sys_execveat #define stub_rt_sigreturn sys_rt_sigreturn #define __SYSCALL_COMMON(nr, sym, compat) __SYSCALL_64(nr, sym, compat) diff --git a/arch/x86/vdso/vgetcpu.c b/arch/x86/vdso/vgetcpu.c index 2f94b039e55b..8ec3d1f4ce9a 100644 --- a/arch/x86/vdso/vgetcpu.c +++ b/arch/x86/vdso/vgetcpu.c @@ -7,9 +7,7 @@ #include <linux/kernel.h> #include <linux/getcpu.h> -#include <linux/jiffies.h> #include <linux/time.h> -#include <asm/vsyscall.h> #include <asm/vgtod.h> notrace long diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c index 970463b566cf..009495b9ab4b 100644 --- a/arch/x86/vdso/vma.c +++ b/arch/x86/vdso/vma.c @@ -1,7 +1,8 @@ /* - * Set up the VMAs to tell the VM about the vDSO. * Copyright 2007 Andi Kleen, SUSE Labs. * Subject to the GPL, v.2 + * + * This contains most of the x86 vDSO kernel-side code. */ #include <linux/mm.h> #include <linux/err.h> @@ -10,17 +11,17 @@ #include <linux/init.h> #include <linux/random.h> #include <linux/elf.h> -#include <asm/vsyscall.h> +#include <linux/cpu.h> #include <asm/vgtod.h> #include <asm/proto.h> #include <asm/vdso.h> +#include <asm/vvar.h> #include <asm/page.h> #include <asm/hpet.h> +#include <asm/desc.h> #if defined(CONFIG_X86_64) unsigned int __read_mostly vdso64_enabled = 1; - -extern unsigned short vdso_sync_cpuid; #endif void __init init_vdso_image(const struct vdso_image *image) @@ -38,20 +39,6 @@ void __init init_vdso_image(const struct vdso_image *image) image->alt_len)); } -#if defined(CONFIG_X86_64) -static int __init init_vdso(void) -{ - init_vdso_image(&vdso_image_64); - -#ifdef CONFIG_X86_X32_ABI - init_vdso_image(&vdso_image_x32); -#endif - - return 0; -} -subsys_initcall(init_vdso); -#endif - struct linux_binprm; /* Put the vdso above the (randomized) stack with another randomized offset. @@ -238,3 +225,63 @@ static __init int vdso_setup(char *s) } __setup("vdso=", vdso_setup); #endif + +#ifdef CONFIG_X86_64 +static void vgetcpu_cpu_init(void *arg) +{ + int cpu = smp_processor_id(); + struct desc_struct d = { }; + unsigned long node = 0; +#ifdef CONFIG_NUMA + node = cpu_to_node(cpu); +#endif + if (cpu_has(&cpu_data(cpu), X86_FEATURE_RDTSCP)) + write_rdtscp_aux((node << 12) | cpu); + + /* + * Store cpu number in limit so that it can be loaded + * quickly in user space in vgetcpu. 
(12 bits for the CPU + * and 8 bits for the node) + */ + d.limit0 = cpu | ((node & 0xf) << 12); + d.limit = node >> 4; + d.type = 5; /* RO data, expand down, accessed */ + d.dpl = 3; /* Visible to user code */ + d.s = 1; /* Not a system segment */ + d.p = 1; /* Present */ + d.d = 1; /* 32-bit */ + + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S); +} + +static int +vgetcpu_cpu_notifier(struct notifier_block *n, unsigned long action, void *arg) +{ + long cpu = (long)arg; + + if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) + smp_call_function_single(cpu, vgetcpu_cpu_init, NULL, 1); + + return NOTIFY_DONE; +} + +static int __init init_vdso(void) +{ + init_vdso_image(&vdso_image_64); + +#ifdef CONFIG_X86_X32_ABI + init_vdso_image(&vdso_image_x32); +#endif + + cpu_notifier_register_begin(); + + on_each_cpu(vgetcpu_cpu_init, NULL, 1); + /* notifier priority > KVM */ + __hotcpu_notifier(vgetcpu_cpu_notifier, 30); + + cpu_notifier_register_done(); + + return 0; +} +subsys_initcall(init_vdso); +#endif /* CONFIG_X86_64 */ diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index fac5e4f9607c..6bf3a13e3e0f 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -1100,12 +1100,6 @@ static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high) /* Fast syscall setup is all done in hypercalls, so these are all ignored. Stub them out here to stop Xen console noise. */ - break; - - case MSR_IA32_CR_PAT: - if (smp_processor_id() == 0) - xen_set_pat(((u64)high << 32) | low); - break; default: ret = native_write_msr_safe(msr, low, high); @@ -1561,10 +1555,6 @@ asmlinkage __visible void __init xen_start_kernel(void) /* Prevent unwanted bits from being set in PTEs. */ __supported_pte_mask &= ~_PAGE_GLOBAL; -#if 0 - if (!xen_initial_domain()) -#endif - __supported_pte_mask &= ~(_PAGE_PWT | _PAGE_PCD); /* * Prevent page tables from being allocated in highmem, even @@ -1618,14 +1608,6 @@ asmlinkage __visible void __init xen_start_kernel(void) */ acpi_numa = -1; #endif -#ifdef CONFIG_X86_PAT - /* - * For right now disable the PAT. We should remove this once - * git commit 8eaffa67b43e99ae581622c5133e20b0f48bcef1 - * (xen/pat: Disable PAT support for now) is reverted. - */ - pat_enabled = 0; -#endif /* Don't do the full vcpu_info placement stuff until we have a possible map and a non-dummy shared_info. */ per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0]; @@ -1636,6 +1618,13 @@ asmlinkage __visible void __init xen_start_kernel(void) xen_raw_console_write("mapping kernel into physical memory\n"); xen_setup_kernel_pagetable((pgd_t *)xen_start_info->pt_base, xen_start_info->nr_pages); + /* + * Modify the cache mode translation tables to match Xen's PAT + * configuration. 
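For the vgetcpu_cpu_init() hunk just above: with the CPU number packed into limit bits 0-11 of the per-CPU GDT descriptor and the node into bits 12-19, user space (the vDSO) can recover both values with a single LSL instruction. A hedged sketch of that decode, assuming x86-64 GCC/Clang; the segment selector value is a hypothetical placeholder for the kernel's per-CPU segment selector, and the read only yields meaningful data on a kernel that installs this descriptor.

#include <stdio.h>

/* Hypothetical selector for the per-CPU GDT slot (entry 15, RPL 3). */
#define DEMO_PER_CPU_SEG ((15 * 8) | 3)

static void demo_getcpu(unsigned int *cpu, unsigned int *node)
{
        unsigned int p = 0;

        /* LSL loads the 20-bit segment limit for the given selector. */
        __asm__("lsl %1, %0" : "=r" (p) : "r" (DEMO_PER_CPU_SEG));

        *cpu  = p & 0xfff;      /* 12 bits for the CPU */
        *node = p >> 12;        /* 8 bits for the node */
}

int main(void)
{
        unsigned int cpu, node;

        demo_getcpu(&cpu, &node);
        printf("cpu %u node %u\n", cpu, node);
        return 0;
}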
+ */ + + pat_init_cache_modes(); + /* keep using Xen gdt for now; no urgent need to change it */ #ifdef CONFIG_X86_32 diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index a8a1a3d08d4d..5c1f9ace7ae7 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -387,7 +387,7 @@ static pteval_t pte_pfn_to_mfn(pteval_t val) unsigned long mfn; if (!xen_feature(XENFEAT_auto_translated_physmap)) - mfn = get_phys_to_machine(pfn); + mfn = __pfn_to_mfn(pfn); else mfn = pfn; /* @@ -410,13 +410,7 @@ static pteval_t pte_pfn_to_mfn(pteval_t val) __visible pteval_t xen_pte_val(pte_t pte) { pteval_t pteval = pte.pte; -#if 0 - /* If this is a WC pte, convert back from Xen WC to Linux WC */ - if ((pteval & (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)) == _PAGE_PAT) { - WARN_ON(!pat_enabled); - pteval = (pteval & ~_PAGE_PAT) | _PAGE_PWT; - } -#endif + return pte_mfn_to_pfn(pteval); } PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val); @@ -427,47 +421,8 @@ __visible pgdval_t xen_pgd_val(pgd_t pgd) } PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val); -/* - * Xen's PAT setup is part of its ABI, though I assume entries 6 & 7 - * are reserved for now, to correspond to the Intel-reserved PAT - * types. - * - * We expect Linux's PAT set as follows: - * - * Idx PTE flags Linux Xen Default - * 0 WB WB WB - * 1 PWT WC WT WT - * 2 PCD UC- UC- UC- - * 3 PCD PWT UC UC UC - * 4 PAT WB WC WB - * 5 PAT PWT WC WP WT - * 6 PAT PCD UC- rsv UC- - * 7 PAT PCD PWT UC rsv UC - */ - -void xen_set_pat(u64 pat) -{ - /* We expect Linux to use a PAT setting of - * UC UC- WC WB (ignoring the PAT flag) */ - WARN_ON(pat != 0x0007010600070106ull); -} - __visible pte_t xen_make_pte(pteval_t pte) { -#if 0 - /* If Linux is trying to set a WC pte, then map to the Xen WC. - * If _PAGE_PAT is set, then it probably means it is really - * _PAGE_PSE, so avoid fiddling with the PAT mapping and hope - * things work out OK... - * - * (We should never see kernel mappings with _PAGE_PSE set, - * but we could see hugetlbfs mappings, I think.). - */ - if (pat_enabled && !WARN_ON(pte & _PAGE_PAT)) { - if ((pte & (_PAGE_PCD | _PAGE_PWT)) == _PAGE_PWT) - pte = (pte & ~(_PAGE_PCD | _PAGE_PWT)) | _PAGE_PAT; - } -#endif pte = pte_pfn_to_mfn(pte); return native_make_pte(pte); @@ -1158,20 +1113,16 @@ static void __init xen_cleanhighmap(unsigned long vaddr, * instead of somewhere later and be confusing. */ xen_mc_flush(); } -static void __init xen_pagetable_p2m_copy(void) + +static void __init xen_pagetable_p2m_free(void) { unsigned long size; unsigned long addr; - unsigned long new_mfn_list; - - if (xen_feature(XENFEAT_auto_translated_physmap)) - return; size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long)); - new_mfn_list = xen_revector_p2m_tree(); /* No memory or already called. */ - if (!new_mfn_list || new_mfn_list == xen_start_info->mfn_list) + if ((unsigned long)xen_p2m_addr == xen_start_info->mfn_list) return; /* using __ka address and sticking INVALID_P2M_ENTRY! */ @@ -1189,8 +1140,6 @@ static void __init xen_pagetable_p2m_copy(void) size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long)); memblock_free(__pa(xen_start_info->mfn_list), size); - /* And revector! 
Bye bye old array */ - xen_start_info->mfn_list = new_mfn_list; /* At this stage, cleanup_highmap has already cleaned __ka space * from _brk_limit way up to the max_pfn_mapped (which is the end of @@ -1214,17 +1163,35 @@ static void __init xen_pagetable_p2m_copy(void) } #endif -static void __init xen_pagetable_init(void) +static void __init xen_pagetable_p2m_setup(void) { - paging_init(); + if (xen_feature(XENFEAT_auto_translated_physmap)) + return; + + xen_vmalloc_p2m_tree(); + #ifdef CONFIG_X86_64 - xen_pagetable_p2m_copy(); + xen_pagetable_p2m_free(); #endif + /* And revector! Bye bye old array */ + xen_start_info->mfn_list = (unsigned long)xen_p2m_addr; +} + +static void __init xen_pagetable_init(void) +{ + paging_init(); + xen_post_allocator_init(); + + xen_pagetable_p2m_setup(); + /* Allocate and initialize top and mid mfn levels for p2m structure */ xen_build_mfn_list_list(); + /* Remap memory freed due to conflicts with E820 map */ + if (!xen_feature(XENFEAT_auto_translated_physmap)) + xen_remap_memory(); + xen_setup_shared_info(); - xen_post_allocator_init(); } static void xen_write_cr2(unsigned long cr2) { @@ -1457,8 +1424,10 @@ static int xen_pgd_alloc(struct mm_struct *mm) page->private = (unsigned long)user_pgd; if (user_pgd != NULL) { +#ifdef CONFIG_X86_VSYSCALL_EMULATION user_pgd[pgd_index(VSYSCALL_ADDR)] = __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE); +#endif ret = 0; } @@ -2021,7 +1990,7 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot) # ifdef CONFIG_HIGHMEM case FIX_KMAP_BEGIN ... FIX_KMAP_END: # endif -#else +#elif defined(CONFIG_X86_VSYSCALL_EMULATION) case VSYSCALL_PAGE: #endif case FIX_TEXT_POKE0: @@ -2060,7 +2029,7 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot) __native_set_fixmap(idx, pte); -#ifdef CONFIG_X86_64 +#ifdef CONFIG_X86_VSYSCALL_EMULATION /* Replicate changes to map the vsyscall page into the user pagetable vsyscall mapping. */ if (idx == VSYSCALL_PAGE) { diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index b456b048eca9..edbc7a63fd73 100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c @@ -3,21 +3,22 @@ * guests themselves, but it must also access and update the p2m array * during suspend/resume when all the pages are reallocated. * - * The p2m table is logically a flat array, but we implement it as a - * three-level tree to allow the address space to be sparse. + * The logical flat p2m table is mapped to a linear kernel memory area. + * For accesses by Xen a three-level tree linked via mfns only is set up to + * allow the address space to be sparse. * - * Xen - * | - * p2m_top p2m_top_mfn - * / \ / \ - * p2m_mid p2m_mid p2m_mid_mfn p2m_mid_mfn - * / \ / \ / / - * p2m p2m p2m p2m p2m p2m p2m ... + * Xen + * | + * p2m_top_mfn + * / \ + * p2m_mid_mfn p2m_mid_mfn + * / / + * p2m p2m p2m ... * * The p2m_mid_mfn pages are mapped by p2m_top_mfn_p. * - * The p2m_top and p2m_top_mfn levels are limited to 1 page, so the - * maximum representable pseudo-physical address space is: + * The p2m_top_mfn level is limited to 1 page, so the maximum representable + * pseudo-physical address space is: * P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE pages * * P2M_PER_PAGE depends on the architecture, as a mfn is always @@ -30,6 +31,9 @@ * leaf entries, or for the top root, or middle one, for which there is a void * entry, we assume it is "missing". So (for example) * pfn_to_mfn(0x90909090)=INVALID_P2M_ENTRY. + * We have a dedicated page p2m_missing with all entries being + * INVALID_P2M_ENTRY. 
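The reworked p2m.c header comment above describes the new lookup model: the p2m is a flat array indexed by pfn, pfns beyond the array are treated as identity-mapped, and shared read-only pages back ranges that are wholly missing or identity. A toy version of that lookup, assuming 64-bit unsigned long; the sentinel and bit positions are illustrative rather than an exact copy of the kernel's encodings.

#include <stdio.h>

#define DEMO_INVALID_ENTRY   (~0UL)
#define DEMO_IDENTITY_BIT    (1UL << 62)
#define DEMO_IDENTITY(pfn)   ((pfn) | DEMO_IDENTITY_BIT)

static unsigned long demo_get_phys_to_machine(const unsigned long *p2m,
                                               unsigned long size,
                                               unsigned long pfn)
{
        if (pfn >= size)                /* beyond the array: identity-mapped */
                return DEMO_IDENTITY(pfn);

        return p2m[pfn];                /* a real mfn, an identity frame,
                                         * or DEMO_INVALID_ENTRY (missing) */
}

int main(void)
{
        unsigned long p2m[4] = { 100, DEMO_IDENTITY(1), DEMO_INVALID_ENTRY, 103 };
        unsigned long pfn;

        for (pfn = 0; pfn < 6; pfn++)
                printf("pfn %lu -> %#lx\n", pfn,
                       demo_get_phys_to_machine(p2m, 4, pfn));
        return 0;
}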
This page may be referenced multiple times in the p2m + * list/tree in case there are multiple areas with P2M_PER_PAGE invalid pfns. * * We also have the possibility of setting 1-1 mappings on certain regions, so * that: @@ -39,122 +43,20 @@ * PCI BARs, or ACPI spaces), we can create mappings easily because we * get the PFN value to match the MFN. * - * For this to work efficiently we have one new page p2m_identity and - * allocate (via reserved_brk) any other pages we need to cover the sides - * (1GB or 4MB boundary violations). All entries in p2m_identity are set to - * INVALID_P2M_ENTRY type (Xen toolstack only recognizes that and MFNs, - * no other fancy value). + * For this to work efficiently we have one new page p2m_identity. All entries + * in p2m_identity are set to INVALID_P2M_ENTRY type (Xen toolstack only + * recognizes that and MFNs, no other fancy value). * * On lookup we spot that the entry points to p2m_identity and return the * identity value instead of dereferencing and returning INVALID_P2M_ENTRY. * If the entry points to an allocated page, we just proceed as before and - * return the PFN. If the PFN has IDENTITY_FRAME_BIT set we unmask that in + * return the PFN. If the PFN has IDENTITY_FRAME_BIT set we unmask that in * appropriate functions (pfn_to_mfn). * * The reason for having the IDENTITY_FRAME_BIT instead of just returning the * PFN is that we could find ourselves where pfn_to_mfn(pfn)==pfn for a * non-identity pfn. To protect ourselves against we elect to set (and get) the * IDENTITY_FRAME_BIT on all identity mapped PFNs. - * - * This simplistic diagram is used to explain the more subtle piece of code. - * There is also a digram of the P2M at the end that can help. - * Imagine your E820 looking as so: - * - * 1GB 2GB 4GB - * /-------------------+---------\/----\ /----------\ /---+-----\ - * | System RAM | Sys RAM ||ACPI| | reserved | | Sys RAM | - * \-------------------+---------/\----/ \----------/ \---+-----/ - * ^- 1029MB ^- 2001MB - * - * [1029MB = 263424 (0x40500), 2001MB = 512256 (0x7D100), - * 2048MB = 524288 (0x80000)] - * - * And dom0_mem=max:3GB,1GB is passed in to the guest, meaning memory past 1GB - * is actually not present (would have to kick the balloon driver to put it in). - * - * When we are told to set the PFNs for identity mapping (see patch: "xen/setup: - * Set identity mapping for non-RAM E820 and E820 gaps.") we pass in the start - * of the PFN and the end PFN (263424 and 512256 respectively). The first step - * is to reserve_brk a top leaf page if the p2m[1] is missing. The top leaf page - * covers 512^2 of page estate (1GB) and in case the start or end PFN is not - * aligned on 512^2*PAGE_SIZE (1GB) we reserve_brk new middle and leaf pages as - * required to split any existing p2m_mid_missing middle pages. - * - * With the E820 example above, 263424 is not 1GB aligned so we allocate a - * reserve_brk page which will cover the PFNs estate from 0x40000 to 0x80000. - * Each entry in the allocate page is "missing" (points to p2m_missing). - * - * Next stage is to determine if we need to do a more granular boundary check - * on the 4MB (or 2MB depending on architecture) off the start and end pfn's. - * We check if the start pfn and end pfn violate that boundary check, and if - * so reserve_brk a (p2m[x][y]) leaf page. This way we have a much finer - * granularity of setting which PFNs are missing and which ones are identity. - * In our example 263424 and 512256 both fail the check so we reserve_brk two - * pages. 
Populate them with INVALID_P2M_ENTRY (so they both have "missing" - * values) and assign them to p2m[1][2] and p2m[1][488] respectively. - * - * At this point we would at minimum reserve_brk one page, but could be up to - * three. Each call to set_phys_range_identity has at maximum a three page - * cost. If we were to query the P2M at this stage, all those entries from - * start PFN through end PFN (so 1029MB -> 2001MB) would return - * INVALID_P2M_ENTRY ("missing"). - * - * The next step is to walk from the start pfn to the end pfn setting - * the IDENTITY_FRAME_BIT on each PFN. This is done in set_phys_range_identity. - * If we find that the middle entry is pointing to p2m_missing we can swap it - * over to p2m_identity - this way covering 4MB (or 2MB) PFN space (and - * similarly swapping p2m_mid_missing for p2m_mid_identity for larger regions). - * At this point we do not need to worry about boundary aligment (so no need to - * reserve_brk a middle page, figure out which PFNs are "missing" and which - * ones are identity), as that has been done earlier. If we find that the - * middle leaf is not occupied by p2m_identity or p2m_missing, we dereference - * that page (which covers 512 PFNs) and set the appropriate PFN with - * IDENTITY_FRAME_BIT. In our example 263424 and 512256 end up there, and we - * set from p2m[1][2][256->511] and p2m[1][488][0->256] with - * IDENTITY_FRAME_BIT set. - * - * All other regions that are void (or not filled) either point to p2m_missing - * (considered missing) or have the default value of INVALID_P2M_ENTRY (also - * considered missing). In our case, p2m[1][2][0->255] and p2m[1][488][257->511] - * contain the INVALID_P2M_ENTRY value and are considered "missing." - * - * Finally, the region beyond the end of of the E820 (4 GB in this example) - * is set to be identity (in case there are MMIO regions placed here). - * - * This is what the p2m ends up looking (for the E820 above) with this - * fabulous drawing: - * - * p2m /--------------\ - * /-----\ | &mfn_list[0],| /-----------------\ - * | 0 |------>| &mfn_list[1],| /---------------\ | ~0, ~0, .. | - * |-----| | ..., ~0, ~0 | | ~0, ~0, [x]---+----->| IDENTITY [@256] | - * | 1 |---\ \--------------/ | [p2m_identity]+\ | IDENTITY [@257] | - * |-----| \ | [p2m_identity]+\\ | .... | - * | 2 |--\ \-------------------->| ... | \\ \----------------/ - * |-----| \ \---------------/ \\ - * | 3 |-\ \ \\ p2m_identity [1] - * |-----| \ \-------------------->/---------------\ /-----------------\ - * | .. |\ | | [p2m_identity]+-->| ~0, ~0, ~0, ... | - * \-----/ | | | [p2m_identity]+-->| ..., ~0 | - * | | | .... | \-----------------/ - * | | +-[x], ~0, ~0.. +\ - * | | \---------------/ \ - * | | \-> /---------------\ - * | V p2m_mid_missing p2m_missing | IDENTITY[@0] | - * | /-----------------\ /------------\ | IDENTITY[@256]| - * | | [p2m_missing] +---->| ~0, ~0, ...| | ~0, ~0, .... | - * | | [p2m_missing] +---->| ..., ~0 | \---------------/ - * | | ... | \------------/ - * | \-----------------/ - * | - * | p2m_mid_identity - * | /-----------------\ - * \-->| [p2m_identity] +---->[1] - * | [p2m_identity] +---->[1] - * | ... | - * \-----------------/ - * - * where ~0 is INVALID_P2M_ENTRY. 
IDENTITY is (PFN | IDENTITY_BIT) */ #include <linux/init.h> @@ -164,9 +66,11 @@ #include <linux/sched.h> #include <linux/seq_file.h> #include <linux/bootmem.h> +#include <linux/slab.h> #include <asm/cache.h> #include <asm/setup.h> +#include <asm/uaccess.h> #include <asm/xen/page.h> #include <asm/xen/hypercall.h> @@ -178,31 +82,26 @@ #include "multicalls.h" #include "xen-ops.h" +#define PMDS_PER_MID_PAGE (P2M_MID_PER_PAGE / PTRS_PER_PTE) + static void __init m2p_override_init(void); +unsigned long *xen_p2m_addr __read_mostly; +EXPORT_SYMBOL_GPL(xen_p2m_addr); +unsigned long xen_p2m_size __read_mostly; +EXPORT_SYMBOL_GPL(xen_p2m_size); unsigned long xen_max_p2m_pfn __read_mostly; +EXPORT_SYMBOL_GPL(xen_max_p2m_pfn); + +static DEFINE_SPINLOCK(p2m_update_lock); static unsigned long *p2m_mid_missing_mfn; static unsigned long *p2m_top_mfn; static unsigned long **p2m_top_mfn_p; - -/* Placeholders for holes in the address space */ -static RESERVE_BRK_ARRAY(unsigned long, p2m_missing, P2M_PER_PAGE); -static RESERVE_BRK_ARRAY(unsigned long *, p2m_mid_missing, P2M_MID_PER_PAGE); - -static RESERVE_BRK_ARRAY(unsigned long **, p2m_top, P2M_TOP_PER_PAGE); - -static RESERVE_BRK_ARRAY(unsigned long, p2m_identity, P2M_PER_PAGE); -static RESERVE_BRK_ARRAY(unsigned long *, p2m_mid_identity, P2M_MID_PER_PAGE); - -RESERVE_BRK(p2m_mid, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE))); - -/* For each I/O range remapped we may lose up to two leaf pages for the boundary - * violations and three mid pages to cover up to 3GB. With - * early_can_reuse_p2m_middle() most of the leaf pages will be reused by the - * remapped region. - */ -RESERVE_BRK(p2m_identity_remap, PAGE_SIZE * 2 * 3 * MAX_REMAP_RANGES); +static unsigned long *p2m_missing; +static unsigned long *p2m_identity; +static pte_t *p2m_missing_pte; +static pte_t *p2m_identity_pte; static inline unsigned p2m_top_index(unsigned long pfn) { @@ -220,14 +119,6 @@ static inline unsigned p2m_index(unsigned long pfn) return pfn % P2M_PER_PAGE; } -static void p2m_top_init(unsigned long ***top) -{ - unsigned i; - - for (i = 0; i < P2M_TOP_PER_PAGE; i++) - top[i] = p2m_mid_missing; -} - static void p2m_top_mfn_init(unsigned long *top) { unsigned i; @@ -244,28 +135,43 @@ static void p2m_top_mfn_p_init(unsigned long **top) top[i] = p2m_mid_missing_mfn; } -static void p2m_mid_init(unsigned long **mid, unsigned long *leaf) +static void p2m_mid_mfn_init(unsigned long *mid, unsigned long *leaf) { unsigned i; for (i = 0; i < P2M_MID_PER_PAGE; i++) - mid[i] = leaf; + mid[i] = virt_to_mfn(leaf); } -static void p2m_mid_mfn_init(unsigned long *mid, unsigned long *leaf) +static void p2m_init(unsigned long *p2m) { unsigned i; - for (i = 0; i < P2M_MID_PER_PAGE; i++) - mid[i] = virt_to_mfn(leaf); + for (i = 0; i < P2M_PER_PAGE; i++) + p2m[i] = INVALID_P2M_ENTRY; } -static void p2m_init(unsigned long *p2m) +static void p2m_init_identity(unsigned long *p2m, unsigned long pfn) { unsigned i; - for (i = 0; i < P2M_MID_PER_PAGE; i++) - p2m[i] = INVALID_P2M_ENTRY; + for (i = 0; i < P2M_PER_PAGE; i++) + p2m[i] = IDENTITY_FRAME(pfn + i); +} + +static void * __ref alloc_p2m_page(void) +{ + if (unlikely(!slab_is_available())) + return alloc_bootmem_align(PAGE_SIZE, PAGE_SIZE); + + return (void *)__get_free_page(GFP_KERNEL | __GFP_REPEAT); +} + +/* Only to be called in case of a race for a page just allocated! 
*/ +static void free_p2m_page(void *p) +{ + BUG_ON(!slab_is_available()); + free_page((unsigned long)p); } /* @@ -280,40 +186,46 @@ static void p2m_init(unsigned long *p2m) */ void __ref xen_build_mfn_list_list(void) { - unsigned long pfn; + unsigned long pfn, mfn; + pte_t *ptep; + unsigned int level, topidx, mididx; + unsigned long *mid_mfn_p; if (xen_feature(XENFEAT_auto_translated_physmap)) return; /* Pre-initialize p2m_top_mfn to be completely missing */ if (p2m_top_mfn == NULL) { - p2m_mid_missing_mfn = alloc_bootmem_align(PAGE_SIZE, PAGE_SIZE); + p2m_mid_missing_mfn = alloc_p2m_page(); p2m_mid_mfn_init(p2m_mid_missing_mfn, p2m_missing); - p2m_top_mfn_p = alloc_bootmem_align(PAGE_SIZE, PAGE_SIZE); + p2m_top_mfn_p = alloc_p2m_page(); p2m_top_mfn_p_init(p2m_top_mfn_p); - p2m_top_mfn = alloc_bootmem_align(PAGE_SIZE, PAGE_SIZE); + p2m_top_mfn = alloc_p2m_page(); p2m_top_mfn_init(p2m_top_mfn); } else { /* Reinitialise, mfn's all change after migration */ p2m_mid_mfn_init(p2m_mid_missing_mfn, p2m_missing); } - for (pfn = 0; pfn < xen_max_p2m_pfn; pfn += P2M_PER_PAGE) { - unsigned topidx = p2m_top_index(pfn); - unsigned mididx = p2m_mid_index(pfn); - unsigned long **mid; - unsigned long *mid_mfn_p; + for (pfn = 0; pfn < xen_max_p2m_pfn && pfn < MAX_P2M_PFN; + pfn += P2M_PER_PAGE) { + topidx = p2m_top_index(pfn); + mididx = p2m_mid_index(pfn); - mid = p2m_top[topidx]; mid_mfn_p = p2m_top_mfn_p[topidx]; + ptep = lookup_address((unsigned long)(xen_p2m_addr + pfn), + &level); + BUG_ON(!ptep || level != PG_LEVEL_4K); + mfn = pte_mfn(*ptep); + ptep = (pte_t *)((unsigned long)ptep & ~(PAGE_SIZE - 1)); /* Don't bother allocating any mfn mid levels if * they're just missing, just update the stored mfn, * since all could have changed over a migrate. */ - if (mid == p2m_mid_missing) { + if (ptep == p2m_missing_pte || ptep == p2m_identity_pte) { BUG_ON(mididx); BUG_ON(mid_mfn_p != p2m_mid_missing_mfn); p2m_top_mfn[topidx] = virt_to_mfn(p2m_mid_missing_mfn); @@ -322,19 +234,14 @@ void __ref xen_build_mfn_list_list(void) } if (mid_mfn_p == p2m_mid_missing_mfn) { - /* - * XXX boot-time only! We should never find - * missing parts of the mfn tree after - * runtime. 
- */ - mid_mfn_p = alloc_bootmem_align(PAGE_SIZE, PAGE_SIZE); + mid_mfn_p = alloc_p2m_page(); p2m_mid_mfn_init(mid_mfn_p, p2m_missing); p2m_top_mfn_p[topidx] = mid_mfn_p; } p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p); - mid_mfn_p[mididx] = virt_to_mfn(mid[mididx]); + mid_mfn_p[mididx] = mfn; } } @@ -353,171 +260,235 @@ void xen_setup_mfn_list_list(void) /* Set up p2m_top to point to the domain-builder provided p2m pages */ void __init xen_build_dynamic_phys_to_machine(void) { - unsigned long *mfn_list; - unsigned long max_pfn; unsigned long pfn; if (xen_feature(XENFEAT_auto_translated_physmap)) return; - mfn_list = (unsigned long *)xen_start_info->mfn_list; - max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages); - xen_max_p2m_pfn = max_pfn; + xen_p2m_addr = (unsigned long *)xen_start_info->mfn_list; + xen_p2m_size = ALIGN(xen_start_info->nr_pages, P2M_PER_PAGE); - p2m_missing = extend_brk(PAGE_SIZE, PAGE_SIZE); - p2m_init(p2m_missing); - p2m_identity = extend_brk(PAGE_SIZE, PAGE_SIZE); - p2m_init(p2m_identity); + for (pfn = xen_start_info->nr_pages; pfn < xen_p2m_size; pfn++) + xen_p2m_addr[pfn] = INVALID_P2M_ENTRY; - p2m_mid_missing = extend_brk(PAGE_SIZE, PAGE_SIZE); - p2m_mid_init(p2m_mid_missing, p2m_missing); - p2m_mid_identity = extend_brk(PAGE_SIZE, PAGE_SIZE); - p2m_mid_init(p2m_mid_identity, p2m_identity); + xen_max_p2m_pfn = xen_p2m_size; +} - p2m_top = extend_brk(PAGE_SIZE, PAGE_SIZE); - p2m_top_init(p2m_top); +#define P2M_TYPE_IDENTITY 0 +#define P2M_TYPE_MISSING 1 +#define P2M_TYPE_PFN 2 +#define P2M_TYPE_UNKNOWN 3 - /* - * The domain builder gives us a pre-constructed p2m array in - * mfn_list for all the pages initially given to us, so we just - * need to graft that into our tree structure. - */ - for (pfn = 0; pfn < max_pfn; pfn += P2M_PER_PAGE) { - unsigned topidx = p2m_top_index(pfn); - unsigned mididx = p2m_mid_index(pfn); +static int xen_p2m_elem_type(unsigned long pfn) +{ + unsigned long mfn; - if (p2m_top[topidx] == p2m_mid_missing) { - unsigned long **mid = extend_brk(PAGE_SIZE, PAGE_SIZE); - p2m_mid_init(mid, p2m_missing); + if (pfn >= xen_p2m_size) + return P2M_TYPE_IDENTITY; - p2m_top[topidx] = mid; - } + mfn = xen_p2m_addr[pfn]; - /* - * As long as the mfn_list has enough entries to completely - * fill a p2m page, pointing into the array is ok. But if - * not the entries beyond the last pfn will be undefined. - */ - if (unlikely(pfn + P2M_PER_PAGE > max_pfn)) { - unsigned long p2midx; + if (mfn == INVALID_P2M_ENTRY) + return P2M_TYPE_MISSING; - p2midx = max_pfn % P2M_PER_PAGE; - for ( ; p2midx < P2M_PER_PAGE; p2midx++) - mfn_list[pfn + p2midx] = INVALID_P2M_ENTRY; - } - p2m_top[topidx][mididx] = &mfn_list[pfn]; - } + if (mfn & IDENTITY_FRAME_BIT) + return P2M_TYPE_IDENTITY; - m2p_override_init(); + return P2M_TYPE_PFN; } -#ifdef CONFIG_X86_64 -unsigned long __init xen_revector_p2m_tree(void) + +static void __init xen_rebuild_p2m_list(unsigned long *p2m) { - unsigned long va_start; - unsigned long va_end; + unsigned int i, chunk; unsigned long pfn; - unsigned long pfn_free = 0; - unsigned long *mfn_list = NULL; - unsigned long size; - - va_start = xen_start_info->mfn_list; - /*We copy in increments of P2M_PER_PAGE * sizeof(unsigned long), - * so make sure it is rounded up to that */ - size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long)); - va_end = va_start + size; - - /* If we were revectored already, don't do it again. 
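xen_rebuild_p2m_list(), whose body continues in the hunk just below, classifies the initial p2m in chunks: when every entry in a chunk has the same type (missing or identity), the whole chunk can be backed by one shared read-only page or PMD, otherwise it falls back to per-page mappings. A stripped-down sketch of that classification step with invented type codes and no paging involved; the real function also keeps chunks of real pfns on their original pages, a detail omitted here.

#include <stdio.h>

enum demo_type { DEMO_IDENTITY, DEMO_MISSING, DEMO_PFN };

/* Return the common type of types[start..start+chunk), or -1 if mixed. */
static int demo_chunk_type(const enum demo_type *types, unsigned long start,
                           unsigned long chunk)
{
        unsigned long i;
        enum demo_type type = types[start];

        for (i = 1; i < chunk; i++)
                if (types[start + i] != type)
                        return -1;      /* mixed: map page by page */

        return type;                    /* uniform: share one RO backing */
}

int main(void)
{
        enum demo_type types[8] = { DEMO_MISSING, DEMO_MISSING, DEMO_MISSING,
                                    DEMO_MISSING, DEMO_PFN, DEMO_PFN,
                                    DEMO_IDENTITY, DEMO_IDENTITY };

        printf("chunk 0..3: %d\n", demo_chunk_type(types, 0, 4)); /* uniform */
        printf("chunk 4..7: %d\n", demo_chunk_type(types, 4, 4)); /* mixed */
        return 0;
}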
*/ - if (va_start <= __START_KERNEL_map && va_start >= __PAGE_OFFSET) - return 0; + unsigned long *mfns; + pte_t *ptep; + pmd_t *pmdp; + int type; - mfn_list = alloc_bootmem_align(size, PAGE_SIZE); - if (!mfn_list) { - pr_warn("Could not allocate space for a new P2M tree!\n"); - return xen_start_info->mfn_list; - } - /* Fill it out with INVALID_P2M_ENTRY value */ - memset(mfn_list, 0xFF, size); + p2m_missing = alloc_p2m_page(); + p2m_init(p2m_missing); + p2m_identity = alloc_p2m_page(); + p2m_init(p2m_identity); - for (pfn = 0; pfn < ALIGN(MAX_DOMAIN_PAGES, P2M_PER_PAGE); pfn += P2M_PER_PAGE) { - unsigned topidx = p2m_top_index(pfn); - unsigned mididx; - unsigned long *mid_p; + p2m_missing_pte = alloc_p2m_page(); + paravirt_alloc_pte(&init_mm, __pa(p2m_missing_pte) >> PAGE_SHIFT); + p2m_identity_pte = alloc_p2m_page(); + paravirt_alloc_pte(&init_mm, __pa(p2m_identity_pte) >> PAGE_SHIFT); + for (i = 0; i < PTRS_PER_PTE; i++) { + set_pte(p2m_missing_pte + i, + pfn_pte(PFN_DOWN(__pa(p2m_missing)), PAGE_KERNEL_RO)); + set_pte(p2m_identity_pte + i, + pfn_pte(PFN_DOWN(__pa(p2m_identity)), PAGE_KERNEL_RO)); + } - if (!p2m_top[topidx]) + for (pfn = 0; pfn < xen_max_p2m_pfn; pfn += chunk) { + /* + * Try to map missing/identity PMDs or p2m-pages if possible. + * We have to respect the structure of the mfn_list_list + * which will be built just afterwards. + * Chunk size to test is one p2m page if we are in the middle + * of a mfn_list_list mid page and the complete mid page area + * if we are at index 0 of the mid page. Please note that a + * mid page might cover more than one PMD, e.g. on 32 bit PAE + * kernels. + */ + chunk = (pfn & (P2M_PER_PAGE * P2M_MID_PER_PAGE - 1)) ? + P2M_PER_PAGE : P2M_PER_PAGE * P2M_MID_PER_PAGE; + + type = xen_p2m_elem_type(pfn); + i = 0; + if (type != P2M_TYPE_PFN) + for (i = 1; i < chunk; i++) + if (xen_p2m_elem_type(pfn + i) != type) + break; + if (i < chunk) + /* Reset to minimal chunk size. */ + chunk = P2M_PER_PAGE; + + if (type == P2M_TYPE_PFN || i < chunk) { + /* Use initial p2m page contents. */ +#ifdef CONFIG_X86_64 + mfns = alloc_p2m_page(); + copy_page(mfns, xen_p2m_addr + pfn); +#else + mfns = xen_p2m_addr + pfn; +#endif + ptep = populate_extra_pte((unsigned long)(p2m + pfn)); + set_pte(ptep, + pfn_pte(PFN_DOWN(__pa(mfns)), PAGE_KERNEL)); continue; + } - if (p2m_top[topidx] == p2m_mid_missing) + if (chunk == P2M_PER_PAGE) { + /* Map complete missing or identity p2m-page. */ + mfns = (type == P2M_TYPE_MISSING) ? + p2m_missing : p2m_identity; + ptep = populate_extra_pte((unsigned long)(p2m + pfn)); + set_pte(ptep, + pfn_pte(PFN_DOWN(__pa(mfns)), PAGE_KERNEL_RO)); continue; + } - mididx = p2m_mid_index(pfn); - mid_p = p2m_top[topidx][mididx]; - if (!mid_p) - continue; - if ((mid_p == p2m_missing) || (mid_p == p2m_identity)) - continue; + /* Complete missing or identity PMD(s) can be mapped. */ + ptep = (type == P2M_TYPE_MISSING) ? + p2m_missing_pte : p2m_identity_pte; + for (i = 0; i < PMDS_PER_MID_PAGE; i++) { + pmdp = populate_extra_pmd( + (unsigned long)(p2m + pfn + i * PTRS_PER_PTE)); + set_pmd(pmdp, __pmd(__pa(ptep) | _KERNPG_TABLE)); + } + } +} - if ((unsigned long)mid_p == INVALID_P2M_ENTRY) - continue; +void __init xen_vmalloc_p2m_tree(void) +{ + static struct vm_struct vm; - /* The old va. 
Rebase it on mfn_list */ - if (mid_p >= (unsigned long *)va_start && mid_p <= (unsigned long *)va_end) { - unsigned long *new; + vm.flags = VM_ALLOC; + vm.size = ALIGN(sizeof(unsigned long) * xen_max_p2m_pfn, + PMD_SIZE * PMDS_PER_MID_PAGE); + vm_area_register_early(&vm, PMD_SIZE * PMDS_PER_MID_PAGE); + pr_notice("p2m virtual area at %p, size is %lx\n", vm.addr, vm.size); - if (pfn_free > (size / sizeof(unsigned long))) { - WARN(1, "Only allocated for %ld pages, but we want %ld!\n", - size / sizeof(unsigned long), pfn_free); - return 0; - } - new = &mfn_list[pfn_free]; + xen_max_p2m_pfn = vm.size / sizeof(unsigned long); - copy_page(new, mid_p); - p2m_top[topidx][mididx] = &mfn_list[pfn_free]; + xen_rebuild_p2m_list(vm.addr); - pfn_free += P2M_PER_PAGE; + xen_p2m_addr = vm.addr; + xen_p2m_size = xen_max_p2m_pfn; - } - /* This should be the leafs allocated for identity from _brk. */ - } - return (unsigned long)mfn_list; + xen_inv_extra_mem(); + m2p_override_init(); } -#else -unsigned long __init xen_revector_p2m_tree(void) -{ - return 0; -} -#endif + unsigned long get_phys_to_machine(unsigned long pfn) { - unsigned topidx, mididx, idx; + pte_t *ptep; + unsigned int level; + + if (unlikely(pfn >= xen_p2m_size)) { + if (pfn < xen_max_p2m_pfn) + return xen_chk_extra_mem(pfn); - if (unlikely(pfn >= MAX_P2M_PFN)) return IDENTITY_FRAME(pfn); + } - topidx = p2m_top_index(pfn); - mididx = p2m_mid_index(pfn); - idx = p2m_index(pfn); + ptep = lookup_address((unsigned long)(xen_p2m_addr + pfn), &level); + BUG_ON(!ptep || level != PG_LEVEL_4K); /* * The INVALID_P2M_ENTRY is filled in both p2m_*identity * and in p2m_*missing, so returning the INVALID_P2M_ENTRY * would be wrong. */ - if (p2m_top[topidx][mididx] == p2m_identity) + if (pte_pfn(*ptep) == PFN_DOWN(__pa(p2m_identity))) return IDENTITY_FRAME(pfn); - return p2m_top[topidx][mididx][idx]; + return xen_p2m_addr[pfn]; } EXPORT_SYMBOL_GPL(get_phys_to_machine); -static void *alloc_p2m_page(void) +/* + * Allocate new pmd(s). It is checked whether the old pmd is still in place. + * If not, nothing is changed. This is okay as the only reason for allocating + * a new pmd is to replace p2m_missing_pte or p2m_identity_pte by a individual + * pmd. In case of PAE/x86-32 there are multiple pmds to allocate! + */ +static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *ptep, pte_t *pte_pg) { - return (void *)__get_free_page(GFP_KERNEL | __GFP_REPEAT); -} + pte_t *ptechk; + pte_t *pteret = ptep; + pte_t *pte_newpg[PMDS_PER_MID_PAGE]; + pmd_t *pmdp; + unsigned int level; + unsigned long flags; + unsigned long vaddr; + int i; -static void free_p2m_page(void *p) -{ - free_page((unsigned long)p); + /* Do all allocations first to bail out in error case. 
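alloc_p2m_pmd(), continued below, uses a common kernel idiom: obtain every page it might need before taking the update lock, and unwind the partial allocations if any one of them fails; this keeps the locked section free of allocations that could sleep. The same shape in miniature, with plain malloc()/free() standing in for alloc_p2m_page()/free_p2m_page().

#include <stdlib.h>

#define DEMO_NPAGES 4

/* Allocate all pages up front; on failure, free whatever was obtained. */
static int demo_alloc_all(void *pages[DEMO_NPAGES])
{
        int i;

        for (i = 0; i < DEMO_NPAGES; i++) {
                pages[i] = malloc(4096);
                if (!pages[i]) {
                        for (i--; i >= 0; i--)
                                free(pages[i]);
                        return -1;      /* caller can bail out cleanly */
                }
        }
        return 0;
}

int main(void)
{
        void *pages[DEMO_NPAGES];
        int i;

        if (demo_alloc_all(pages) == 0)
                for (i = 0; i < DEMO_NPAGES; i++)
                        free(pages[i]);
        return 0;
}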
*/ + for (i = 0; i < PMDS_PER_MID_PAGE; i++) { + pte_newpg[i] = alloc_p2m_page(); + if (!pte_newpg[i]) { + for (i--; i >= 0; i--) + free_p2m_page(pte_newpg[i]); + + return NULL; + } + } + + vaddr = addr & ~(PMD_SIZE * PMDS_PER_MID_PAGE - 1); + + for (i = 0; i < PMDS_PER_MID_PAGE; i++) { + copy_page(pte_newpg[i], pte_pg); + paravirt_alloc_pte(&init_mm, __pa(pte_newpg[i]) >> PAGE_SHIFT); + + pmdp = lookup_pmd_address(vaddr); + BUG_ON(!pmdp); + + spin_lock_irqsave(&p2m_update_lock, flags); + + ptechk = lookup_address(vaddr, &level); + if (ptechk == pte_pg) { + set_pmd(pmdp, + __pmd(__pa(pte_newpg[i]) | _KERNPG_TABLE)); + if (vaddr == (addr & ~(PMD_SIZE - 1))) + pteret = pte_offset_kernel(pmdp, addr); + pte_newpg[i] = NULL; + } + + spin_unlock_irqrestore(&p2m_update_lock, flags); + + if (pte_newpg[i]) { + paravirt_release_pte(__pa(pte_newpg[i]) >> PAGE_SHIFT); + free_p2m_page(pte_newpg[i]); + } + + vaddr += PMD_SIZE; + } + + return pteret; } /* @@ -530,58 +501,62 @@ static void free_p2m_page(void *p) static bool alloc_p2m(unsigned long pfn) { unsigned topidx, mididx; - unsigned long ***top_p, **mid; unsigned long *top_mfn_p, *mid_mfn; - unsigned long *p2m_orig; + pte_t *ptep, *pte_pg; + unsigned int level; + unsigned long flags; + unsigned long addr = (unsigned long)(xen_p2m_addr + pfn); + unsigned long p2m_pfn; topidx = p2m_top_index(pfn); mididx = p2m_mid_index(pfn); - top_p = &p2m_top[topidx]; - mid = ACCESS_ONCE(*top_p); + ptep = lookup_address(addr, &level); + BUG_ON(!ptep || level != PG_LEVEL_4K); + pte_pg = (pte_t *)((unsigned long)ptep & ~(PAGE_SIZE - 1)); - if (mid == p2m_mid_missing) { - /* Mid level is missing, allocate a new one */ - mid = alloc_p2m_page(); - if (!mid) + if (pte_pg == p2m_missing_pte || pte_pg == p2m_identity_pte) { + /* PMD level is missing, allocate a new one */ + ptep = alloc_p2m_pmd(addr, ptep, pte_pg); + if (!ptep) return false; - - p2m_mid_init(mid, p2m_missing); - - if (cmpxchg(top_p, p2m_mid_missing, mid) != p2m_mid_missing) - free_p2m_page(mid); } - top_mfn_p = &p2m_top_mfn[topidx]; - mid_mfn = ACCESS_ONCE(p2m_top_mfn_p[topidx]); + if (p2m_top_mfn) { + top_mfn_p = &p2m_top_mfn[topidx]; + mid_mfn = ACCESS_ONCE(p2m_top_mfn_p[topidx]); - BUG_ON(virt_to_mfn(mid_mfn) != *top_mfn_p); + BUG_ON(virt_to_mfn(mid_mfn) != *top_mfn_p); - if (mid_mfn == p2m_mid_missing_mfn) { - /* Separately check the mid mfn level */ - unsigned long missing_mfn; - unsigned long mid_mfn_mfn; - unsigned long old_mfn; + if (mid_mfn == p2m_mid_missing_mfn) { + /* Separately check the mid mfn level */ + unsigned long missing_mfn; + unsigned long mid_mfn_mfn; + unsigned long old_mfn; - mid_mfn = alloc_p2m_page(); - if (!mid_mfn) - return false; + mid_mfn = alloc_p2m_page(); + if (!mid_mfn) + return false; - p2m_mid_mfn_init(mid_mfn, p2m_missing); + p2m_mid_mfn_init(mid_mfn, p2m_missing); - missing_mfn = virt_to_mfn(p2m_mid_missing_mfn); - mid_mfn_mfn = virt_to_mfn(mid_mfn); - old_mfn = cmpxchg(top_mfn_p, missing_mfn, mid_mfn_mfn); - if (old_mfn != missing_mfn) { - free_p2m_page(mid_mfn); - mid_mfn = mfn_to_virt(old_mfn); - } else { - p2m_top_mfn_p[topidx] = mid_mfn; + missing_mfn = virt_to_mfn(p2m_mid_missing_mfn); + mid_mfn_mfn = virt_to_mfn(mid_mfn); + old_mfn = cmpxchg(top_mfn_p, missing_mfn, mid_mfn_mfn); + if (old_mfn != missing_mfn) { + free_p2m_page(mid_mfn); + mid_mfn = mfn_to_virt(old_mfn); + } else { + p2m_top_mfn_p[topidx] = mid_mfn; + } } + } else { + mid_mfn = NULL; } - p2m_orig = ACCESS_ONCE(p2m_top[topidx][mididx]); - if (p2m_orig == p2m_identity || p2m_orig == p2m_missing) { 
+ p2m_pfn = pte_pfn(ACCESS_ONCE(*ptep)); + if (p2m_pfn == PFN_DOWN(__pa(p2m_identity)) || + p2m_pfn == PFN_DOWN(__pa(p2m_missing))) { /* p2m leaf page is missing */ unsigned long *p2m; @@ -589,183 +564,36 @@ static bool alloc_p2m(unsigned long pfn) if (!p2m) return false; - p2m_init(p2m); - - if (cmpxchg(&mid[mididx], p2m_orig, p2m) != p2m_orig) - free_p2m_page(p2m); + if (p2m_pfn == PFN_DOWN(__pa(p2m_missing))) + p2m_init(p2m); else - mid_mfn[mididx] = virt_to_mfn(p2m); - } - - return true; -} - -static bool __init early_alloc_p2m(unsigned long pfn, bool check_boundary) -{ - unsigned topidx, mididx, idx; - unsigned long *p2m; - - topidx = p2m_top_index(pfn); - mididx = p2m_mid_index(pfn); - idx = p2m_index(pfn); - - /* Pfff.. No boundary cross-over, lets get out. */ - if (!idx && check_boundary) - return false; - - WARN(p2m_top[topidx][mididx] == p2m_identity, - "P2M[%d][%d] == IDENTITY, should be MISSING (or alloced)!\n", - topidx, mididx); - - /* - * Could be done by xen_build_dynamic_phys_to_machine.. - */ - if (p2m_top[topidx][mididx] != p2m_missing) - return false; - - /* Boundary cross-over for the edges: */ - p2m = extend_brk(PAGE_SIZE, PAGE_SIZE); - - p2m_init(p2m); + p2m_init_identity(p2m, pfn); - p2m_top[topidx][mididx] = p2m; + spin_lock_irqsave(&p2m_update_lock, flags); - return true; -} - -static bool __init early_alloc_p2m_middle(unsigned long pfn) -{ - unsigned topidx = p2m_top_index(pfn); - unsigned long **mid; - - mid = p2m_top[topidx]; - if (mid == p2m_mid_missing) { - mid = extend_brk(PAGE_SIZE, PAGE_SIZE); - - p2m_mid_init(mid, p2m_missing); - - p2m_top[topidx] = mid; - } - return true; -} - -/* - * Skim over the P2M tree looking at pages that are either filled with - * INVALID_P2M_ENTRY or with 1:1 PFNs. If found, re-use that page and - * replace the P2M leaf with a p2m_missing or p2m_identity. - * Stick the old page in the new P2M tree location. - */ -static bool __init early_can_reuse_p2m_middle(unsigned long set_pfn) -{ - unsigned topidx; - unsigned mididx; - unsigned ident_pfns; - unsigned inv_pfns; - unsigned long *p2m; - unsigned idx; - unsigned long pfn; - - /* We only look when this entails a P2M middle layer */ - if (p2m_index(set_pfn)) - return false; - - for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_PER_PAGE) { - topidx = p2m_top_index(pfn); - - if (!p2m_top[topidx]) - continue; - - if (p2m_top[topidx] == p2m_mid_missing) - continue; - - mididx = p2m_mid_index(pfn); - p2m = p2m_top[topidx][mididx]; - if (!p2m) - continue; - - if ((p2m == p2m_missing) || (p2m == p2m_identity)) - continue; - - if ((unsigned long)p2m == INVALID_P2M_ENTRY) - continue; - - ident_pfns = 0; - inv_pfns = 0; - for (idx = 0; idx < P2M_PER_PAGE; idx++) { - /* IDENTITY_PFNs are 1:1 */ - if (p2m[idx] == IDENTITY_FRAME(pfn + idx)) - ident_pfns++; - else if (p2m[idx] == INVALID_P2M_ENTRY) - inv_pfns++; - else - break; + if (pte_pfn(*ptep) == p2m_pfn) { + set_pte(ptep, + pfn_pte(PFN_DOWN(__pa(p2m)), PAGE_KERNEL)); + if (mid_mfn) + mid_mfn[mididx] = virt_to_mfn(p2m); + p2m = NULL; } - if ((ident_pfns == P2M_PER_PAGE) || (inv_pfns == P2M_PER_PAGE)) - goto found; - } - return false; -found: - /* Found one, replace old with p2m_identity or p2m_missing */ - p2m_top[topidx][mididx] = (ident_pfns ? p2m_identity : p2m_missing); - - /* Reset where we want to stick the old page in. 
*/ - topidx = p2m_top_index(set_pfn); - mididx = p2m_mid_index(set_pfn); - - /* This shouldn't happen */ - if (WARN_ON(p2m_top[topidx] == p2m_mid_missing)) - early_alloc_p2m_middle(set_pfn); - - if (WARN_ON(p2m_top[topidx][mididx] != p2m_missing)) - return false; - - p2m_init(p2m); - p2m_top[topidx][mididx] = p2m; - return true; -} -bool __init early_set_phys_to_machine(unsigned long pfn, unsigned long mfn) -{ - if (unlikely(!__set_phys_to_machine(pfn, mfn))) { - if (!early_alloc_p2m_middle(pfn)) - return false; - - if (early_can_reuse_p2m_middle(pfn)) - return __set_phys_to_machine(pfn, mfn); - - if (!early_alloc_p2m(pfn, false /* boundary crossover OK!*/)) - return false; + spin_unlock_irqrestore(&p2m_update_lock, flags); - if (!__set_phys_to_machine(pfn, mfn)) - return false; + if (p2m) + free_p2m_page(p2m); } return true; } -static void __init early_split_p2m(unsigned long pfn) -{ - unsigned long mididx, idx; - - mididx = p2m_mid_index(pfn); - idx = p2m_index(pfn); - - /* - * Allocate new middle and leaf pages if this pfn lies in the - * middle of one. - */ - if (mididx || idx) - early_alloc_p2m_middle(pfn); - if (idx) - early_alloc_p2m(pfn, false); -} - unsigned long __init set_phys_range_identity(unsigned long pfn_s, unsigned long pfn_e) { unsigned long pfn; - if (unlikely(pfn_s >= MAX_P2M_PFN)) + if (unlikely(pfn_s >= xen_p2m_size)) return 0; if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) @@ -774,101 +602,51 @@ unsigned long __init set_phys_range_identity(unsigned long pfn_s, if (pfn_s > pfn_e) return 0; - if (pfn_e > MAX_P2M_PFN) - pfn_e = MAX_P2M_PFN; - - early_split_p2m(pfn_s); - early_split_p2m(pfn_e); - - for (pfn = pfn_s; pfn < pfn_e;) { - unsigned topidx = p2m_top_index(pfn); - unsigned mididx = p2m_mid_index(pfn); - - if (!__set_phys_to_machine(pfn, IDENTITY_FRAME(pfn))) - break; - pfn++; - - /* - * If the PFN was set to a middle or leaf identity - * page the remainder must also be identity, so skip - * ahead to the next middle or leaf entry. - */ - if (p2m_top[topidx] == p2m_mid_identity) - pfn = ALIGN(pfn, P2M_MID_PER_PAGE * P2M_PER_PAGE); - else if (p2m_top[topidx][mididx] == p2m_identity) - pfn = ALIGN(pfn, P2M_PER_PAGE); - } + if (pfn_e > xen_p2m_size) + pfn_e = xen_p2m_size; - WARN((pfn - pfn_s) != (pfn_e - pfn_s), - "Identity mapping failed. We are %ld short of 1-1 mappings!\n", - (pfn_e - pfn_s) - (pfn - pfn_s)); + for (pfn = pfn_s; pfn < pfn_e; pfn++) + xen_p2m_addr[pfn] = IDENTITY_FRAME(pfn); return pfn - pfn_s; } -/* Try to install p2m mapping; fail if intermediate bits missing */ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn) { - unsigned topidx, mididx, idx; + pte_t *ptep; + unsigned int level; /* don't track P2M changes in autotranslate guests */ if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) return true; - if (unlikely(pfn >= MAX_P2M_PFN)) { + if (unlikely(pfn >= xen_p2m_size)) { BUG_ON(mfn != INVALID_P2M_ENTRY); return true; } - topidx = p2m_top_index(pfn); - mididx = p2m_mid_index(pfn); - idx = p2m_index(pfn); - - /* For sparse holes were the p2m leaf has real PFN along with - * PCI holes, stick in the PFN as the MFN value. - * - * set_phys_range_identity() will have allocated new middle - * and leaf pages as required so an existing p2m_mid_missing - * or p2m_missing mean that whole range will be identity so - * these can be switched to p2m_mid_identity or p2m_identity. 
- */ - if (mfn != INVALID_P2M_ENTRY && (mfn & IDENTITY_FRAME_BIT)) { - if (p2m_top[topidx] == p2m_mid_identity) - return true; - - if (p2m_top[topidx] == p2m_mid_missing) { - WARN_ON(cmpxchg(&p2m_top[topidx], p2m_mid_missing, - p2m_mid_identity) != p2m_mid_missing); - return true; - } - - if (p2m_top[topidx][mididx] == p2m_identity) - return true; + if (likely(!xen_safe_write_ulong(xen_p2m_addr + pfn, mfn))) + return true; - /* Swap over from MISSING to IDENTITY if needed. */ - if (p2m_top[topidx][mididx] == p2m_missing) { - WARN_ON(cmpxchg(&p2m_top[topidx][mididx], p2m_missing, - p2m_identity) != p2m_missing); - return true; - } - } + ptep = lookup_address((unsigned long)(xen_p2m_addr + pfn), &level); + BUG_ON(!ptep || level != PG_LEVEL_4K); - if (p2m_top[topidx][mididx] == p2m_missing) + if (pte_pfn(*ptep) == PFN_DOWN(__pa(p2m_missing))) return mfn == INVALID_P2M_ENTRY; - p2m_top[topidx][mididx][idx] = mfn; + if (pte_pfn(*ptep) == PFN_DOWN(__pa(p2m_identity))) + return mfn == IDENTITY_FRAME(pfn); - return true; + return false; } bool set_phys_to_machine(unsigned long pfn, unsigned long mfn) { - if (unlikely(!__set_phys_to_machine(pfn, mfn))) { + if (unlikely(!__set_phys_to_machine(pfn, mfn))) { if (!alloc_p2m(pfn)) return false; - if (!__set_phys_to_machine(pfn, mfn)) - return false; + return __set_phys_to_machine(pfn, mfn); } return true; @@ -877,15 +655,16 @@ bool set_phys_to_machine(unsigned long pfn, unsigned long mfn) #define M2P_OVERRIDE_HASH_SHIFT 10 #define M2P_OVERRIDE_HASH (1 << M2P_OVERRIDE_HASH_SHIFT) -static RESERVE_BRK_ARRAY(struct list_head, m2p_overrides, M2P_OVERRIDE_HASH); +static struct list_head *m2p_overrides; static DEFINE_SPINLOCK(m2p_override_lock); static void __init m2p_override_init(void) { unsigned i; - m2p_overrides = extend_brk(sizeof(*m2p_overrides) * M2P_OVERRIDE_HASH, - sizeof(unsigned long)); + m2p_overrides = alloc_bootmem_align( + sizeof(*m2p_overrides) * M2P_OVERRIDE_HASH, + sizeof(unsigned long)); for (i = 0; i < M2P_OVERRIDE_HASH; i++) INIT_LIST_HEAD(&m2p_overrides[i]); @@ -896,68 +675,9 @@ static unsigned long mfn_hash(unsigned long mfn) return hash_long(mfn, M2P_OVERRIDE_HASH_SHIFT); } -int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops, - struct gnttab_map_grant_ref *kmap_ops, - struct page **pages, unsigned int count) -{ - int i, ret = 0; - bool lazy = false; - pte_t *pte; - - if (xen_feature(XENFEAT_auto_translated_physmap)) - return 0; - - if (kmap_ops && - !in_interrupt() && - paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) { - arch_enter_lazy_mmu_mode(); - lazy = true; - } - - for (i = 0; i < count; i++) { - unsigned long mfn, pfn; - - /* Do not add to override if the map failed. 
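set_phys_to_machine() above now has a simple write/allocate/retry shape: try the plain store, and only if that fails (because the leaf is still one of the shared read-only pages) allocate a private leaf via alloc_p2m() and store again. A toy model of that control flow under stated assumptions: the NULL leaf stands in for "backed by the shared read-only page", and the helpers are inventions for the example, not kernel interfaces.

#include <stdbool.h>
#include <stdlib.h>

static unsigned long *demo_leaf;        /* NULL == shared read-only backing */

static bool demo_try_write(unsigned long pfn, unsigned long mfn)
{
        if (!demo_leaf)
                return false;           /* would fault on the RO page */
        demo_leaf[pfn % 512] = mfn;
        return true;
}

static bool demo_alloc_leaf(void)
{
        demo_leaf = calloc(512, sizeof(*demo_leaf));
        return demo_leaf != NULL;
}

/* Mirrors the write / allocate-on-failure / retry shape of the patch. */
static bool demo_set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
        if (demo_try_write(pfn, mfn))
                return true;

        if (!demo_alloc_leaf())
                return false;

        return demo_try_write(pfn, mfn);
}

int main(void)
{
        return demo_set_phys_to_machine(7, 1234) ? 0 : 1;
}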
*/ - if (map_ops[i].status) - continue; - - if (map_ops[i].flags & GNTMAP_contains_pte) { - pte = (pte_t *) (mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) + - (map_ops[i].host_addr & ~PAGE_MASK)); - mfn = pte_mfn(*pte); - } else { - mfn = PFN_DOWN(map_ops[i].dev_bus_addr); - } - pfn = page_to_pfn(pages[i]); - - WARN_ON(PagePrivate(pages[i])); - SetPagePrivate(pages[i]); - set_page_private(pages[i], mfn); - pages[i]->index = pfn_to_mfn(pfn); - - if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) { - ret = -ENOMEM; - goto out; - } - - if (kmap_ops) { - ret = m2p_add_override(mfn, pages[i], &kmap_ops[i]); - if (ret) - goto out; - } - } - -out: - if (lazy) - arch_leave_lazy_mmu_mode(); - - return ret; -} -EXPORT_SYMBOL_GPL(set_foreign_p2m_mapping); - /* Add an MFN override for a particular page */ -int m2p_add_override(unsigned long mfn, struct page *page, - struct gnttab_map_grant_ref *kmap_op) +static int m2p_add_override(unsigned long mfn, struct page *page, + struct gnttab_map_grant_ref *kmap_op) { unsigned long flags; unsigned long pfn; @@ -970,7 +690,7 @@ int m2p_add_override(unsigned long mfn, struct page *page, address = (unsigned long)__va(pfn << PAGE_SHIFT); ptep = lookup_address(address, &level); if (WARN(ptep == NULL || level != PG_LEVEL_4K, - "m2p_add_override: pfn %lx not mapped", pfn)) + "m2p_add_override: pfn %lx not mapped", pfn)) return -EINVAL; } @@ -1004,19 +724,19 @@ int m2p_add_override(unsigned long mfn, struct page *page, * because mfn_to_pfn (that ends up being called by GUPF) will * return the backend pfn rather than the frontend pfn. */ pfn = mfn_to_pfn_no_overrides(mfn); - if (get_phys_to_machine(pfn) == mfn) + if (__pfn_to_mfn(pfn) == mfn) set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)); return 0; } -EXPORT_SYMBOL_GPL(m2p_add_override); -int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops, - struct gnttab_map_grant_ref *kmap_ops, - struct page **pages, unsigned int count) +int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops, + struct gnttab_map_grant_ref *kmap_ops, + struct page **pages, unsigned int count) { int i, ret = 0; bool lazy = false; + pte_t *pte; if (xen_feature(XENFEAT_auto_translated_physmap)) return 0; @@ -1029,35 +749,75 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops, } for (i = 0; i < count; i++) { - unsigned long mfn = get_phys_to_machine(page_to_pfn(pages[i])); - unsigned long pfn = page_to_pfn(pages[i]); + unsigned long mfn, pfn; - if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT)) { - ret = -EINVAL; - goto out; + /* Do not add to override if the map failed. 
*/ + if (map_ops[i].status) + continue; + + if (map_ops[i].flags & GNTMAP_contains_pte) { + pte = (pte_t *)(mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) + + (map_ops[i].host_addr & ~PAGE_MASK)); + mfn = pte_mfn(*pte); + } else { + mfn = PFN_DOWN(map_ops[i].dev_bus_addr); } + pfn = page_to_pfn(pages[i]); - set_page_private(pages[i], INVALID_P2M_ENTRY); - WARN_ON(!PagePrivate(pages[i])); - ClearPagePrivate(pages[i]); - set_phys_to_machine(pfn, pages[i]->index); + WARN_ON(PagePrivate(pages[i])); + SetPagePrivate(pages[i]); + set_page_private(pages[i], mfn); + pages[i]->index = pfn_to_mfn(pfn); - if (kmap_ops) - ret = m2p_remove_override(pages[i], &kmap_ops[i], mfn); - if (ret) + if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) { + ret = -ENOMEM; goto out; + } + + if (kmap_ops) { + ret = m2p_add_override(mfn, pages[i], &kmap_ops[i]); + if (ret) + goto out; + } } out: if (lazy) arch_leave_lazy_mmu_mode(); + return ret; } -EXPORT_SYMBOL_GPL(clear_foreign_p2m_mapping); +EXPORT_SYMBOL_GPL(set_foreign_p2m_mapping); -int m2p_remove_override(struct page *page, - struct gnttab_map_grant_ref *kmap_op, - unsigned long mfn) +static struct page *m2p_find_override(unsigned long mfn) +{ + unsigned long flags; + struct list_head *bucket; + struct page *p, *ret; + + if (unlikely(!m2p_overrides)) + return NULL; + + ret = NULL; + bucket = &m2p_overrides[mfn_hash(mfn)]; + + spin_lock_irqsave(&m2p_override_lock, flags); + + list_for_each_entry(p, bucket, lru) { + if (page_private(p) == mfn) { + ret = p; + break; + } + } + + spin_unlock_irqrestore(&m2p_override_lock, flags); + + return ret; +} + +static int m2p_remove_override(struct page *page, + struct gnttab_map_grant_ref *kmap_op, + unsigned long mfn) { unsigned long flags; unsigned long pfn; @@ -1072,7 +832,7 @@ int m2p_remove_override(struct page *page, ptep = lookup_address(address, &level); if (WARN(ptep == NULL || level != PG_LEVEL_4K, - "m2p_remove_override: pfn %lx not mapped", pfn)) + "m2p_remove_override: pfn %lx not mapped", pfn)) return -EINVAL; } @@ -1102,9 +862,8 @@ int m2p_remove_override(struct page *page, * hypercall actually returned an error. */ if (kmap_op->handle == GNTST_general_error) { - printk(KERN_WARNING "m2p_remove_override: " - "pfn %lx mfn %lx, failed to modify kernel mappings", - pfn, mfn); + pr_warn("m2p_remove_override: pfn %lx mfn %lx, failed to modify kernel mappings", + pfn, mfn); put_balloon_scratch_page(); return -1; } @@ -1112,14 +871,14 @@ int m2p_remove_override(struct page *page, xen_mc_batch(); mcs = __xen_mc_entry( - sizeof(struct gnttab_unmap_and_replace)); + sizeof(struct gnttab_unmap_and_replace)); unmap_op = mcs.args; unmap_op->host_addr = kmap_op->host_addr; unmap_op->new_addr = scratch_page_address; unmap_op->handle = kmap_op->handle; MULTI_grant_table_op(mcs.mc, - GNTTABOP_unmap_and_replace, unmap_op, 1); + GNTTABOP_unmap_and_replace, unmap_op, 1); mcs = __xen_mc_entry(0); MULTI_update_va_mapping(mcs.mc, scratch_page_address, @@ -1145,35 +904,56 @@ int m2p_remove_override(struct page *page, * pfn again. 
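m2p_find_override() above is a plain hash-bucket lookup: hash the mfn, walk that bucket's list with m2p_override_lock held, and return the page whose private field matches. The same pattern reduced to a self-contained sketch, with a singly linked list and a modulo hash standing in for the kernel's list_head buckets and hash_long(); locking is noted but omitted.

#include <stddef.h>
#include <stdio.h>

#define DEMO_HASH_SIZE 1024

struct demo_override {
        struct demo_override *next;
        unsigned long mfn;
};

static struct demo_override *demo_buckets[DEMO_HASH_SIZE];

/* In the kernel this walk happens with m2p_override_lock held. */
static struct demo_override *demo_find_override(unsigned long mfn)
{
        struct demo_override *p;

        for (p = demo_buckets[mfn % DEMO_HASH_SIZE]; p; p = p->next)
                if (p->mfn == mfn)
                        return p;
        return NULL;
}

int main(void)
{
        struct demo_override entry = { .next = NULL, .mfn = 42 };

        demo_buckets[entry.mfn % DEMO_HASH_SIZE] = &entry;
        printf("found: %d\n", demo_find_override(42) != NULL);
        return 0;
}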
*/ mfn &= ~FOREIGN_FRAME_BIT; pfn = mfn_to_pfn_no_overrides(mfn); - if (get_phys_to_machine(pfn) == FOREIGN_FRAME(mfn) && + if (__pfn_to_mfn(pfn) == FOREIGN_FRAME(mfn) && m2p_find_override(mfn) == NULL) set_phys_to_machine(pfn, mfn); return 0; } -EXPORT_SYMBOL_GPL(m2p_remove_override); -struct page *m2p_find_override(unsigned long mfn) +int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops, + struct gnttab_map_grant_ref *kmap_ops, + struct page **pages, unsigned int count) { - unsigned long flags; - struct list_head *bucket = &m2p_overrides[mfn_hash(mfn)]; - struct page *p, *ret; + int i, ret = 0; + bool lazy = false; - ret = NULL; + if (xen_feature(XENFEAT_auto_translated_physmap)) + return 0; - spin_lock_irqsave(&m2p_override_lock, flags); + if (kmap_ops && + !in_interrupt() && + paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) { + arch_enter_lazy_mmu_mode(); + lazy = true; + } - list_for_each_entry(p, bucket, lru) { - if (page_private(p) == mfn) { - ret = p; - break; + for (i = 0; i < count; i++) { + unsigned long mfn = __pfn_to_mfn(page_to_pfn(pages[i])); + unsigned long pfn = page_to_pfn(pages[i]); + + if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT)) { + ret = -EINVAL; + goto out; } - } - spin_unlock_irqrestore(&m2p_override_lock, flags); + set_page_private(pages[i], INVALID_P2M_ENTRY); + WARN_ON(!PagePrivate(pages[i])); + ClearPagePrivate(pages[i]); + set_phys_to_machine(pfn, pages[i]->index); + + if (kmap_ops) + ret = m2p_remove_override(pages[i], &kmap_ops[i], mfn); + if (ret) + goto out; + } +out: + if (lazy) + arch_leave_lazy_mmu_mode(); return ret; } +EXPORT_SYMBOL_GPL(clear_foreign_p2m_mapping); unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn) { @@ -1192,79 +972,29 @@ EXPORT_SYMBOL_GPL(m2p_find_override_pfn); #include "debugfs.h" static int p2m_dump_show(struct seq_file *m, void *v) { - static const char * const level_name[] = { "top", "middle", - "entry", "abnormal", "error"}; -#define TYPE_IDENTITY 0 -#define TYPE_MISSING 1 -#define TYPE_PFN 2 -#define TYPE_UNKNOWN 3 static const char * const type_name[] = { - [TYPE_IDENTITY] = "identity", - [TYPE_MISSING] = "missing", - [TYPE_PFN] = "pfn", - [TYPE_UNKNOWN] = "abnormal"}; - unsigned long pfn, prev_pfn_type = 0, prev_pfn_level = 0; - unsigned int uninitialized_var(prev_level); - unsigned int uninitialized_var(prev_type); - - if (!p2m_top) - return 0; - - for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn++) { - unsigned topidx = p2m_top_index(pfn); - unsigned mididx = p2m_mid_index(pfn); - unsigned idx = p2m_index(pfn); - unsigned lvl, type; - - lvl = 4; - type = TYPE_UNKNOWN; - if (p2m_top[topidx] == p2m_mid_missing) { - lvl = 0; type = TYPE_MISSING; - } else if (p2m_top[topidx] == NULL) { - lvl = 0; type = TYPE_UNKNOWN; - } else if (p2m_top[topidx][mididx] == NULL) { - lvl = 1; type = TYPE_UNKNOWN; - } else if (p2m_top[topidx][mididx] == p2m_identity) { - lvl = 1; type = TYPE_IDENTITY; - } else if (p2m_top[topidx][mididx] == p2m_missing) { - lvl = 1; type = TYPE_MISSING; - } else if (p2m_top[topidx][mididx][idx] == 0) { - lvl = 2; type = TYPE_UNKNOWN; - } else if (p2m_top[topidx][mididx][idx] == IDENTITY_FRAME(pfn)) { - lvl = 2; type = TYPE_IDENTITY; - } else if (p2m_top[topidx][mididx][idx] == INVALID_P2M_ENTRY) { - lvl = 2; type = TYPE_MISSING; - } else if (p2m_top[topidx][mididx][idx] == pfn) { - lvl = 2; type = TYPE_PFN; - } else if (p2m_top[topidx][mididx][idx] != pfn) { - lvl = 2; type = TYPE_PFN; - } - if (pfn == 0) { - prev_level = lvl; - prev_type = type; - } - if (pfn == 
MAX_DOMAIN_PAGES-1) { - lvl = 3; - type = TYPE_UNKNOWN; - } - if (prev_type != type) { - seq_printf(m, " [0x%lx->0x%lx] %s\n", - prev_pfn_type, pfn, type_name[prev_type]); - prev_pfn_type = pfn; + [P2M_TYPE_IDENTITY] = "identity", + [P2M_TYPE_MISSING] = "missing", + [P2M_TYPE_PFN] = "pfn", + [P2M_TYPE_UNKNOWN] = "abnormal"}; + unsigned long pfn, first_pfn; + int type, prev_type; + + prev_type = xen_p2m_elem_type(0); + first_pfn = 0; + + for (pfn = 0; pfn < xen_p2m_size; pfn++) { + type = xen_p2m_elem_type(pfn); + if (type != prev_type) { + seq_printf(m, " [0x%lx->0x%lx] %s\n", first_pfn, pfn, + type_name[prev_type]); prev_type = type; - } - if (prev_level != lvl) { - seq_printf(m, " [0x%lx->0x%lx] level %s\n", - prev_pfn_level, pfn, level_name[prev_level]); - prev_pfn_level = pfn; - prev_level = lvl; + first_pfn = pfn; } } + seq_printf(m, " [0x%lx->0x%lx] %s\n", first_pfn, pfn, + type_name[prev_type]); return 0; -#undef TYPE_IDENTITY -#undef TYPE_MISSING -#undef TYPE_PFN -#undef TYPE_UNKNOWN } static int p2m_dump_open(struct inode *inode, struct file *filp) diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index 29834b3fd87f..dfd77dec8e2b 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c @@ -30,6 +30,7 @@ #include "xen-ops.h" #include "vdso.h" #include "p2m.h" +#include "mmu.h" /* These are code, but not functions. Defined in entry.S */ extern const char xen_hypervisor_callback[]; @@ -47,8 +48,19 @@ struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata; /* Number of pages released from the initial allocation. */ unsigned long xen_released_pages; -/* Buffer used to remap identity mapped pages */ -unsigned long xen_remap_buf[P2M_PER_PAGE] __initdata; +/* + * Buffer used to remap identity mapped pages. We only need the virtual space. + * The physical page behind this address is remapped as needed to different + * buffer pages. + */ +#define REMAP_SIZE (P2M_PER_PAGE - 3) +static struct { + unsigned long next_area_mfn; + unsigned long target_pfn; + unsigned long size; + unsigned long mfns[REMAP_SIZE]; +} xen_remap_buf __initdata __aligned(PAGE_SIZE); +static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY; /* * The maximum amount of extra memory compared to the base size. The @@ -64,7 +76,6 @@ unsigned long xen_remap_buf[P2M_PER_PAGE] __initdata; static void __init xen_add_extra_mem(u64 start, u64 size) { - unsigned long pfn; int i; for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) { @@ -84,75 +95,76 @@ static void __init xen_add_extra_mem(u64 start, u64 size) printk(KERN_WARNING "Warning: not enough extra memory regions\n"); memblock_reserve(start, size); +} - xen_max_p2m_pfn = PFN_DOWN(start + size); - for (pfn = PFN_DOWN(start); pfn < xen_max_p2m_pfn; pfn++) { - unsigned long mfn = pfn_to_mfn(pfn); - - if (WARN_ONCE(mfn == pfn, "Trying to over-write 1-1 mapping (pfn: %lx)\n", pfn)) - continue; - WARN_ONCE(mfn != INVALID_P2M_ENTRY, "Trying to remove %lx which has %lx mfn!\n", - pfn, mfn); +static void __init xen_del_extra_mem(u64 start, u64 size) +{ + int i; + u64 start_r, size_r; - __set_phys_to_machine(pfn, INVALID_P2M_ENTRY); + for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) { + start_r = xen_extra_mem[i].start; + size_r = xen_extra_mem[i].size; + + /* Start of region. */ + if (start_r == start) { + BUG_ON(size > size_r); + xen_extra_mem[i].start += size; + xen_extra_mem[i].size -= size; + break; + } + /* End of region. 
*/ + if (start_r + size_r == start + size) { + BUG_ON(size > size_r); + xen_extra_mem[i].size -= size; + break; + } + /* Mid of region. */ + if (start > start_r && start < start_r + size_r) { + BUG_ON(start + size > start_r + size_r); + xen_extra_mem[i].size = start - start_r; + /* Calling memblock_reserve() again is okay. */ + xen_add_extra_mem(start + size, start_r + size_r - + (start + size)); + break; + } } + memblock_free(start, size); } -static unsigned long __init xen_do_chunk(unsigned long start, - unsigned long end, bool release) +/* + * Called during boot before the p2m list can take entries beyond the + * hypervisor supplied p2m list. Entries in extra mem are to be regarded as + * invalid. + */ +unsigned long __ref xen_chk_extra_mem(unsigned long pfn) { - struct xen_memory_reservation reservation = { - .address_bits = 0, - .extent_order = 0, - .domid = DOMID_SELF - }; - unsigned long len = 0; - unsigned long pfn; - int ret; + int i; + unsigned long addr = PFN_PHYS(pfn); - for (pfn = start; pfn < end; pfn++) { - unsigned long frame; - unsigned long mfn = pfn_to_mfn(pfn); + for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) { + if (addr >= xen_extra_mem[i].start && + addr < xen_extra_mem[i].start + xen_extra_mem[i].size) + return INVALID_P2M_ENTRY; + } - if (release) { - /* Make sure pfn exists to start with */ - if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn) - continue; - frame = mfn; - } else { - if (mfn != INVALID_P2M_ENTRY) - continue; - frame = pfn; - } - set_xen_guest_handle(reservation.extent_start, &frame); - reservation.nr_extents = 1; + return IDENTITY_FRAME(pfn); +} - ret = HYPERVISOR_memory_op(release ? XENMEM_decrease_reservation : XENMEM_populate_physmap, - &reservation); - WARN(ret != 1, "Failed to %s pfn %lx err=%d\n", - release ? "release" : "populate", pfn, ret); +/* + * Mark all pfns of extra mem as invalid in p2m list. + */ +void __init xen_inv_extra_mem(void) +{ + unsigned long pfn, pfn_s, pfn_e; + int i; - if (ret == 1) { - if (!early_set_phys_to_machine(pfn, release ? INVALID_P2M_ENTRY : frame)) { - if (release) - break; - set_xen_guest_handle(reservation.extent_start, &frame); - reservation.nr_extents = 1; - ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, - &reservation); - break; - } - len++; - } else - break; + for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) { + pfn_s = PFN_DOWN(xen_extra_mem[i].start); + pfn_e = PFN_UP(xen_extra_mem[i].start + xen_extra_mem[i].size); + for (pfn = pfn_s; pfn < pfn_e; pfn++) + set_phys_to_machine(pfn, INVALID_P2M_ENTRY); } - if (len) - printk(KERN_INFO "%s %lx-%lx pfn range: %lu pages %s\n", - release ? "Freeing" : "Populating", - start, end, len, - release ? "freed" : "added"); - - return len; } /* @@ -198,26 +210,62 @@ static unsigned long __init xen_find_pfn_range( return done; } +static int __init xen_free_mfn(unsigned long mfn) +{ + struct xen_memory_reservation reservation = { + .address_bits = 0, + .extent_order = 0, + .domid = DOMID_SELF + }; + + set_xen_guest_handle(reservation.extent_start, &mfn); + reservation.nr_extents = 1; + + return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); +} + /* - * This releases a chunk of memory and then does the identity map. It's used as + * This releases a chunk of memory and then does the identity map. It's used * as a fallback if the remapping fails. 
*/ static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages, unsigned long *identity, unsigned long *released) { + unsigned long len = 0; + unsigned long pfn, end; + int ret; + WARN_ON(start_pfn > end_pfn); + end = min(end_pfn, nr_pages); + for (pfn = start_pfn; pfn < end; pfn++) { + unsigned long mfn = pfn_to_mfn(pfn); + + /* Make sure pfn exists to start with */ + if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn) + continue; + + ret = xen_free_mfn(mfn); + WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret); + + if (ret == 1) { + if (!__set_phys_to_machine(pfn, INVALID_P2M_ENTRY)) + break; + len++; + } else + break; + } + /* Need to release pages first */ - *released += xen_do_chunk(start_pfn, min(end_pfn, nr_pages), true); + *released += len; *identity += set_phys_range_identity(start_pfn, end_pfn); } /* - * Helper function to update both the p2m and m2p tables. + * Helper function to update the p2m and m2p tables and kernel mapping. */ -static unsigned long __init xen_update_mem_tables(unsigned long pfn, - unsigned long mfn) +static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn) { struct mmu_update update = { .ptr = ((unsigned long long)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE, @@ -225,161 +273,88 @@ static unsigned long __init xen_update_mem_tables(unsigned long pfn, }; /* Update p2m */ - if (!early_set_phys_to_machine(pfn, mfn)) { + if (!set_phys_to_machine(pfn, mfn)) { WARN(1, "Failed to set p2m mapping for pfn=%ld mfn=%ld\n", pfn, mfn); - return false; + BUG(); } /* Update m2p */ if (HYPERVISOR_mmu_update(&update, 1, NULL, DOMID_SELF) < 0) { WARN(1, "Failed to set m2p mapping for mfn=%ld pfn=%ld\n", mfn, pfn); - return false; + BUG(); } - return true; + /* Update kernel mapping, but not for highmem. */ + if ((pfn << PAGE_SHIFT) >= __pa(high_memory)) + return; + + if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT), + mfn_pte(mfn, PAGE_KERNEL), 0)) { + WARN(1, "Failed to update kernel mapping for mfn=%ld pfn=%ld\n", + mfn, pfn); + BUG(); + } } /* * This function updates the p2m and m2p tables with an identity map from - * start_pfn to start_pfn+size and remaps the underlying RAM of the original - * allocation at remap_pfn. It must do so carefully in P2M_PER_PAGE sized blocks - * to not exhaust the reserved brk space. Doing it in properly aligned blocks - * ensures we only allocate the minimum required leaf pages in the p2m table. It - * copies the existing mfns from the p2m table under the 1:1 map, overwrites - * them with the identity map and then updates the p2m and m2p tables with the - * remapped memory. + * start_pfn to start_pfn+size and prepares remapping the underlying RAM of the + * original allocation at remap_pfn. The information needed for remapping is + * saved in the memory itself to avoid the need for allocating buffers. The + * complete remap information is contained in a list of MFNs each containing + * up to REMAP_SIZE MFNs and the start target PFN for doing the remap. + * This enables us to preserve the original mfn sequence while doing the + * remapping at a time when the memory management is capable of allocating + * virtual and physical memory in arbitrary amounts, see 'xen_remap_memory' and + * its callers. 
*/ -static unsigned long __init xen_do_set_identity_and_remap_chunk( +static void __init xen_do_set_identity_and_remap_chunk( unsigned long start_pfn, unsigned long size, unsigned long remap_pfn) { + unsigned long buf = (unsigned long)&xen_remap_buf; + unsigned long mfn_save, mfn; unsigned long ident_pfn_iter, remap_pfn_iter; - unsigned long ident_start_pfn_align, remap_start_pfn_align; - unsigned long ident_end_pfn_align, remap_end_pfn_align; - unsigned long ident_boundary_pfn, remap_boundary_pfn; - unsigned long ident_cnt = 0; - unsigned long remap_cnt = 0; + unsigned long ident_end_pfn = start_pfn + size; unsigned long left = size; - unsigned long mod; - int i; + unsigned long ident_cnt = 0; + unsigned int i, chunk; WARN_ON(size == 0); BUG_ON(xen_feature(XENFEAT_auto_translated_physmap)); - /* - * Determine the proper alignment to remap memory in P2M_PER_PAGE sized - * blocks. We need to keep track of both the existing pfn mapping and - * the new pfn remapping. - */ - mod = start_pfn % P2M_PER_PAGE; - ident_start_pfn_align = - mod ? (start_pfn - mod + P2M_PER_PAGE) : start_pfn; - mod = remap_pfn % P2M_PER_PAGE; - remap_start_pfn_align = - mod ? (remap_pfn - mod + P2M_PER_PAGE) : remap_pfn; - mod = (start_pfn + size) % P2M_PER_PAGE; - ident_end_pfn_align = start_pfn + size - mod; - mod = (remap_pfn + size) % P2M_PER_PAGE; - remap_end_pfn_align = remap_pfn + size - mod; - - /* Iterate over each p2m leaf node in each range */ - for (ident_pfn_iter = ident_start_pfn_align, remap_pfn_iter = remap_start_pfn_align; - ident_pfn_iter < ident_end_pfn_align && remap_pfn_iter < remap_end_pfn_align; - ident_pfn_iter += P2M_PER_PAGE, remap_pfn_iter += P2M_PER_PAGE) { - /* Check we aren't past the end */ - BUG_ON(ident_pfn_iter + P2M_PER_PAGE > start_pfn + size); - BUG_ON(remap_pfn_iter + P2M_PER_PAGE > remap_pfn + size); - - /* Save p2m mappings */ - for (i = 0; i < P2M_PER_PAGE; i++) - xen_remap_buf[i] = pfn_to_mfn(ident_pfn_iter + i); - - /* Set identity map which will free a p2m leaf */ - ident_cnt += set_phys_range_identity(ident_pfn_iter, - ident_pfn_iter + P2M_PER_PAGE); + mfn_save = virt_to_mfn(buf); -#ifdef DEBUG - /* Helps verify a p2m leaf has been freed */ - for (i = 0; i < P2M_PER_PAGE; i++) { - unsigned int pfn = ident_pfn_iter + i; - BUG_ON(pfn_to_mfn(pfn) != pfn); - } -#endif - /* Now remap memory */ - for (i = 0; i < P2M_PER_PAGE; i++) { - unsigned long mfn = xen_remap_buf[i]; - - /* This will use the p2m leaf freed above */ - if (!xen_update_mem_tables(remap_pfn_iter + i, mfn)) { - WARN(1, "Failed to update mem mapping for pfn=%ld mfn=%ld\n", - remap_pfn_iter + i, mfn); - return 0; - } - - remap_cnt++; - } - - left -= P2M_PER_PAGE; - } - - /* Max boundary space possible */ - BUG_ON(left > (P2M_PER_PAGE - 1) * 2); + for (ident_pfn_iter = start_pfn, remap_pfn_iter = remap_pfn; + ident_pfn_iter < ident_end_pfn; + ident_pfn_iter += REMAP_SIZE, remap_pfn_iter += REMAP_SIZE) { + chunk = (left < REMAP_SIZE) ? 
left : REMAP_SIZE; - /* Now handle the boundary conditions */ - ident_boundary_pfn = start_pfn; - remap_boundary_pfn = remap_pfn; - for (i = 0; i < left; i++) { - unsigned long mfn; + /* Map first pfn to xen_remap_buf */ + mfn = pfn_to_mfn(ident_pfn_iter); + set_pte_mfn(buf, mfn, PAGE_KERNEL); - /* These two checks move from the start to end boundaries */ - if (ident_boundary_pfn == ident_start_pfn_align) - ident_boundary_pfn = ident_pfn_iter; - if (remap_boundary_pfn == remap_start_pfn_align) - remap_boundary_pfn = remap_pfn_iter; + /* Save mapping information in page */ + xen_remap_buf.next_area_mfn = xen_remap_mfn; + xen_remap_buf.target_pfn = remap_pfn_iter; + xen_remap_buf.size = chunk; + for (i = 0; i < chunk; i++) + xen_remap_buf.mfns[i] = pfn_to_mfn(ident_pfn_iter + i); - /* Check we aren't past the end */ - BUG_ON(ident_boundary_pfn >= start_pfn + size); - BUG_ON(remap_boundary_pfn >= remap_pfn + size); - - mfn = pfn_to_mfn(ident_boundary_pfn); - - if (!xen_update_mem_tables(remap_boundary_pfn, mfn)) { - WARN(1, "Failed to update mem mapping for pfn=%ld mfn=%ld\n", - remap_pfn_iter + i, mfn); - return 0; - } - remap_cnt++; + /* Put remap buf into list. */ + xen_remap_mfn = mfn; - ident_boundary_pfn++; - remap_boundary_pfn++; - } + /* Set identity map */ + ident_cnt += set_phys_range_identity(ident_pfn_iter, + ident_pfn_iter + chunk); - /* Finish up the identity map */ - if (ident_start_pfn_align >= ident_end_pfn_align) { - /* - * In this case we have an identity range which does not span an - * aligned block so everything needs to be identity mapped here. - * If we didn't check this we might remap too many pages since - * the align boundaries are not meaningful in this case. - */ - ident_cnt += set_phys_range_identity(start_pfn, - start_pfn + size); - } else { - /* Remapped above so check each end of the chunk */ - if (start_pfn < ident_start_pfn_align) - ident_cnt += set_phys_range_identity(start_pfn, - ident_start_pfn_align); - if (start_pfn + size > ident_pfn_iter) - ident_cnt += set_phys_range_identity(ident_pfn_iter, - start_pfn + size); + left -= chunk; } - BUG_ON(ident_cnt != size); - BUG_ON(remap_cnt != size); - - return size; + /* Restore old xen_remap_buf mapping */ + set_pte_mfn(buf, mfn_save, PAGE_KERNEL); } /* @@ -396,8 +371,7 @@ static unsigned long __init xen_do_set_identity_and_remap_chunk( static unsigned long __init xen_set_identity_and_remap_chunk( const struct e820entry *list, size_t map_size, unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages, unsigned long remap_pfn, - unsigned long *identity, unsigned long *remapped, - unsigned long *released) + unsigned long *identity, unsigned long *released) { unsigned long pfn; unsigned long i = 0; @@ -431,19 +405,12 @@ static unsigned long __init xen_set_identity_and_remap_chunk( if (size > remap_range_size) size = remap_range_size; - if (!xen_do_set_identity_and_remap_chunk(cur_pfn, size, remap_pfn)) { - WARN(1, "Failed to remap 1:1 memory cur_pfn=%ld size=%ld remap_pfn=%ld\n", - cur_pfn, size, remap_pfn); - xen_set_identity_and_release_chunk(cur_pfn, - cur_pfn + left, nr_pages, identity, released); - break; - } + xen_do_set_identity_and_remap_chunk(cur_pfn, size, remap_pfn); /* Update variables to reflect new mappings. 
*/ i += size; remap_pfn += size; *identity += size; - *remapped += size; } /* @@ -458,13 +425,12 @@ static unsigned long __init xen_set_identity_and_remap_chunk( return remap_pfn; } -static unsigned long __init xen_set_identity_and_remap( +static void __init xen_set_identity_and_remap( const struct e820entry *list, size_t map_size, unsigned long nr_pages, unsigned long *released) { phys_addr_t start = 0; unsigned long identity = 0; - unsigned long remapped = 0; unsigned long last_pfn = nr_pages; const struct e820entry *entry; unsigned long num_released = 0; @@ -494,8 +460,7 @@ static unsigned long __init xen_set_identity_and_remap( last_pfn = xen_set_identity_and_remap_chunk( list, map_size, start_pfn, end_pfn, nr_pages, last_pfn, - &identity, &remapped, - &num_released); + &identity, &num_released); start = end; } } @@ -503,12 +468,63 @@ static unsigned long __init xen_set_identity_and_remap( *released = num_released; pr_info("Set %ld page(s) to 1-1 mapping\n", identity); - pr_info("Remapped %ld page(s), last_pfn=%ld\n", remapped, - last_pfn); pr_info("Released %ld page(s)\n", num_released); +} + +/* + * Remap the memory prepared in xen_do_set_identity_and_remap_chunk(). + * The remap information (which mfn remap to which pfn) is contained in the + * to be remapped memory itself in a linked list anchored at xen_remap_mfn. + * This scheme allows to remap the different chunks in arbitrary order while + * the resulting mapping will be independant from the order. + */ +void __init xen_remap_memory(void) +{ + unsigned long buf = (unsigned long)&xen_remap_buf; + unsigned long mfn_save, mfn, pfn; + unsigned long remapped = 0; + unsigned int i; + unsigned long pfn_s = ~0UL; + unsigned long len = 0; + + mfn_save = virt_to_mfn(buf); + + while (xen_remap_mfn != INVALID_P2M_ENTRY) { + /* Map the remap information */ + set_pte_mfn(buf, xen_remap_mfn, PAGE_KERNEL); - return last_pfn; + BUG_ON(xen_remap_mfn != xen_remap_buf.mfns[0]); + + pfn = xen_remap_buf.target_pfn; + for (i = 0; i < xen_remap_buf.size; i++) { + mfn = xen_remap_buf.mfns[i]; + xen_update_mem_tables(pfn, mfn); + remapped++; + pfn++; + } + if (pfn_s == ~0UL || pfn == pfn_s) { + pfn_s = xen_remap_buf.target_pfn; + len += xen_remap_buf.size; + } else if (pfn_s + len == xen_remap_buf.target_pfn) { + len += xen_remap_buf.size; + } else { + xen_del_extra_mem(PFN_PHYS(pfn_s), PFN_PHYS(len)); + pfn_s = xen_remap_buf.target_pfn; + len = xen_remap_buf.size; + } + + mfn = xen_remap_mfn; + xen_remap_mfn = xen_remap_buf.next_area_mfn; + } + + if (pfn_s != ~0UL && len) + xen_del_extra_mem(PFN_PHYS(pfn_s), PFN_PHYS(len)); + + set_pte_mfn(buf, mfn_save, PAGE_KERNEL); + + pr_info("Remapped %ld page(s)\n", remapped); } + static unsigned long __init xen_get_max_pages(void) { unsigned long max_pages = MAX_DOMAIN_PAGES; @@ -569,7 +585,6 @@ char * __init xen_memory_setup(void) int rc; struct xen_memory_map memmap; unsigned long max_pages; - unsigned long last_pfn = 0; unsigned long extra_pages = 0; int i; int op; @@ -616,17 +631,14 @@ char * __init xen_memory_setup(void) extra_pages += max_pages - max_pfn; /* - * Set identity map on non-RAM pages and remap the underlying RAM. + * Set identity map on non-RAM pages and prepare remapping the + * underlying RAM. 
*/ - last_pfn = xen_set_identity_and_remap(map, memmap.nr_entries, max_pfn, - &xen_released_pages); + xen_set_identity_and_remap(map, memmap.nr_entries, max_pfn, + &xen_released_pages); extra_pages += xen_released_pages; - if (last_pfn > max_pfn) { - max_pfn = min(MAX_DOMAIN_PAGES, last_pfn); - mem_end = PFN_PHYS(max_pfn); - } /* * Clamp the amount of extra memory to a EXTRA_MEM_RATIO * factor the base size. On non-highmem systems, the base @@ -653,6 +665,7 @@ char * __init xen_memory_setup(void) size = min(size, (u64)extra_pages * PAGE_SIZE); extra_pages -= size / PAGE_SIZE; xen_add_extra_mem(addr, size); + xen_max_p2m_pfn = PFN_DOWN(addr + size); } else type = E820_UNUSABLE; } diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h index 28c7e0be56e4..5686bd9d58cc 100644 --- a/arch/x86/xen/xen-ops.h +++ b/arch/x86/xen/xen-ops.h @@ -29,12 +29,13 @@ void xen_build_mfn_list_list(void); void xen_setup_machphys_mapping(void); void xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn); void xen_reserve_top(void); -extern unsigned long xen_max_p2m_pfn; void xen_mm_pin_all(void); void xen_mm_unpin_all(void); -void xen_set_pat(u64); +unsigned long __ref xen_chk_extra_mem(unsigned long pfn); +void __init xen_inv_extra_mem(void); +void __init xen_remap_memory(void); char * __init xen_memory_setup(void); char * xen_auto_xlated_memory_setup(void); void __init xen_arch_setup(void); @@ -47,7 +48,7 @@ void xen_hvm_init_shared_info(void); void xen_unplug_emulated_devices(void); void __init xen_build_dynamic_phys_to_machine(void); -unsigned long __init xen_revector_p2m_tree(void); +void __init xen_vmalloc_p2m_tree(void); void xen_init_irq_ops(void); void xen_setup_timer(int cpu); diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index 81f57e8c8f1b..e31d4949124a 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -98,12 +98,6 @@ config XTENSA_VARIANT_DC233C help This variant refers to Tensilica's Diamond 233L Standard core Rev.C (LE). -config XTENSA_VARIANT_S6000 - bool "s6000 - Stretch software configurable processor" - select VARIANT_IRQ_SWITCH - select ARCH_REQUIRE_GPIOLIB - select XTENSA_CALIBRATE_CCOUNT - config XTENSA_VARIANT_CUSTOM bool "Custom Xtensa processor configuration" select MAY_HAVE_SMP @@ -126,7 +120,6 @@ config XTENSA_VARIANT_NAME default "dc232b" if XTENSA_VARIANT_DC232B default "dc233c" if XTENSA_VARIANT_DC233C default "fsf" if XTENSA_VARIANT_FSF - default "s6000" if XTENSA_VARIANT_S6000 default XTENSA_VARIANT_CUSTOM_NAME if XTENSA_VARIANT_CUSTOM config XTENSA_VARIANT_MMU @@ -191,7 +184,6 @@ config HOTPLUG_CPU config INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX bool "Initialize Xtensa MMU inside the Linux kernel code" - depends on MMU default y help Earlier version initialized the MMU in the exception vector @@ -311,15 +303,10 @@ config XTENSA_PLATFORM_XT2000 XT2000 is the name of Tensilica's feature-rich emulation platform. This hardware is capable of running a full Linux distribution. 
-config XTENSA_PLATFORM_S6105 - bool "S6105" - select HAVE_IDE - select SERIAL_CONSOLE - select NO_IOPORT_MAP - config XTENSA_PLATFORM_XTFPGA bool "XTFPGA" select ETHOC if ETHERNET + select PLATFORM_WANT_DEFAULT_MEM select SERIAL_CONSOLE select XTENSA_CALIBRATE_CCOUNT help @@ -406,6 +393,41 @@ source "drivers/pcmcia/Kconfig" source "drivers/pci/hotplug/Kconfig" +config PLATFORM_WANT_DEFAULT_MEM + def_bool n + +config DEFAULT_MEM_START + hex "Physical address of the default memory area start" + depends on PLATFORM_WANT_DEFAULT_MEM + default 0x00000000 if MMU + default 0x40000000 if !MMU + help + This is a fallback start address of the default memory area, it is + used when no physical memory size is passed through DTB or through + boot parameter from bootloader. + + In noMMU configuration the following parameters are derived from it: + - kernel load address; + - kernel entry point address; + - relocatable vectors base address; + - uBoot load address; + - TASK_SIZE. + + If unsure, leave the default value here. + +config DEFAULT_MEM_SIZE + hex "Maximal size of the default memory area" + depends on PLATFORM_WANT_DEFAULT_MEM + default 0x04000000 + help + This is a fallback size of the default memory area, it is used when + no physical memory size is passed through DTB or through boot + parameter from bootloader. + + It's also used for TASK_SIZE calculation in noMMU configuration. + + If unsure, leave the default value here. + endmenu menu "Executable file formats" @@ -414,6 +436,12 @@ source "fs/Kconfig.binfmt" endmenu +menu "Power management options" + +source "kernel/power/Kconfig" + +endmenu + source "net/Kconfig" source "drivers/Kconfig" diff --git a/arch/xtensa/Kconfig.debug b/arch/xtensa/Kconfig.debug index af7da74d535f..8430af27de0a 100644 --- a/arch/xtensa/Kconfig.debug +++ b/arch/xtensa/Kconfig.debug @@ -4,7 +4,7 @@ source "lib/Kconfig.debug" config DEBUG_TLB_SANITY bool "Debug TLB sanity" - depends on DEBUG_KERNEL + depends on DEBUG_KERNEL && MMU help Enable this to turn on TLB sanity check on each entry to userspace. This check can spot missing TLB invalidation/wrong PTE permissions/ @@ -14,7 +14,7 @@ config DEBUG_TLB_SANITY config LD_NO_RELAX bool "Disable linker relaxation" - default n + default y help Enable this function to disable link-time optimizations. The default linker behavior is to combine identical literal diff --git a/arch/xtensa/Makefile b/arch/xtensa/Makefile index 472533064b46..f9e6a068aafd 100644 --- a/arch/xtensa/Makefile +++ b/arch/xtensa/Makefile @@ -35,7 +35,6 @@ endif platform-$(CONFIG_XTENSA_PLATFORM_XT2000) := xt2000 platform-$(CONFIG_XTENSA_PLATFORM_ISS) := iss -platform-$(CONFIG_XTENSA_PLATFORM_S6105) := s6105 platform-$(CONFIG_XTENSA_PLATFORM_XTFPGA) := xtfpga PLATFORM = $(platform-y) diff --git a/arch/xtensa/boot/boot-elf/boot.lds.S b/arch/xtensa/boot/boot-elf/boot.lds.S index 932b58ef33d4..958b33af96b7 100644 --- a/arch/xtensa/boot/boot-elf/boot.lds.S +++ b/arch/xtensa/boot/boot-elf/boot.lds.S @@ -41,6 +41,7 @@ SECTIONS __bss_end = .; } +#ifdef CONFIG_MMU /* * This is a remapped copy of the Reset Vector Code. 
* It keeps gdb in sync with the PC after switching @@ -51,4 +52,5 @@ SECTIONS { *(.ResetVector.remapped_text) } +#endif } diff --git a/arch/xtensa/boot/boot-elf/bootstrap.S b/arch/xtensa/boot/boot-elf/bootstrap.S index 1388a499753b..9341a5750694 100644 --- a/arch/xtensa/boot/boot-elf/bootstrap.S +++ b/arch/xtensa/boot/boot-elf/bootstrap.S @@ -20,6 +20,7 @@ #include <asm/page.h> #include <asm/cacheasm.h> #include <asm/initialize_mmu.h> +#include <asm/vectors.h> #include <linux/linkage.h> .section .ResetVector.text, "ax" @@ -34,12 +35,7 @@ _ResetVector: .align 4 RomInitAddr: -#if defined(CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX) && \ - XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY - .word 0x00003000 -#else - .word 0xd0003000 -#endif + .word LOAD_MEMORY_ADDRESS RomBootParam: .word _bootparam _bootparam: @@ -79,6 +75,7 @@ reset: movi a4, 0 jx a0 +#ifdef CONFIG_MMU .align 4 .section .ResetVector.remapped_text, "x" @@ -102,3 +99,4 @@ _RemappedSetupMMU: #endif .end no-absolute-literals +#endif diff --git a/arch/xtensa/boot/boot-uboot/Makefile b/arch/xtensa/boot/boot-uboot/Makefile index 545759819ef9..403fcf23405c 100644 --- a/arch/xtensa/boot/boot-uboot/Makefile +++ b/arch/xtensa/boot/boot-uboot/Makefile @@ -4,11 +4,15 @@ # for more details. # +ifdef CONFIG_MMU ifdef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX UIMAGE_LOADADDR = 0x00003000 else UIMAGE_LOADADDR = 0xd0003000 endif +else +UIMAGE_LOADADDR = $(shell printf "0x%x" $$(( ${CONFIG_DEFAULT_MEM_START} + 0x3000 )) ) +endif UIMAGE_COMPRESSION = gzip $(obj)/../uImage: vmlinux.bin.gz FORCE diff --git a/arch/xtensa/configs/iss_defconfig b/arch/xtensa/configs/iss_defconfig index b966baf82cae..e4d193e7a300 100644 --- a/arch/xtensa/configs/iss_defconfig +++ b/arch/xtensa/configs/iss_defconfig @@ -143,7 +143,6 @@ CONFIG_MMU=y # CONFIG_XTENSA_VARIANT_FSF=y # CONFIG_XTENSA_VARIANT_DC232B is not set -# CONFIG_XTENSA_VARIANT_S6000 is not set # CONFIG_XTENSA_UNALIGNED_USER is not set # CONFIG_PREEMPT is not set CONFIG_XTENSA_CALIBRATE_CCOUNT=y @@ -161,7 +160,6 @@ CONFIG_XTENSA_ISS_NETWORK=y # CONFIG_XTENSA_PLATFORM_ISS=y # CONFIG_XTENSA_PLATFORM_XT2000 is not set -# CONFIG_XTENSA_PLATFORM_S6105 is not set # CONFIG_GENERIC_CALIBRATE_DELAY is not set CONFIG_CMDLINE_BOOL=y CONFIG_CMDLINE="console=ttyS0,38400 eth0=tuntap,,tap0 ip=192.168.168.5:192.168.168.1 root=nfs nfsroot=192.168.168.1:/opt/montavista/pro/devkit/xtensa/linux_be/target" @@ -759,3 +757,4 @@ CONFIG_GENERIC_FIND_LAST_BIT=y CONFIG_HAS_IOMEM=y CONFIG_HAS_DMA=y CONFIG_NLATTR=y +CONFIG_LD_NO_RELAX=y diff --git a/arch/xtensa/configs/s6105_defconfig b/arch/xtensa/configs/s6105_defconfig deleted file mode 100644 index 9471265b8ca6..000000000000 --- a/arch/xtensa/configs/s6105_defconfig +++ /dev/null @@ -1,615 +0,0 @@ -# -# Automatically generated make config: don't edit -# Linux kernel version: 2.6.29-rc7-s6 -# Tue Mar 10 11:09:26 2009 -# -# CONFIG_FRAME_POINTER is not set -CONFIG_ZONE_DMA=y -CONFIG_XTENSA=y -CONFIG_RWSEM_XCHGADD_ALGORITHM=y -CONFIG_GENERIC_FIND_NEXT_BIT=y -CONFIG_GENERIC_HWEIGHT=y -# CONFIG_ARCH_HAS_ILOG2_U32 is not set -# CONFIG_ARCH_HAS_ILOG2_U64 is not set -CONFIG_NO_IOPORT_MAP=y -CONFIG_HZ=100 -CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" - -# -# General setup -# -CONFIG_EXPERIMENTAL=y -CONFIG_BROKEN_ON_SMP=y -CONFIG_INIT_ENV_ARG_LIMIT=32 -CONFIG_LOCALVERSION="" -CONFIG_LOCALVERSION_AUTO=y -CONFIG_SYSVIPC=y -CONFIG_SYSVIPC_SYSCTL=y -# CONFIG_POSIX_MQUEUE is not set -# CONFIG_BSD_PROCESS_ACCT is not set -# CONFIG_TASKSTATS is not set -# CONFIG_AUDIT is not set - 
-# -# RCU Subsystem -# -# CONFIG_CLASSIC_RCU is not set -# CONFIG_TREE_RCU is not set -CONFIG_PREEMPT_RCU=y -# CONFIG_RCU_TRACE is not set -# CONFIG_TREE_RCU_TRACE is not set -# CONFIG_PREEMPT_RCU_TRACE is not set -CONFIG_IKCONFIG=y -CONFIG_IKCONFIG_PROC=y -CONFIG_LOG_BUF_SHIFT=16 -# CONFIG_GROUP_SCHED is not set -# CONFIG_CGROUPS is not set -# CONFIG_SYSFS_DEPRECATED_V2 is not set -# CONFIG_RELAY is not set -# CONFIG_NAMESPACES is not set -CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="" -CONFIG_CC_OPTIMIZE_FOR_SIZE=y -CONFIG_SYSCTL=y -CONFIG_EXPERT=y -CONFIG_SYSCTL_SYSCALL=y -CONFIG_KALLSYMS=y -# CONFIG_KALLSYMS_ALL is not set -# CONFIG_KALLSYMS_EXTRA_PASS is not set -# CONFIG_HOTPLUG is not set -CONFIG_PRINTK=y -CONFIG_BUG=y -CONFIG_ELF_CORE=y -# CONFIG_COMPAT_BRK is not set -CONFIG_BASE_FULL=y -CONFIG_FUTEX=y -CONFIG_ANON_INODES=y -CONFIG_EPOLL=y -CONFIG_SIGNALFD=y -CONFIG_TIMERFD=y -CONFIG_EVENTFD=y -CONFIG_AIO=y -CONFIG_VM_EVENT_COUNTERS=y -CONFIG_SLAB=y -# CONFIG_SLUB is not set -# CONFIG_SLOB is not set -# CONFIG_PROFILING is not set -# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set -CONFIG_SLABINFO=y -CONFIG_RT_MUTEXES=y -CONFIG_BASE_SMALL=0 -# CONFIG_MODULES is not set -CONFIG_BLOCK=y -# CONFIG_LBD is not set -# CONFIG_BLK_DEV_IO_TRACE is not set -# CONFIG_BLK_DEV_BSG is not set -# CONFIG_BLK_DEV_INTEGRITY is not set - -# -# IO Schedulers -# -CONFIG_IOSCHED_NOOP=y -# CONFIG_IOSCHED_AS is not set -# CONFIG_IOSCHED_DEADLINE is not set -CONFIG_IOSCHED_CFQ=y -# CONFIG_DEFAULT_AS is not set -# CONFIG_DEFAULT_DEADLINE is not set -CONFIG_DEFAULT_CFQ=y -# CONFIG_DEFAULT_NOOP is not set -CONFIG_DEFAULT_IOSCHED="cfq" -# CONFIG_FREEZER is not set -# CONFIG_MMU is not set -CONFIG_VARIANT_IRQ_SWITCH=y - -# -# Processor type and features -# -# CONFIG_XTENSA_VARIANT_FSF is not set -# CONFIG_XTENSA_VARIANT_DC232B is not set -CONFIG_XTENSA_VARIANT_S6000=y -# CONFIG_XTENSA_UNALIGNED_USER is not set -CONFIG_PREEMPT=y -# CONFIG_HIGHMEM is not set -CONFIG_XTENSA_CALIBRATE_CCOUNT=y -CONFIG_SERIAL_CONSOLE=y -# CONFIG_XTENSA_ISS_NETWORK is not set - -# -# Bus options -# -# CONFIG_PCI is not set -# CONFIG_ARCH_SUPPORTS_MSI is not set - -# -# Platform options -# -# CONFIG_XTENSA_PLATFORM_ISS is not set -# CONFIG_XTENSA_PLATFORM_XT2000 is not set -CONFIG_XTENSA_PLATFORM_S6105=y -CONFIG_GENERIC_CALIBRATE_DELAY=y -CONFIG_CMDLINE_BOOL=y -CONFIG_CMDLINE="console=ttyS1,38400 debug bootmem_debug loglevel=7" -CONFIG_SELECT_MEMORY_MODEL=y -CONFIG_FLATMEM_MANUAL=y -# CONFIG_DISCONTIGMEM_MANUAL is not set -# CONFIG_SPARSEMEM_MANUAL is not set -CONFIG_FLATMEM=y -CONFIG_FLAT_NODE_MEM_MAP=y -CONFIG_PAGEFLAGS_EXTENDED=y -CONFIG_SPLIT_PTLOCK_CPUS=4 -# CONFIG_PHYS_ADDR_T_64BIT is not set -CONFIG_ZONE_DMA_FLAG=1 -CONFIG_VIRT_TO_BUS=y - -# -# Executable file formats -# -CONFIG_KCORE_ELF=y -CONFIG_BINFMT_FLAT=y -# CONFIG_BINFMT_ZFLAT is not set -# CONFIG_BINFMT_SHARED_FLAT is not set -# CONFIG_HAVE_AOUT is not set -# CONFIG_BINFMT_MISC is not set -CONFIG_NET=y - -# -# Networking options -# -CONFIG_COMPAT_NET_DEV_OPS=y -CONFIG_PACKET=y -# CONFIG_PACKET_MMAP is not set -CONFIG_UNIX=y -# CONFIG_NET_KEY is not set -CONFIG_INET=y -# CONFIG_IP_MULTICAST is not set -# CONFIG_IP_ADVANCED_ROUTER is not set -CONFIG_IP_FIB_HASH=y -# CONFIG_IP_PNP is not set -# CONFIG_NET_IPIP is not set -# CONFIG_NET_IPGRE is not set -# CONFIG_ARPD is not set -# CONFIG_SYN_COOKIES is not set -# CONFIG_INET_AH is not set -# CONFIG_INET_ESP is not set -# CONFIG_INET_IPCOMP is not set -# CONFIG_INET_XFRM_TUNNEL is not set -# CONFIG_INET_TUNNEL is not set 
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set -# CONFIG_INET_XFRM_MODE_TUNNEL is not set -# CONFIG_INET_XFRM_MODE_BEET is not set -# CONFIG_INET_LRO is not set -# CONFIG_INET_DIAG is not set -# CONFIG_TCP_CONG_ADVANCED is not set -CONFIG_TCP_CONG_CUBIC=y -CONFIG_DEFAULT_TCP_CONG="cubic" -# CONFIG_TCP_MD5SIG is not set -# CONFIG_IPV6 is not set -# CONFIG_NETWORK_SECMARK is not set -# CONFIG_NETFILTER is not set -# CONFIG_IP_DCCP is not set -# CONFIG_IP_SCTP is not set -# CONFIG_TIPC is not set -# CONFIG_ATM is not set -# CONFIG_BRIDGE is not set -# CONFIG_NET_DSA is not set -# CONFIG_VLAN_8021Q is not set -# CONFIG_DECNET is not set -# CONFIG_LLC2 is not set -# CONFIG_IPX is not set -# CONFIG_ATALK is not set -# CONFIG_X25 is not set -# CONFIG_LAPB is not set -# CONFIG_ECONET is not set -# CONFIG_WAN_ROUTER is not set -# CONFIG_NET_SCHED is not set -# CONFIG_DCB is not set - -# -# Network testing -# -# CONFIG_NET_PKTGEN is not set -# CONFIG_HAMRADIO is not set -# CONFIG_CAN is not set -# CONFIG_IRDA is not set -# CONFIG_BT is not set -# CONFIG_AF_RXRPC is not set -# CONFIG_PHONET is not set -# CONFIG_WIRELESS is not set -# CONFIG_WIMAX is not set -# CONFIG_RFKILL is not set -# CONFIG_NET_9P is not set - -# -# Device Drivers -# - -# -# Generic Driver Options -# -CONFIG_STANDALONE=y -CONFIG_PREVENT_FIRMWARE_BUILD=y -# CONFIG_DEBUG_DRIVER is not set -# CONFIG_DEBUG_DEVRES is not set -# CONFIG_SYS_HYPERVISOR is not set -# CONFIG_CONNECTOR is not set -# CONFIG_MTD is not set -# CONFIG_PARPORT is not set -CONFIG_BLK_DEV=y -# CONFIG_BLK_DEV_COW_COMMON is not set -# CONFIG_BLK_DEV_LOOP is not set -# CONFIG_BLK_DEV_NBD is not set -CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_RAM_COUNT=16 -CONFIG_BLK_DEV_RAM_SIZE=4096 -# CONFIG_BLK_DEV_XIP is not set -# CONFIG_CDROM_PKTCDVD is not set -# CONFIG_ATA_OVER_ETH is not set -# CONFIG_BLK_DEV_HD is not set -# CONFIG_MISC_DEVICES is not set -CONFIG_HAVE_IDE=y -# CONFIG_IDE is not set - -# -# SCSI device support -# -# CONFIG_RAID_ATTRS is not set -# CONFIG_SCSI is not set -# CONFIG_SCSI_DMA is not set -# CONFIG_SCSI_NETLINK is not set -# CONFIG_ATA is not set -# CONFIG_MD is not set -CONFIG_NETDEVICES=y -# CONFIG_DUMMY is not set -# CONFIG_BONDING is not set -# CONFIG_MACVLAN is not set -# CONFIG_EQUALIZER is not set -# CONFIG_TUN is not set -# CONFIG_VETH is not set -CONFIG_PHYLIB=y - -# -# MII PHY device drivers -# -# CONFIG_MARVELL_PHY is not set -# CONFIG_DAVICOM_PHY is not set -# CONFIG_QSEMI_PHY is not set -# CONFIG_LXT_PHY is not set -# CONFIG_CICADA_PHY is not set -# CONFIG_VITESSE_PHY is not set -CONFIG_SMSC_PHY=y -# CONFIG_BROADCOM_PHY is not set -# CONFIG_ICPLUS_PHY is not set -# CONFIG_REALTEK_PHY is not set -# CONFIG_NATIONAL_PHY is not set -# CONFIG_STE10XP is not set -# CONFIG_LSI_ET1011C_PHY is not set -# CONFIG_FIXED_PHY is not set -# CONFIG_MDIO_BITBANG is not set -# CONFIG_NET_ETHERNET is not set -CONFIG_NETDEV_1000=y -CONFIG_S6GMAC=y -# CONFIG_NETDEV_10000 is not set - -# -# Wireless LAN -# -# CONFIG_WLAN_PRE80211 is not set -# CONFIG_WLAN_80211 is not set -# CONFIG_IWLWIFI_LEDS is not set - -# -# Enable WiMAX (Networking options) to see the WiMAX drivers -# -# CONFIG_WAN is not set -# CONFIG_PPP is not set -# CONFIG_SLIP is not set -# CONFIG_NETCONSOLE is not set -# CONFIG_NETPOLL is not set -# CONFIG_NET_POLL_CONTROLLER is not set -# CONFIG_ISDN is not set -# CONFIG_PHONE is not set - -# -# Input device support -# -# CONFIG_INPUT is not set - -# -# Hardware I/O ports -# -# CONFIG_SERIO is not set -# CONFIG_GAMEPORT is not set - -# -# Character 
devices -# -# CONFIG_VT is not set -# CONFIG_DEVKMEM is not set -# CONFIG_SERIAL_NONSTANDARD is not set - -# -# Serial drivers -# -CONFIG_SERIAL_8250=y -CONFIG_SERIAL_8250_CONSOLE=y -CONFIG_SERIAL_8250_NR_UARTS=2 -CONFIG_SERIAL_8250_RUNTIME_UARTS=2 -# CONFIG_SERIAL_8250_EXTENDED is not set - -# -# Non-8250 serial port support -# -CONFIG_SERIAL_CORE=y -CONFIG_SERIAL_CORE_CONSOLE=y -CONFIG_UNIX98_PTYS=y -# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set -# CONFIG_LEGACY_PTYS is not set -# CONFIG_IPMI_HANDLER is not set -# CONFIG_HW_RANDOM is not set -# CONFIG_R3964 is not set -# CONFIG_RAW_DRIVER is not set -# CONFIG_TCG_TPM is not set -# CONFIG_I2C is not set -# CONFIG_SPI is not set -CONFIG_ARCH_REQUIRE_GPIOLIB=y -CONFIG_GPIOLIB=y -# CONFIG_DEBUG_GPIO is not set -# CONFIG_GPIO_SYSFS is not set - -# -# Memory mapped GPIO expanders: -# - -# -# I2C GPIO expanders: -# - -# -# PCI GPIO expanders: -# - -# -# SPI GPIO expanders: -# -# CONFIG_W1 is not set -# CONFIG_POWER_SUPPLY is not set -# CONFIG_HWMON is not set -# CONFIG_THERMAL is not set -# CONFIG_THERMAL_HWMON is not set -# CONFIG_WATCHDOG is not set -CONFIG_SSB_POSSIBLE=y - -# -# Sonics Silicon Backplane -# -# CONFIG_SSB is not set - -# -# Multifunction device drivers -# -# CONFIG_MFD_CORE is not set -# CONFIG_MFD_SM501 is not set -# CONFIG_HTC_PASIC3 is not set -# CONFIG_MFD_TMIO is not set -# CONFIG_REGULATOR is not set - -# -# Multimedia devices -# - -# -# Multimedia core support -# -# CONFIG_VIDEO_DEV is not set -# CONFIG_DVB_CORE is not set -# CONFIG_VIDEO_MEDIA is not set - -# -# Multimedia drivers -# -# CONFIG_DAB is not set - -# -# Graphics support -# -# CONFIG_VGASTATE is not set -# CONFIG_VIDEO_OUTPUT_CONTROL is not set -# CONFIG_FB is not set -# CONFIG_BACKLIGHT_LCD_SUPPORT is not set - -# -# Display device support -# -# CONFIG_DISPLAY_SUPPORT is not set -# CONFIG_SOUND is not set -# CONFIG_USB_SUPPORT is not set -# CONFIG_MMC is not set -# CONFIG_MEMSTICK is not set -# CONFIG_NEW_LEDS is not set -# CONFIG_ACCESSIBILITY is not set -CONFIG_RTC_LIB=y -CONFIG_RTC_CLASS=y -CONFIG_RTC_HCTOSYS=y -CONFIG_RTC_HCTOSYS_DEVICE="rtc0" -# CONFIG_RTC_DEBUG is not set - -# -# RTC interfaces -# -# CONFIG_RTC_INTF_SYSFS is not set -# CONFIG_RTC_INTF_PROC is not set -# CONFIG_RTC_INTF_DEV is not set -# CONFIG_RTC_DRV_TEST is not set - -# -# I2C RTC drivers -# -# CONFIG_RTC_DRV_DS1307 is not set -# CONFIG_RTC_DRV_DS1374 is not set -# CONFIG_RTC_DRV_DS1672 is not set -# CONFIG_RTC_DRV_MAX6900 is not set -# CONFIG_RTC_DRV_RS5C372 is not set -# CONFIG_RTC_DRV_ISL1208 is not set -# CONFIG_RTC_DRV_X1205 is not set -# CONFIG_RTC_DRV_PCF8563 is not set -# CONFIG_RTC_DRV_PCF8583 is not set -CONFIG_RTC_DRV_M41T80=y -# CONFIG_RTC_DRV_M41T80_WDT is not set -# CONFIG_RTC_DRV_S35390A is not set -# CONFIG_RTC_DRV_FM3130 is not set -# CONFIG_RTC_DRV_RX8581 is not set - -# -# SPI RTC drivers -# - -# -# Platform RTC drivers -# -# CONFIG_RTC_DRV_DS1286 is not set -# CONFIG_RTC_DRV_DS1511 is not set -# CONFIG_RTC_DRV_DS1553 is not set -# CONFIG_RTC_DRV_DS1742 is not set -# CONFIG_RTC_DRV_STK17TA8 is not set -# CONFIG_RTC_DRV_M48T86 is not set -# CONFIG_RTC_DRV_M48T35 is not set -# CONFIG_RTC_DRV_M48T59 is not set -# CONFIG_RTC_DRV_BQ4802 is not set -# CONFIG_RTC_DRV_V3020 is not set - -# -# on-CPU RTC drivers -# -# CONFIG_DMADEVICES is not set -# CONFIG_UIO is not set -# CONFIG_STAGING is not set - -# -# File systems -# -# CONFIG_EXT2_FS is not set -# CONFIG_EXT3_FS is not set -# CONFIG_EXT4_FS is not set -# CONFIG_REISERFS_FS is not set -# CONFIG_JFS_FS is not set -# 
CONFIG_FS_POSIX_ACL is not set -CONFIG_FILE_LOCKING=y -# CONFIG_XFS_FS is not set -# CONFIG_OCFS2_FS is not set -# CONFIG_BTRFS_FS is not set -# CONFIG_DNOTIFY is not set -# CONFIG_INOTIFY is not set -# CONFIG_QUOTA is not set -# CONFIG_AUTOFS_FS is not set -# CONFIG_AUTOFS4_FS is not set -# CONFIG_FUSE_FS is not set - -# -# CD-ROM/DVD Filesystems -# -# CONFIG_ISO9660_FS is not set -# CONFIG_UDF_FS is not set - -# -# DOS/FAT/NT Filesystems -# -# CONFIG_MSDOS_FS is not set -# CONFIG_VFAT_FS is not set -# CONFIG_NTFS_FS is not set - -# -# Pseudo filesystems -# -CONFIG_PROC_FS=y -CONFIG_PROC_SYSCTL=y -CONFIG_SYSFS=y -# CONFIG_TMPFS is not set -# CONFIG_HUGETLB_PAGE is not set -# CONFIG_CONFIGFS_FS is not set -# CONFIG_MISC_FILESYSTEMS is not set -# CONFIG_NETWORK_FILESYSTEMS is not set - -# -# Partition Types -# -# CONFIG_PARTITION_ADVANCED is not set -CONFIG_MSDOS_PARTITION=y -# CONFIG_NLS is not set -# CONFIG_DLM is not set - -# -# Kernel hacking -# -CONFIG_PRINTK_TIME=y -# CONFIG_ENABLE_WARN_DEPRECATED is not set -# CONFIG_ENABLE_MUST_CHECK is not set -CONFIG_FRAME_WARN=1024 -# CONFIG_MAGIC_SYSRQ is not set -# CONFIG_UNUSED_SYMBOLS is not set -# CONFIG_DEBUG_FS is not set -# CONFIG_HEADERS_CHECK is not set -CONFIG_DEBUG_KERNEL=y -CONFIG_DEBUG_SHIRQ=y -CONFIG_DETECT_SOFTLOCKUP=y -# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set -CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 -# CONFIG_SCHED_DEBUG is not set -# CONFIG_SCHEDSTATS is not set -# CONFIG_TIMER_STATS is not set -# CONFIG_DEBUG_OBJECTS is not set -# CONFIG_DEBUG_SLAB is not set -# CONFIG_DEBUG_RT_MUTEXES is not set -# CONFIG_RT_MUTEX_TESTER is not set -CONFIG_DEBUG_SPINLOCK=y -CONFIG_DEBUG_MUTEXES=y -CONFIG_DEBUG_SPINLOCK_SLEEP=y -# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set -# CONFIG_DEBUG_KOBJECT is not set -# CONFIG_DEBUG_INFO is not set -# CONFIG_DEBUG_VM is not set -CONFIG_DEBUG_NOMMU_REGIONS=y -# CONFIG_DEBUG_MEMORY_INIT is not set -# CONFIG_DEBUG_LIST is not set -# CONFIG_DEBUG_SG is not set -# CONFIG_DEBUG_NOTIFIERS is not set -# CONFIG_BOOT_PRINTK_DELAY is not set -# CONFIG_RCU_TORTURE_TEST is not set -# CONFIG_BACKTRACE_SELF_TEST is not set -# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set -# CONFIG_FAULT_INJECTION is not set -# CONFIG_SYSCTL_SYSCALL_CHECK is not set - -# -# Tracers -# -# CONFIG_PREEMPT_TRACER is not set -# CONFIG_SCHED_TRACER is not set -# CONFIG_CONTEXT_SWITCH_TRACER is not set -# CONFIG_BOOT_TRACER is not set -# CONFIG_TRACE_BRANCH_PROFILING is not set -# CONFIG_DYNAMIC_DEBUG is not set -# CONFIG_SAMPLES is not set - -# -# Security options -# -# CONFIG_KEYS is not set -# CONFIG_SECURITY is not set -# CONFIG_SECURITYFS is not set -# CONFIG_SECURITY_FILE_CAPABILITIES is not set -# CONFIG_CRYPTO is not set - -# -# Library routines -# -CONFIG_GENERIC_FIND_LAST_BIT=y -# CONFIG_CRC_CCITT is not set -# CONFIG_CRC16 is not set -# CONFIG_CRC_T10DIF is not set -# CONFIG_CRC_ITU_T is not set -# CONFIG_CRC32 is not set -# CONFIG_CRC7 is not set -# CONFIG_LIBCRC32C is not set -CONFIG_PLIST=y -CONFIG_HAS_IOMEM=y -CONFIG_HAS_DMA=y diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild index 105d38922c44..86a9ab2e2ca9 100644 --- a/arch/xtensa/include/asm/Kbuild +++ b/arch/xtensa/include/asm/Kbuild @@ -9,7 +9,6 @@ generic-y += errno.h generic-y += exec.h generic-y += fcntl.h generic-y += hardirq.h -generic-y += hash.h generic-y += ioctl.h generic-y += irq_regs.h generic-y += irq_work.h diff --git a/arch/xtensa/include/asm/cacheflush.h b/arch/xtensa/include/asm/cacheflush.h index e72aaca7a77f..5f67ace97b32 
100644 --- a/arch/xtensa/include/asm/cacheflush.h +++ b/arch/xtensa/include/asm/cacheflush.h @@ -67,6 +67,8 @@ extern void __invalidate_dcache_page_alias(unsigned long, unsigned long); #else static inline void __flush_invalidate_dcache_page_alias(unsigned long virt, unsigned long phys) { } +static inline void __invalidate_dcache_page_alias(unsigned long virt, + unsigned long phys) { } #endif #if defined(CONFIG_MMU) && (ICACHE_WAY_SIZE > PAGE_SIZE) extern void __invalidate_icache_page_alias(unsigned long, unsigned long); @@ -84,7 +86,8 @@ static inline void __invalidate_icache_page_alias(unsigned long virt, * (see also Documentation/cachetlb.txt) */ -#if (DCACHE_WAY_SIZE > PAGE_SIZE) || defined(CONFIG_SMP) +#if defined(CONFIG_MMU) && \ + ((DCACHE_WAY_SIZE > PAGE_SIZE) || defined(CONFIG_SMP)) #ifdef CONFIG_SMP void flush_cache_all(void); @@ -150,7 +153,7 @@ void local_flush_cache_page(struct vm_area_struct *vma, #define flush_dcache_mmap_lock(mapping) do { } while (0) #define flush_dcache_mmap_unlock(mapping) do { } while (0) -#if (DCACHE_WAY_SIZE > PAGE_SIZE) +#if defined(CONFIG_MMU) && (DCACHE_WAY_SIZE > PAGE_SIZE) extern void copy_to_user_page(struct vm_area_struct*, struct page*, unsigned long, void*, const void*, unsigned long); diff --git a/arch/xtensa/include/asm/highmem.h b/arch/xtensa/include/asm/highmem.h index 2c7901edffaf..01cef6b40829 100644 --- a/arch/xtensa/include/asm/highmem.h +++ b/arch/xtensa/include/asm/highmem.h @@ -25,7 +25,7 @@ #define PKMAP_NR(virt) (((virt) - PKMAP_BASE) >> PAGE_SHIFT) #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT)) -#define kmap_prot PAGE_KERNEL +#define kmap_prot PAGE_KERNEL_EXEC #if DCACHE_WAY_SIZE > PAGE_SIZE #define get_pkmap_color get_pkmap_color diff --git a/arch/xtensa/include/asm/initialize_mmu.h b/arch/xtensa/include/asm/initialize_mmu.h index 600781edc8a3..e256f2270ec9 100644 --- a/arch/xtensa/include/asm/initialize_mmu.h +++ b/arch/xtensa/include/asm/initialize_mmu.h @@ -26,8 +26,16 @@ #include <asm/pgtable.h> #include <asm/vectors.h> +#if XCHAL_HAVE_PTP_MMU #define CA_BYPASS (_PAGE_CA_BYPASS | _PAGE_HW_WRITE | _PAGE_HW_EXEC) #define CA_WRITEBACK (_PAGE_CA_WB | _PAGE_HW_WRITE | _PAGE_HW_EXEC) +#else +#define CA_WRITEBACK (0x4) +#endif + +#ifndef XCHAL_SPANNING_WAY +#define XCHAL_SPANNING_WAY 0 +#endif #ifdef __ASSEMBLY__ @@ -75,7 +83,7 @@ /* Step 1: invalidate mapping at 0x40000000..0x5FFFFFFF. */ - movi a2, 0x40000006 + movi a2, 0x40000000 | XCHAL_SPANNING_WAY idtlb a2 iitlb a2 isync @@ -141,9 +149,6 @@ jx a4 1: - movi a2, VECBASE_RESET_VADDR - wsr a2, vecbase - /* Step 5: remove temporary mapping. */ idtlb a7 iitlb a7 @@ -156,6 +161,33 @@ #endif /* defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY */ +#if !defined(CONFIG_MMU) && XCHAL_HAVE_TLBS + /* Enable data and instruction cache in the DEFAULT_MEMORY region + * if the processor has DTLB and ITLB. 
+ */ + + movi a5, PLATFORM_DEFAULT_MEM_START | XCHAL_SPANNING_WAY + movi a6, ~_PAGE_ATTRIB_MASK + movi a7, CA_WRITEBACK + movi a8, 0x20000000 + movi a9, PLATFORM_DEFAULT_MEM_SIZE + j 2f +1: + sub a9, a9, a8 +2: + rdtlb1 a3, a5 + ritlb1 a4, a5 + and a3, a3, a6 + and a4, a4, a6 + or a3, a3, a7 + or a4, a4, a7 + wdtlb a3, a5 + witlb a4, a5 + add a5, a5, a8 + bltu a8, a9, 1b + +#endif + .endm #endif /*__ASSEMBLY__*/ diff --git a/arch/xtensa/include/asm/mmu_context.h b/arch/xtensa/include/asm/mmu_context.h index d33c71a8c9ec..04c8ebdc4517 100644 --- a/arch/xtensa/include/asm/mmu_context.h +++ b/arch/xtensa/include/asm/mmu_context.h @@ -50,11 +50,7 @@ DECLARE_PER_CPU(unsigned long, asid_cache); #define ASID_MASK ((1 << XCHAL_MMU_ASID_BITS) - 1) #define ASID_INSERT(x) (0x03020001 | (((x) & ASID_MASK) << 8)) -#ifdef CONFIG_MMU void init_mmu(void); -#else -static inline void init_mmu(void) { } -#endif static inline void set_rasid_register (unsigned long val) { diff --git a/arch/xtensa/include/asm/nommu_context.h b/arch/xtensa/include/asm/nommu_context.h index 3407cf7989b7..22984fd1d846 100644 --- a/arch/xtensa/include/asm/nommu_context.h +++ b/arch/xtensa/include/asm/nommu_context.h @@ -1,3 +1,7 @@ +static inline void init_mmu(void) +{ +} + static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) { } diff --git a/arch/xtensa/include/asm/page.h b/arch/xtensa/include/asm/page.h index abe24c6f8b2f..ad38500471fa 100644 --- a/arch/xtensa/include/asm/page.h +++ b/arch/xtensa/include/asm/page.h @@ -20,10 +20,10 @@ * Fixed TLB translations in the processor. */ -#define XCHAL_KSEG_CACHED_VADDR 0xd0000000 -#define XCHAL_KSEG_BYPASS_VADDR 0xd8000000 -#define XCHAL_KSEG_PADDR 0x00000000 -#define XCHAL_KSEG_SIZE 0x08000000 +#define XCHAL_KSEG_CACHED_VADDR __XTENSA_UL_CONST(0xd0000000) +#define XCHAL_KSEG_BYPASS_VADDR __XTENSA_UL_CONST(0xd8000000) +#define XCHAL_KSEG_PADDR __XTENSA_UL_CONST(0x00000000) +#define XCHAL_KSEG_SIZE __XTENSA_UL_CONST(0x08000000) /* * PAGE_SHIFT determines the page size @@ -37,7 +37,7 @@ #define PAGE_OFFSET XCHAL_KSEG_CACHED_VADDR #define MAX_MEM_PFN XCHAL_KSEG_SIZE #else -#define PAGE_OFFSET 0 +#define PAGE_OFFSET __XTENSA_UL_CONST(0) #define MAX_MEM_PFN (PLATFORM_DEFAULT_MEM_START + PLATFORM_DEFAULT_MEM_SIZE) #endif @@ -145,7 +145,7 @@ extern void copy_page(void *to, void *from); * some extra work */ -#if DCACHE_WAY_SIZE > PAGE_SIZE +#if defined(CONFIG_MMU) && DCACHE_WAY_SIZE > PAGE_SIZE extern void clear_page_alias(void *vaddr, unsigned long paddr); extern void copy_page_alias(void *to, void *from, unsigned long to_paddr, unsigned long from_paddr); diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h index 0383aed59121..872bf0194e6d 100644 --- a/arch/xtensa/include/asm/pgtable.h +++ b/arch/xtensa/include/asm/pgtable.h @@ -178,6 +178,7 @@ #else /* no mmu */ +# define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY) # define PAGE_NONE __pgprot(0) # define PAGE_SHARED __pgprot(0) # define PAGE_COPY __pgprot(0) diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h index c7211e7e182d..876eb380aa26 100644 --- a/arch/xtensa/include/asm/uaccess.h +++ b/arch/xtensa/include/asm/uaccess.h @@ -320,7 +320,7 @@ __asm__ __volatile__( \ ({ \ long __gu_err, __gu_val; \ __get_user_size(__gu_val,(ptr),(size),__gu_err); \ - (x) = (__typeof__(*(ptr)))__gu_val; \ + (x) = (__force __typeof__(*(ptr)))__gu_val; \ __gu_err; \ }) @@ -330,7 +330,7 @@ __asm__ __volatile__( \ const __typeof__(*(ptr)) *__gu_addr = (ptr); \ 
if (access_ok(VERIFY_READ,__gu_addr,size)) \ __get_user_size(__gu_val,__gu_addr,(size),__gu_err); \ - (x) = (__typeof__(*(ptr)))__gu_val; \ + (x) = (__force __typeof__(*(ptr)))__gu_val; \ __gu_err; \ }) diff --git a/arch/xtensa/include/asm/vectors.h b/arch/xtensa/include/asm/vectors.h index f74ddfbb92ef..a46c53f36113 100644 --- a/arch/xtensa/include/asm/vectors.h +++ b/arch/xtensa/include/asm/vectors.h @@ -19,6 +19,7 @@ #define _XTENSA_VECTORS_H #include <variant/core.h> +#include <platform/hardware.h> #define XCHAL_KIO_CACHED_VADDR 0xe0000000 #define XCHAL_KIO_BYPASS_VADDR 0xf0000000 @@ -51,13 +52,13 @@ /* MMU Not being used - Virtual == Physical */ /* VECBASE */ - #define VIRTUAL_MEMORY_ADDRESS 0x00002000 + #define VIRTUAL_MEMORY_ADDRESS (PLATFORM_DEFAULT_MEM_START + 0x2000) /* Location of the start of the kernel text, _start */ - #define KERNELOFFSET 0x00003000 + #define KERNELOFFSET (PLATFORM_DEFAULT_MEM_START + 0x3000) /* Loaded just above possibly live vectors */ - #define LOAD_MEMORY_ADDRESS 0x00003000 + #define LOAD_MEMORY_ADDRESS (PLATFORM_DEFAULT_MEM_START + 0x3000) #endif /* CONFIG_MMU */ diff --git a/arch/xtensa/include/uapi/asm/mman.h b/arch/xtensa/include/uapi/asm/mman.h index 00eed6786d7e..201aec0e0446 100644 --- a/arch/xtensa/include/uapi/asm/mman.h +++ b/arch/xtensa/include/uapi/asm/mman.h @@ -55,6 +55,12 @@ #define MAP_NONBLOCK 0x20000 /* do not block on IO */ #define MAP_STACK 0x40000 /* give out an address that is best suited for process/thread stacks */ #define MAP_HUGETLB 0x80000 /* create a huge page mapping */ +#ifdef CONFIG_MMAP_ALLOW_UNINITIALIZED +# define MAP_UNINITIALIZED 0x4000000 /* For anonymous mmap, memory could be + * uninitialized */ +#else +# define MAP_UNINITIALIZED 0x0 /* Don't support this flag */ +#endif /* * Flags for msync diff --git a/arch/xtensa/include/uapi/asm/socket.h b/arch/xtensa/include/uapi/asm/socket.h index 39acec0cf0b1..4120af086160 100644 --- a/arch/xtensa/include/uapi/asm/socket.h +++ b/arch/xtensa/include/uapi/asm/socket.h @@ -91,4 +91,9 @@ #define SO_BPF_EXTENSIONS 48 +#define SO_INCOMING_CPU 49 + +#define SO_ATTACH_BPF 50 +#define SO_DETACH_BPF SO_DETACH_FILTER + #endif /* _XTENSA_SOCKET_H */ diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S index aeeb3cc8a410..15a461e2a0ed 100644 --- a/arch/xtensa/kernel/head.S +++ b/arch/xtensa/kernel/head.S @@ -112,6 +112,11 @@ ENTRY(_startup) movi a0, 0 +#if XCHAL_HAVE_VECBASE + movi a2, VECBASE_RESET_VADDR + wsr a2, vecbase +#endif + /* Clear debugging registers. */ #if XCHAL_HAVE_DEBUG diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c index 5d3f7a119ed1..83cf49685373 100644 --- a/arch/xtensa/kernel/syscall.c +++ b/arch/xtensa/kernel/syscall.c @@ -57,6 +57,7 @@ asmlinkage long xtensa_fadvise64_64(int fd, int advice, return sys_fadvise64_64(fd, offset, len, advice); } +#ifdef CONFIG_MMU unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { @@ -93,3 +94,4 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, addr = COLOUR_ALIGN(addr, pgoff); } } +#endif diff --git a/arch/xtensa/mm/Makefile b/arch/xtensa/mm/Makefile index f54f78e24d7b..e601e2fbe8e6 100644 --- a/arch/xtensa/mm/Makefile +++ b/arch/xtensa/mm/Makefile @@ -2,6 +2,6 @@ # Makefile for the Linux/Xtensa-specific parts of the memory manager. 
# -obj-y := init.o cache.o misc.o -obj-$(CONFIG_MMU) += fault.o mmu.o tlb.o +obj-y := init.o misc.o +obj-$(CONFIG_MMU) += cache.o fault.o mmu.o tlb.o obj-$(CONFIG_HIGHMEM) += highmem.o diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c index 77ed20209ca5..9a9a5935bd36 100644 --- a/arch/xtensa/mm/init.c +++ b/arch/xtensa/mm/init.c @@ -239,6 +239,17 @@ void __init bootmem_init(void) unsigned long bootmap_start, bootmap_size; int i; + /* Reserve all memory below PLATFORM_DEFAULT_MEM_START, as memory + * accounting doesn't work for pages below that address. + * + * If PLATFORM_DEFAULT_MEM_START is zero reserve page at address 0: + * successfull allocations should never return NULL. + */ + if (PLATFORM_DEFAULT_MEM_START) + mem_reserve(0, PLATFORM_DEFAULT_MEM_START, 0); + else + mem_reserve(0, 1, 0); + sysmem_dump(); max_low_pfn = max_pfn = 0; min_low_pfn = ~0; @@ -332,18 +343,24 @@ void __init mem_init(void) " pkmap : 0x%08lx - 0x%08lx (%5lu kB)\n" " fixmap : 0x%08lx - 0x%08lx (%5lu kB)\n" #endif +#ifdef CONFIG_MMU " vmalloc : 0x%08x - 0x%08x (%5u MB)\n" - " lowmem : 0x%08x - 0x%08lx (%5lu MB)\n", +#endif + " lowmem : 0x%08lx - 0x%08lx (%5lu MB)\n", #ifdef CONFIG_HIGHMEM PKMAP_BASE, PKMAP_BASE + LAST_PKMAP * PAGE_SIZE, (LAST_PKMAP*PAGE_SIZE) >> 10, FIXADDR_START, FIXADDR_TOP, (FIXADDR_TOP - FIXADDR_START) >> 10, #endif +#ifdef CONFIG_MMU VMALLOC_START, VMALLOC_END, (VMALLOC_END - VMALLOC_START) >> 20, PAGE_OFFSET, PAGE_OFFSET + (max_low_pfn - min_low_pfn) * PAGE_SIZE, +#else + min_low_pfn * PAGE_SIZE, max_low_pfn * PAGE_SIZE, +#endif ((max_low_pfn - min_low_pfn) * PAGE_SIZE) >> 20); } diff --git a/arch/xtensa/platforms/s6105/Makefile b/arch/xtensa/platforms/s6105/Makefile deleted file mode 100644 index 0be6194bcb72..000000000000 --- a/arch/xtensa/platforms/s6105/Makefile +++ /dev/null @@ -1,3 +0,0 @@ -# Makefile for the Stretch S6105 eval board - -obj-y := setup.o device.o diff --git a/arch/xtensa/platforms/s6105/device.c b/arch/xtensa/platforms/s6105/device.c deleted file mode 100644 index 4f4fc971042f..000000000000 --- a/arch/xtensa/platforms/s6105/device.c +++ /dev/null @@ -1,161 +0,0 @@ -/* - * s6105 platform devices - * - * Copyright (c) 2009 emlix GmbH - */ - -#include <linux/kernel.h> -#include <linux/gpio.h> -#include <linux/init.h> -#include <linux/irq.h> -#include <linux/phy.h> -#include <linux/platform_device.h> -#include <linux/serial.h> -#include <linux/serial_8250.h> - -#include <variant/hardware.h> -#include <variant/dmac.h> - -#include <platform/gpio.h> - -#define GPIO3_INTNUM 3 -#define UART_INTNUM 4 -#define GMAC_INTNUM 5 - -static const signed char gpio3_irq_mappings[] = { - S6_INTC_GPIO(3), - -1 -}; - -static const signed char uart_irq_mappings[] = { - S6_INTC_UART(0), - S6_INTC_UART(1), - -1, -}; - -static const signed char gmac_irq_mappings[] = { - S6_INTC_GMAC_STAT, - S6_INTC_GMAC_ERR, - S6_INTC_DMA_HOSTTERMCNT(0), - S6_INTC_DMA_HOSTTERMCNT(1), - -1 -}; - -const signed char *platform_irq_mappings[NR_IRQS] = { - [GPIO3_INTNUM] = gpio3_irq_mappings, - [UART_INTNUM] = uart_irq_mappings, - [GMAC_INTNUM] = gmac_irq_mappings, -}; - -static struct plat_serial8250_port serial_platform_data[] = { - { - .membase = (void *)S6_REG_UART + 0x0000, - .mapbase = S6_REG_UART + 0x0000, - .irq = UART_INTNUM, - .uartclk = S6_SCLK, - .regshift = 2, - .iotype = SERIAL_IO_MEM, - .flags = ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST, - }, - { - .membase = (void *)S6_REG_UART + 0x1000, - .mapbase = S6_REG_UART + 0x1000, - .irq = UART_INTNUM, - .uartclk = S6_SCLK, - .regshift = 2, - .iotype = 
SERIAL_IO_MEM, - .flags = ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST, - }, - { }, -}; - -static struct resource s6_gmac_resource[] = { - { - .name = "mem", - .start = (resource_size_t)S6_REG_GMAC, - .end = (resource_size_t)S6_REG_GMAC + 0x10000 - 1, - .flags = IORESOURCE_MEM, - }, - { - .name = "dma", - .start = (resource_size_t) - DMA_CHNL(S6_REG_HIFDMA, S6_HIFDMA_GMACTX), - .end = (resource_size_t) - DMA_CHNL(S6_REG_HIFDMA, S6_HIFDMA_GMACTX) + 0x100 - 1, - .flags = IORESOURCE_DMA, - }, - { - .name = "dma", - .start = (resource_size_t) - DMA_CHNL(S6_REG_HIFDMA, S6_HIFDMA_GMACRX), - .end = (resource_size_t) - DMA_CHNL(S6_REG_HIFDMA, S6_HIFDMA_GMACRX) + 0x100 - 1, - .flags = IORESOURCE_DMA, - }, - { - .name = "io", - .start = (resource_size_t)S6_MEM_GMAC, - .end = (resource_size_t)S6_MEM_GMAC + 0x2000000 - 1, - .flags = IORESOURCE_IO, - }, - { - .name = "irq", - .start = (resource_size_t)GMAC_INTNUM, - .flags = IORESOURCE_IRQ, - }, - { - .name = "irq", - .start = (resource_size_t)PHY_POLL, - .flags = IORESOURCE_IRQ, - }, -}; - -static int __init prepare_phy_irq(int pin) -{ - int irq; - if (gpio_request(pin, "s6gmac_phy") < 0) - goto fail; - if (gpio_direction_input(pin) < 0) - goto free; - irq = gpio_to_irq(pin); - if (irq < 0) - goto free; - if (irq_set_irq_type(irq, IRQ_TYPE_LEVEL_LOW) < 0) - goto free; - return irq; -free: - gpio_free(pin); -fail: - return PHY_POLL; -} - -static struct platform_device platform_devices[] = { - { - .name = "serial8250", - .id = PLAT8250_DEV_PLATFORM, - .dev = { - .platform_data = serial_platform_data, - }, - }, - { - .name = "s6gmac", - .id = 0, - .resource = s6_gmac_resource, - .num_resources = ARRAY_SIZE(s6_gmac_resource), - }, - { - I2C_BOARD_INFO("m41t62", S6I2C_ADDR_M41T62), - }, -}; - -static int __init device_init(void) -{ - int i; - - s6_gmac_resource[5].start = prepare_phy_irq(GPIO_PHY_IRQ); - - for (i = 0; i < ARRAY_SIZE(platform_devices); i++) - platform_device_register(&platform_devices[i]); - return 0; -} -arch_initcall_sync(device_init); diff --git a/arch/xtensa/platforms/s6105/include/platform/gpio.h b/arch/xtensa/platforms/s6105/include/platform/gpio.h deleted file mode 100644 index fa11aa4b61e9..000000000000 --- a/arch/xtensa/platforms/s6105/include/platform/gpio.h +++ /dev/null @@ -1,27 +0,0 @@ -#ifndef __ASM_XTENSA_S6105_GPIO_H -#define __ASM_XTENSA_S6105_GPIO_H - -#define GPIO_BP_TEMP_ALARM 0 -#define GPIO_PB_RESET_IN 1 -#define GPIO_EXP_IRQ 2 -#define GPIO_TRIGGER_IRQ 3 -#define GPIO_RTC_IRQ 4 -#define GPIO_PHY_IRQ 5 -#define GPIO_IMAGER_RESET 6 -#define GPIO_SD_IRQ 7 -#define GPIO_MINI_BOOT_INH 8 -#define GPIO_BOARD_RESET 9 -#define GPIO_EXP_PRESENT 10 -#define GPIO_LED1_NGREEN 12 -#define GPIO_LED1_RED 13 -#define GPIO_LED0_NGREEN 14 -#define GPIO_LED0_NRED 15 -#define GPIO_SPI_CS0 16 -#define GPIO_SPI_CS1 17 -#define GPIO_SPI_CS3 19 -#define GPIO_SPI_CS4 20 -#define GPIO_SD_WP 21 -#define GPIO_BP_RESET 22 -#define GPIO_ALARM_OUT 23 - -#endif /* __ASM_XTENSA_S6105_GPIO_H */ diff --git a/arch/xtensa/platforms/s6105/include/platform/hardware.h b/arch/xtensa/platforms/s6105/include/platform/hardware.h deleted file mode 100644 index d628efac7089..000000000000 --- a/arch/xtensa/platforms/s6105/include/platform/hardware.h +++ /dev/null @@ -1,11 +0,0 @@ -#ifndef __XTENSA_S6105_HARDWARE_H -#define __XTENSA_S6105_HARDWARE_H - -#define PLATFORM_DEFAULT_MEM_START 0x40000000 -#define PLATFORM_DEFAULT_MEM_SIZE 0x08000000 - -#define MAX_DMA_ADDRESS 0 - -#define KERNELOFFSET (PLATFORM_DEFAULT_MEM_START + 0x1000) - -#endif /* __XTENSA_S6105_HARDWARE_H */ 
diff --git a/arch/xtensa/platforms/s6105/include/platform/serial.h b/arch/xtensa/platforms/s6105/include/platform/serial.h deleted file mode 100644 index c8a771e5981b..000000000000 --- a/arch/xtensa/platforms/s6105/include/platform/serial.h +++ /dev/null @@ -1,8 +0,0 @@ -#ifndef __ASM_XTENSA_S6105_SERIAL_H -#define __ASM_XTENSA_S6105_SERIAL_H - -#include <variant/hardware.h> - -#define BASE_BAUD (S6_SCLK / 16) - -#endif /* __ASM_XTENSA_S6105_SERIAL_H */ diff --git a/arch/xtensa/platforms/s6105/setup.c b/arch/xtensa/platforms/s6105/setup.c deleted file mode 100644 index 86ce730f7913..000000000000 --- a/arch/xtensa/platforms/s6105/setup.c +++ /dev/null @@ -1,73 +0,0 @@ -/* - * s6105 control routines - * - * Copyright (c) 2009 emlix GmbH - */ -#include <linux/irq.h> -#include <linux/io.h> -#include <linux/gpio.h> - -#include <asm/bootparam.h> - -#include <variant/hardware.h> -#include <variant/gpio.h> - -#include <platform/gpio.h> - -void platform_halt(void) -{ - local_irq_disable(); - while (1) - ; -} - -void platform_power_off(void) -{ - platform_halt(); -} - -void platform_restart(void) -{ - platform_halt(); -} - -void __init platform_setup(char **cmdline) -{ - unsigned long reg; - - reg = readl(S6_REG_GREG1 + S6_GREG1_PLLSEL); - reg &= ~(S6_GREG1_PLLSEL_GMAC_MASK << S6_GREG1_PLLSEL_GMAC | - S6_GREG1_PLLSEL_GMII_MASK << S6_GREG1_PLLSEL_GMII); - reg |= S6_GREG1_PLLSEL_GMAC_125MHZ << S6_GREG1_PLLSEL_GMAC | - S6_GREG1_PLLSEL_GMII_125MHZ << S6_GREG1_PLLSEL_GMII; - writel(reg, S6_REG_GREG1 + S6_GREG1_PLLSEL); - - reg = readl(S6_REG_GREG1 + S6_GREG1_CLKGATE); - reg &= ~(1 << S6_GREG1_BLOCK_SB); - reg &= ~(1 << S6_GREG1_BLOCK_GMAC); - writel(reg, S6_REG_GREG1 + S6_GREG1_CLKGATE); - - reg = readl(S6_REG_GREG1 + S6_GREG1_BLOCKENA); - reg |= 1 << S6_GREG1_BLOCK_SB; - reg |= 1 << S6_GREG1_BLOCK_GMAC; - writel(reg, S6_REG_GREG1 + S6_GREG1_BLOCKENA); - - printk(KERN_NOTICE "S6105 on Stretch S6000 - " - "Copyright (C) 2009 emlix GmbH <info@emlix.com>\n"); -} - -void __init platform_init(bp_tag_t *first) -{ - s6_gpio_init(0); - gpio_request(GPIO_LED1_NGREEN, "led1_green"); - gpio_request(GPIO_LED1_RED, "led1_red"); - gpio_direction_output(GPIO_LED1_NGREEN, 1); -} - -void platform_heartbeat(void) -{ - static unsigned int c; - - if (!(++c & 0x4F)) - gpio_direction_output(GPIO_LED1_RED, !(c & 0x10)); -} diff --git a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h index aeb316b7ff88..6edd20bb4565 100644 --- a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h +++ b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h @@ -17,8 +17,8 @@ /* Memory configuration. */ -#define PLATFORM_DEFAULT_MEM_START 0x00000000 -#define PLATFORM_DEFAULT_MEM_SIZE 0x04000000 +#define PLATFORM_DEFAULT_MEM_START CONFIG_DEFAULT_MEM_START +#define PLATFORM_DEFAULT_MEM_SIZE CONFIG_DEFAULT_MEM_SIZE /* Interrupt configuration. 
*/ diff --git a/arch/xtensa/variants/s6000/Makefile b/arch/xtensa/variants/s6000/Makefile deleted file mode 100644 index 3e7ef0a0c498..000000000000 --- a/arch/xtensa/variants/s6000/Makefile +++ /dev/null @@ -1,4 +0,0 @@ -# s6000 Makefile - -obj-y += irq.o gpio.o dmac.o -obj-$(CONFIG_XTENSA_CALIBRATE_CCOUNT) += delay.o diff --git a/arch/xtensa/variants/s6000/delay.c b/arch/xtensa/variants/s6000/delay.c deleted file mode 100644 index 39154563ee17..000000000000 --- a/arch/xtensa/variants/s6000/delay.c +++ /dev/null @@ -1,25 +0,0 @@ -#include <asm/timex.h> -#include <asm/io.h> -#include <variant/hardware.h> - -#define LOOPS 10 -void platform_calibrate_ccount(void) -{ - u32 uninitialized_var(a); - u32 uninitialized_var(u); - u32 b; - u32 tstamp = S6_REG_GREG1 + S6_GREG1_GLOBAL_TIMER; - int i = LOOPS+1; - do { - u32 t = u; - asm volatile( - "1: l32i %0, %2, 0 ;" - " beq %0, %1, 1b ;" - : "=&a"(u) : "a"(t), "a"(tstamp)); - b = get_ccount(); - if (i == LOOPS) - a = b; - } while (--i >= 0); - b -= a; - ccount_freq = b * (100000UL / LOOPS); -} diff --git a/arch/xtensa/variants/s6000/dmac.c b/arch/xtensa/variants/s6000/dmac.c deleted file mode 100644 index 340f5bb0b5ef..000000000000 --- a/arch/xtensa/variants/s6000/dmac.c +++ /dev/null @@ -1,173 +0,0 @@ -/* - * Authors: Oskar Schirmer <oskar@scara.com> - * Daniel Gloeckner <dg@emlix.com> - * (c) 2008 emlix GmbH http://www.emlix.com - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - */ - -#include <linux/kernel.h> -#include <linux/io.h> -#include <linux/types.h> -#include <linux/errno.h> -#include <linux/spinlock.h> -#include <asm/cacheflush.h> -#include <variant/dmac.h> - -/* DMA engine lookup */ - -struct s6dmac_ctrl s6dmac_ctrl[S6_DMAC_NB]; - - -/* DMA control, per engine */ - -void s6dmac_put_fifo_cache(u32 dmac, int chan, u32 src, u32 dst, u32 size) -{ - if (xtensa_need_flush_dma_source(src)) { - u32 base = src; - u32 span = size; - u32 chunk = readl(DMA_CHNL(dmac, chan) + S6_DMA_CMONCHUNK); - if (chunk && (size > chunk)) { - s32 skip = - readl(DMA_CHNL(dmac, chan) + S6_DMA_SRCSKIP); - u32 gaps = (size+chunk-1)/chunk - 1; - if (skip >= 0) { - span += gaps * skip; - } else if (-skip > chunk) { - s32 decr = gaps * (chunk + skip); - base += decr; - span = chunk - decr; - } else { - span = max(span + gaps * skip, - (chunk + skip) * gaps - skip); - } - } - flush_dcache_unaligned(base, span); - } - if (xtensa_need_invalidate_dma_destination(dst)) { - u32 base = dst; - u32 span = size; - u32 chunk = readl(DMA_CHNL(dmac, chan) + S6_DMA_CMONCHUNK); - if (chunk && (size > chunk)) { - s32 skip = - readl(DMA_CHNL(dmac, chan) + S6_DMA_DSTSKIP); - u32 gaps = (size+chunk-1)/chunk - 1; - if (skip >= 0) { - span += gaps * skip; - } else if (-skip > chunk) { - s32 decr = gaps * (chunk + skip); - base += decr; - span = chunk - decr; - } else { - span = max(span + gaps * skip, - (chunk + skip) * gaps - skip); - } - } - invalidate_dcache_unaligned(base, span); - } - s6dmac_put_fifo(dmac, chan, src, dst, size); -} - -void s6dmac_disable_error_irqs(u32 dmac, u32 mask) -{ - unsigned long flags; - spinlock_t *spinl = &s6dmac_ctrl[_dmac_addr_index(dmac)].lock; - spin_lock_irqsave(spinl, flags); - _s6dmac_disable_error_irqs(dmac, mask); - spin_unlock_irqrestore(spinl, flags); -} - -u32 s6dmac_int_sources(u32 dmac, u32 channel) -{ - u32 mask, ret, tmp; - mask = 1 << 
channel; - - tmp = readl(dmac + S6_DMA_TERMCNTIRQSTAT); - tmp &= mask; - writel(tmp, dmac + S6_DMA_TERMCNTIRQCLR); - ret = tmp >> channel; - - tmp = readl(dmac + S6_DMA_PENDCNTIRQSTAT); - tmp &= mask; - writel(tmp, dmac + S6_DMA_PENDCNTIRQCLR); - ret |= (tmp >> channel) << 1; - - tmp = readl(dmac + S6_DMA_LOWWMRKIRQSTAT); - tmp &= mask; - writel(tmp, dmac + S6_DMA_LOWWMRKIRQCLR); - ret |= (tmp >> channel) << 2; - - tmp = readl(dmac + S6_DMA_INTRAW0); - tmp &= (mask << S6_DMA_INT0_OVER) | (mask << S6_DMA_INT0_UNDER); - writel(tmp, dmac + S6_DMA_INTCLEAR0); - - if (tmp & (mask << S6_DMA_INT0_UNDER)) - ret |= 1 << 3; - if (tmp & (mask << S6_DMA_INT0_OVER)) - ret |= 1 << 4; - - tmp = readl(dmac + S6_DMA_MASTERERRINFO); - mask <<= S6_DMA_INT1_CHANNEL; - if (((tmp >> S6_DMA_MASTERERR_CHAN(0)) & S6_DMA_MASTERERR_CHAN_MASK) - == channel) - mask |= 1 << S6_DMA_INT1_MASTER; - if (((tmp >> S6_DMA_MASTERERR_CHAN(1)) & S6_DMA_MASTERERR_CHAN_MASK) - == channel) - mask |= 1 << (S6_DMA_INT1_MASTER + 1); - if (((tmp >> S6_DMA_MASTERERR_CHAN(2)) & S6_DMA_MASTERERR_CHAN_MASK) - == channel) - mask |= 1 << (S6_DMA_INT1_MASTER + 2); - - tmp = readl(dmac + S6_DMA_INTRAW1) & mask; - writel(tmp, dmac + S6_DMA_INTCLEAR1); - ret |= ((tmp >> channel) & 1) << 5; - ret |= ((tmp >> S6_DMA_INT1_MASTER) & S6_DMA_INT1_MASTER_MASK) << 6; - - return ret; -} - -void s6dmac_release_chan(u32 dmac, int chan) -{ - if (chan >= 0) - s6dmac_disable_chan(dmac, chan); -} - - -/* global init */ - -static inline void __init dmac_init(u32 dmac, u8 chan_nb) -{ - s6dmac_ctrl[S6_DMAC_INDEX(dmac)].dmac = dmac; - spin_lock_init(&s6dmac_ctrl[S6_DMAC_INDEX(dmac)].lock); - s6dmac_ctrl[S6_DMAC_INDEX(dmac)].chan_nb = chan_nb; - writel(S6_DMA_INT1_MASTER_MASK << S6_DMA_INT1_MASTER, - dmac + S6_DMA_INTCLEAR1); -} - -static inline void __init dmac_master(u32 dmac, - u32 m0start, u32 m0end, u32 m1start, u32 m1end) -{ - writel(m0start, dmac + S6_DMA_MASTER0START); - writel(m0end - 1, dmac + S6_DMA_MASTER0END); - writel(m1start, dmac + S6_DMA_MASTER1START); - writel(m1end - 1, dmac + S6_DMA_MASTER1END); -} - -static void __init s6_dmac_init(void) -{ - dmac_init(S6_REG_LMSDMA, S6_LMSDMA_NB); - dmac_master(S6_REG_LMSDMA, - S6_MEM_DDR, S6_MEM_PCIE_APER, S6_MEM_EFI, S6_MEM_GMAC); - dmac_init(S6_REG_NIDMA, S6_NIDMA_NB); - dmac_init(S6_REG_DPDMA, S6_DPDMA_NB); - dmac_master(S6_REG_DPDMA, - S6_MEM_DDR, S6_MEM_PCIE_APER, S6_REG_DP, S6_REG_DPDMA); - dmac_init(S6_REG_HIFDMA, S6_HIFDMA_NB); - dmac_master(S6_REG_HIFDMA, - S6_MEM_GMAC, S6_MEM_PCIE_CFG, S6_MEM_PCIE_APER, S6_MEM_AUX); -} - -arch_initcall(s6_dmac_init); diff --git a/arch/xtensa/variants/s6000/gpio.c b/arch/xtensa/variants/s6000/gpio.c deleted file mode 100644 index da9e85c13b08..000000000000 --- a/arch/xtensa/variants/s6000/gpio.c +++ /dev/null @@ -1,230 +0,0 @@ -/* - * s6000 gpio driver - * - * Copyright (c) 2009 emlix GmbH - * Authors: Oskar Schirmer <oskar@scara.com> - * Johannes Weiner <hannes@cmpxchg.org> - * Daniel Gloeckner <dg@emlix.com> - */ -#include <linux/bitops.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/init.h> -#include <linux/io.h> -#include <linux/irq.h> -#include <linux/gpio.h> - -#include <variant/hardware.h> - -#define IRQ_BASE XTENSA_NR_IRQS - -#define S6_GPIO_DATA 0x000 -#define S6_GPIO_IS 0x404 -#define S6_GPIO_IBE 0x408 -#define S6_GPIO_IEV 0x40C -#define S6_GPIO_IE 0x410 -#define S6_GPIO_RIS 0x414 -#define S6_GPIO_MIS 0x418 -#define S6_GPIO_IC 0x41C -#define S6_GPIO_AFSEL 0x420 -#define S6_GPIO_DIR 0x800 -#define S6_GPIO_BANK(nr) ((nr) * 0x1000) 
-#define S6_GPIO_MASK(nr) (4 << (nr)) -#define S6_GPIO_OFFSET(nr) \ - (S6_GPIO_BANK((nr) >> 3) + S6_GPIO_MASK((nr) & 7)) - -static int direction_input(struct gpio_chip *chip, unsigned int off) -{ - writeb(0, S6_REG_GPIO + S6_GPIO_DIR + S6_GPIO_OFFSET(off)); - return 0; -} - -static int get(struct gpio_chip *chip, unsigned int off) -{ - return readb(S6_REG_GPIO + S6_GPIO_DATA + S6_GPIO_OFFSET(off)); -} - -static int direction_output(struct gpio_chip *chip, unsigned int off, int val) -{ - unsigned rel = S6_GPIO_OFFSET(off); - writeb(~0, S6_REG_GPIO + S6_GPIO_DIR + rel); - writeb(val ? ~0 : 0, S6_REG_GPIO + S6_GPIO_DATA + rel); - return 0; -} - -static void set(struct gpio_chip *chip, unsigned int off, int val) -{ - writeb(val ? ~0 : 0, S6_REG_GPIO + S6_GPIO_DATA + S6_GPIO_OFFSET(off)); -} - -static int to_irq(struct gpio_chip *chip, unsigned offset) -{ - if (offset < 8) - return offset + IRQ_BASE; - return -EINVAL; -} - -static struct gpio_chip gpiochip = { - .owner = THIS_MODULE, - .direction_input = direction_input, - .get = get, - .direction_output = direction_output, - .set = set, - .to_irq = to_irq, - .base = 0, - .ngpio = 24, - .can_sleep = 0, /* no blocking io needed */ - .exported = 0, /* no exporting to userspace */ -}; - -int s6_gpio_init(u32 afsel) -{ - writeb(afsel, S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_AFSEL); - writeb(afsel >> 8, S6_REG_GPIO + S6_GPIO_BANK(1) + S6_GPIO_AFSEL); - writeb(afsel >> 16, S6_REG_GPIO + S6_GPIO_BANK(2) + S6_GPIO_AFSEL); - return gpiochip_add(&gpiochip); -} - -static void ack(struct irq_data *d) -{ - writeb(1 << (d->irq - IRQ_BASE), S6_REG_GPIO + S6_GPIO_IC); -} - -static void mask(struct irq_data *d) -{ - u8 r = readb(S6_REG_GPIO + S6_GPIO_IE); - r &= ~(1 << (d->irq - IRQ_BASE)); - writeb(r, S6_REG_GPIO + S6_GPIO_IE); -} - -static void unmask(struct irq_data *d) -{ - u8 m = readb(S6_REG_GPIO + S6_GPIO_IE); - m |= 1 << (d->irq - IRQ_BASE); - writeb(m, S6_REG_GPIO + S6_GPIO_IE); -} - -static int set_type(struct irq_data *d, unsigned int type) -{ - const u8 m = 1 << (d->irq - IRQ_BASE); - irq_flow_handler_t handler; - u8 reg; - - if (type == IRQ_TYPE_PROBE) { - if ((readb(S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_AFSEL) & m) - || (readb(S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_IE) & m) - || readb(S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_DIR - + S6_GPIO_MASK(irq - IRQ_BASE))) - return 0; - type = IRQ_TYPE_EDGE_BOTH; - } - - reg = readb(S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_IS); - if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) { - reg |= m; - handler = handle_level_irq; - } else { - reg &= ~m; - handler = handle_edge_irq; - } - writeb(reg, S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_IS); - __irq_set_handler_locked(irq, handler); - - reg = readb(S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_IEV); - if (type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_EDGE_RISING)) - reg |= m; - else - reg &= ~m; - writeb(reg, S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_IEV); - - reg = readb(S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_IBE); - if ((type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) - reg |= m; - else - reg &= ~m; - writeb(reg, S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_IBE); - return 0; -} - -static struct irq_chip gpioirqs = { - .name = "GPIO", - .irq_ack = ack, - .irq_mask = mask, - .irq_unmask = unmask, - .irq_set_type = set_type, -}; - -static u8 demux_masks[4]; - -static void demux_irqs(unsigned int irq, struct irq_desc *desc) -{ - struct irq_chip *chip = irq_desc_get_chip(desc); - u8 *mask = irq_desc_get_handler_data(desc); - u8 pending; - int cirq; - - 
chip->irq_mask(&desc->irq_data); - chip->irq_ack(&desc->irq_data); - pending = readb(S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_MIS) & *mask; - cirq = IRQ_BASE - 1; - while (pending) { - int n = ffs(pending); - cirq += n; - pending >>= n; - generic_handle_irq(cirq); - } - chip->irq_unmask(&desc->irq_data); -} - -extern const signed char *platform_irq_mappings[XTENSA_NR_IRQS]; - -void __init variant_init_irq(void) -{ - int irq, n; - writeb(0, S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_IE); - for (irq = n = 0; irq < XTENSA_NR_IRQS; irq++) { - const signed char *mapping = platform_irq_mappings[irq]; - int alone = 1; - u8 mask; - if (!mapping) - continue; - for(mask = 0; *mapping != -1; mapping++) - switch (*mapping) { - case S6_INTC_GPIO(0): - mask |= 1 << 0; - break; - case S6_INTC_GPIO(1): - mask |= 1 << 1; - break; - case S6_INTC_GPIO(2): - mask |= 1 << 2; - break; - case S6_INTC_GPIO(3): - mask |= 0x1f << 3; - break; - default: - alone = 0; - } - if (mask) { - int cirq, i; - if (!alone) { - printk(KERN_ERR "chained irq chips can't share" - " parent irq %i\n", irq); - continue; - } - demux_masks[n] = mask; - cirq = IRQ_BASE - 1; - do { - i = ffs(mask); - cirq += i; - mask >>= i; - irq_set_chip(cirq, &gpioirqs); - irq_set_irq_type(irq, IRQ_TYPE_LEVEL_LOW); - } while (mask); - irq_set_handler_data(irq, demux_masks + n); - irq_set_chained_handler(irq, demux_irqs); - if (++n == ARRAY_SIZE(demux_masks)) - break; - } - } -} diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h deleted file mode 100644 index af007953027e..000000000000 --- a/arch/xtensa/variants/s6000/include/variant/core.h +++ /dev/null @@ -1,431 +0,0 @@ -/* - * Xtensa processor core configuration information. - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (c) 1999-2008 Tensilica Inc. - */ - -#ifndef _XTENSA_CORE_CONFIGURATION_H -#define _XTENSA_CORE_CONFIGURATION_H - - -/**************************************************************************** - Parameters Useful for Any Code, USER or PRIVILEGED - ****************************************************************************/ - -/* - * Note: Macros of the form XCHAL_HAVE_*** have a value of 1 if the option is - * configured, and a value of 0 otherwise. These macros are always defined. 
- */ - - -/*---------------------------------------------------------------------- - ISA - ----------------------------------------------------------------------*/ - -#define XCHAL_HAVE_BE 0 /* big-endian byte ordering */ -#define XCHAL_HAVE_WINDOWED 1 /* windowed registers option */ -#define XCHAL_NUM_AREGS 64 /* num of physical addr regs */ -#define XCHAL_NUM_AREGS_LOG2 6 /* log2(XCHAL_NUM_AREGS) */ -#define XCHAL_MAX_INSTRUCTION_SIZE 8 /* max instr bytes (3..8) */ -#define XCHAL_HAVE_DEBUG 1 /* debug option */ -#define XCHAL_HAVE_DENSITY 1 /* 16-bit instructions */ -#define XCHAL_HAVE_LOOPS 1 /* zero-overhead loops */ -#define XCHAL_HAVE_NSA 1 /* NSA/NSAU instructions */ -#define XCHAL_HAVE_MINMAX 1 /* MIN/MAX instructions */ -#define XCHAL_HAVE_SEXT 1 /* SEXT instruction */ -#define XCHAL_HAVE_CLAMPS 1 /* CLAMPS instruction */ -#define XCHAL_HAVE_MUL16 1 /* MUL16S/MUL16U instructions */ -#define XCHAL_HAVE_MUL32 1 /* MULL instruction */ -#define XCHAL_HAVE_MUL32_HIGH 1 /* MULUH/MULSH instructions */ -#define XCHAL_HAVE_DIV32 0 /* QUOS/QUOU/REMS/REMU instructions */ -#define XCHAL_HAVE_L32R 1 /* L32R instruction */ -#define XCHAL_HAVE_ABSOLUTE_LITERALS 1 /* non-PC-rel (extended) L32R */ -#define XCHAL_HAVE_CONST16 0 /* CONST16 instruction */ -#define XCHAL_HAVE_ADDX 1 /* ADDX#/SUBX# instructions */ -#define XCHAL_HAVE_WIDE_BRANCHES 0 /* B*.W18 or B*.W15 instr's */ -#define XCHAL_HAVE_PREDICTED_BRANCHES 0 /* B[EQ/EQZ/NE/NEZ]T instr's */ -#define XCHAL_HAVE_CALL4AND12 1 /* (obsolete option) */ -#define XCHAL_HAVE_ABS 1 /* ABS instruction */ -/*#define XCHAL_HAVE_POPC 0*/ /* POPC instruction */ -/*#define XCHAL_HAVE_CRC 0*/ /* CRC instruction */ -#define XCHAL_HAVE_RELEASE_SYNC 0 /* L32AI/S32RI instructions */ -#define XCHAL_HAVE_S32C1I 0 /* S32C1I instruction */ -#define XCHAL_HAVE_SPECULATION 0 /* speculation */ -#define XCHAL_HAVE_FULL_RESET 0 /* all regs/state reset */ -#define XCHAL_NUM_CONTEXTS 1 /* */ -#define XCHAL_NUM_MISC_REGS 4 /* num of scratch regs (0..4) */ -#define XCHAL_HAVE_TAP_MASTER 0 /* JTAG TAP control instr's */ -#define XCHAL_HAVE_PRID 0 /* processor ID register */ -#define XCHAL_HAVE_THREADPTR 0 /* THREADPTR register */ -#define XCHAL_HAVE_BOOLEANS 1 /* boolean registers */ -#define XCHAL_HAVE_CP 1 /* CPENABLE reg (coprocessor) */ -#define XCHAL_CP_MAXCFG 8 /* max allowed cp id plus one */ -#define XCHAL_HAVE_MAC16 0 /* MAC16 package */ -#define XCHAL_HAVE_VECTORFPU2005 0 /* vector floating-point pkg */ -#define XCHAL_HAVE_FP 1 /* floating point pkg */ -#define XCHAL_HAVE_VECTRA1 0 /* Vectra I pkg */ -#define XCHAL_HAVE_VECTRALX 0 /* Vectra LX pkg */ -#define XCHAL_HAVE_HIFI2 0 /* HiFi2 Audio Engine pkg */ - - -/*---------------------------------------------------------------------- - MISC - ----------------------------------------------------------------------*/ - -#define XCHAL_NUM_WRITEBUFFER_ENTRIES 8 /* size of write buffer */ -#define XCHAL_INST_FETCH_WIDTH 8 /* instr-fetch width in bytes */ -#define XCHAL_DATA_WIDTH 16 /* data width in bytes */ -/* In T1050, applies to selected core load and store instructions (see ISA): */ -#define XCHAL_UNALIGNED_LOAD_EXCEPTION 1 /* unaligned loads cause exc. 
*/ -#define XCHAL_UNALIGNED_STORE_EXCEPTION 1 /* unaligned stores cause exc.*/ - -#define XCHAL_SW_VERSION 701001 /* sw version of this header */ - -#define XCHAL_CORE_ID "stretch_bali" /* alphanum core name - (CoreID) set in the Xtensa - Processor Generator */ - -#define XCHAL_BUILD_UNIQUE_ID 0x000104B9 /* 22-bit sw build ID */ - -/* - * These definitions describe the hardware targeted by this software. - */ -#define XCHAL_HW_CONFIGID0 0xC2F3F9FE /* ConfigID hi 32 bits*/ -#define XCHAL_HW_CONFIGID1 0x054104B9 /* ConfigID lo 32 bits*/ -#define XCHAL_HW_VERSION_NAME "LX1.0.2" /* full version name */ -#define XCHAL_HW_VERSION_MAJOR 2100 /* major ver# of targeted hw */ -#define XCHAL_HW_VERSION_MINOR 2 /* minor ver# of targeted hw */ -#define XCHAL_HW_VERSION 210002 /* major*100+minor */ -#define XCHAL_HW_REL_LX1 1 -#define XCHAL_HW_REL_LX1_0 1 -#define XCHAL_HW_REL_LX1_0_2 1 -#define XCHAL_HW_CONFIGID_RELIABLE 1 -/* If software targets a *range* of hardware versions, these are the bounds: */ -#define XCHAL_HW_MIN_VERSION_MAJOR 2100 /* major v of earliest tgt hw */ -#define XCHAL_HW_MIN_VERSION_MINOR 2 /* minor v of earliest tgt hw */ -#define XCHAL_HW_MIN_VERSION 210002 /* earliest targeted hw */ -#define XCHAL_HW_MAX_VERSION_MAJOR 2100 /* major v of latest tgt hw */ -#define XCHAL_HW_MAX_VERSION_MINOR 2 /* minor v of latest tgt hw */ -#define XCHAL_HW_MAX_VERSION 210002 /* latest targeted hw */ - - -/*---------------------------------------------------------------------- - CACHE - ----------------------------------------------------------------------*/ - -#define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */ -#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */ -#define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */ -#define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */ - -#define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */ -#define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */ - -#define XCHAL_DCACHE_IS_WRITEBACK 1 /* writeback feature */ - - - - -/**************************************************************************** - Parameters Useful for PRIVILEGED (Supervisory or Non-Virtualized) Code - ****************************************************************************/ - - -#ifndef XTENSA_HAL_NON_PRIVILEGED_ONLY - -/*---------------------------------------------------------------------- - CACHE - ----------------------------------------------------------------------*/ - -#define XCHAL_HAVE_PIF 1 /* any outbound PIF present */ - -/* If present, cache size in bytes == (ways * 2^(linewidth + setwidth)). */ - -/* Number of cache sets in log2(lines per way): */ -#define XCHAL_ICACHE_SETWIDTH 9 -#define XCHAL_DCACHE_SETWIDTH 10 - -/* Cache set associativity (number of ways): */ -#define XCHAL_ICACHE_WAYS 4 -#define XCHAL_DCACHE_WAYS 2 - -/* Cache features: */ -#define XCHAL_ICACHE_LINE_LOCKABLE 1 -#define XCHAL_DCACHE_LINE_LOCKABLE 0 -#define XCHAL_ICACHE_ECC_PARITY 0 -#define XCHAL_DCACHE_ECC_PARITY 0 - -/* Number of encoded cache attr bits (see <xtensa/hal.h> for decoded bits): */ -#define XCHAL_CA_BITS 4 - - -/*---------------------------------------------------------------------- - INTERNAL I/D RAM/ROMs and XLMI - ----------------------------------------------------------------------*/ - -#define XCHAL_NUM_INSTROM 0 /* number of core instr. ROMs */ -#define XCHAL_NUM_INSTRAM 0 /* number of core instr. 
RAMs */ -#define XCHAL_NUM_DATAROM 0 /* number of core data ROMs */ -#define XCHAL_NUM_DATARAM 1 /* number of core data RAMs */ -#define XCHAL_NUM_URAM 0 /* number of core unified RAMs*/ -#define XCHAL_NUM_XLMI 1 /* number of core XLMI ports */ - -/* Data RAM 0: */ -#define XCHAL_DATARAM0_VADDR 0x3FFF0000 -#define XCHAL_DATARAM0_PADDR 0x3FFF0000 -#define XCHAL_DATARAM0_SIZE 65536 -#define XCHAL_DATARAM0_ECC_PARITY 0 - -/* XLMI Port 0: */ -#define XCHAL_XLMI0_VADDR 0x37F80000 -#define XCHAL_XLMI0_PADDR 0x37F80000 -#define XCHAL_XLMI0_SIZE 262144 -#define XCHAL_XLMI0_ECC_PARITY 0 - - -/*---------------------------------------------------------------------- - INTERRUPTS and TIMERS - ----------------------------------------------------------------------*/ - -#define XCHAL_HAVE_INTERRUPTS 1 /* interrupt option */ -#define XCHAL_HAVE_HIGHPRI_INTERRUPTS 1 /* med/high-pri. interrupts */ -#define XCHAL_HAVE_NMI 1 /* non-maskable interrupt */ -#define XCHAL_HAVE_CCOUNT 1 /* CCOUNT reg. (timer option) */ -#define XCHAL_NUM_TIMERS 3 /* number of CCOMPAREn regs */ -#define XCHAL_NUM_INTERRUPTS 27 /* number of interrupts */ -#define XCHAL_NUM_INTERRUPTS_LOG2 5 /* ceil(log2(NUM_INTERRUPTS)) */ -#define XCHAL_NUM_EXTINTERRUPTS 20 /* num of external interrupts */ -#define XCHAL_NUM_INTLEVELS 4 /* number of interrupt levels - (not including level zero) */ -#define XCHAL_EXCM_LEVEL 1 /* level masked by PS.EXCM */ - /* (always 1 in XEA1; levels 2 .. EXCM_LEVEL are "medium priority") */ - -/* Masks of interrupts at each interrupt level: */ -#define XCHAL_INTLEVEL1_MASK 0x01F07FFF -#define XCHAL_INTLEVEL2_MASK 0x02018000 -#define XCHAL_INTLEVEL3_MASK 0x04060000 -#define XCHAL_INTLEVEL4_MASK 0x00000000 -#define XCHAL_INTLEVEL5_MASK 0x00080000 -#define XCHAL_INTLEVEL6_MASK 0x00000000 -#define XCHAL_INTLEVEL7_MASK 0x00000000 - -/* Masks of interrupts at each range 1..n of interrupt levels: */ -#define XCHAL_INTLEVEL1_ANDBELOW_MASK 0x01F07FFF -#define XCHAL_INTLEVEL2_ANDBELOW_MASK 0x03F1FFFF -#define XCHAL_INTLEVEL3_ANDBELOW_MASK 0x07F7FFFF -#define XCHAL_INTLEVEL4_ANDBELOW_MASK 0x07F7FFFF -#define XCHAL_INTLEVEL5_ANDBELOW_MASK 0x07FFFFFF -#define XCHAL_INTLEVEL6_ANDBELOW_MASK 0x07FFFFFF -#define XCHAL_INTLEVEL7_ANDBELOW_MASK 0x07FFFFFF - -/* Level of each interrupt: */ -#define XCHAL_INT0_LEVEL 1 -#define XCHAL_INT1_LEVEL 1 -#define XCHAL_INT2_LEVEL 1 -#define XCHAL_INT3_LEVEL 1 -#define XCHAL_INT4_LEVEL 1 -#define XCHAL_INT5_LEVEL 1 -#define XCHAL_INT6_LEVEL 1 -#define XCHAL_INT7_LEVEL 1 -#define XCHAL_INT8_LEVEL 1 -#define XCHAL_INT9_LEVEL 1 -#define XCHAL_INT10_LEVEL 1 -#define XCHAL_INT11_LEVEL 1 -#define XCHAL_INT12_LEVEL 1 -#define XCHAL_INT13_LEVEL 1 -#define XCHAL_INT14_LEVEL 1 -#define XCHAL_INT15_LEVEL 2 -#define XCHAL_INT16_LEVEL 2 -#define XCHAL_INT17_LEVEL 3 -#define XCHAL_INT18_LEVEL 3 -#define XCHAL_INT19_LEVEL 5 -#define XCHAL_INT20_LEVEL 1 -#define XCHAL_INT21_LEVEL 1 -#define XCHAL_INT22_LEVEL 1 -#define XCHAL_INT23_LEVEL 1 -#define XCHAL_INT24_LEVEL 1 -#define XCHAL_INT25_LEVEL 2 -#define XCHAL_INT26_LEVEL 3 -#define XCHAL_DEBUGLEVEL 4 /* debug interrupt level */ -#define XCHAL_HAVE_DEBUG_EXTERN_INT 1 /* OCD external db interrupt */ -#define XCHAL_NMILEVEL 5 /* NMI "level" (for use with - EXCSAVE/EPS/EPC_n, RFI n) */ - -/* Type of each interrupt: */ -#define XCHAL_INT0_TYPE XTHAL_INTTYPE_EXTERN_LEVEL -#define XCHAL_INT1_TYPE XTHAL_INTTYPE_EXTERN_LEVEL -#define XCHAL_INT2_TYPE XTHAL_INTTYPE_EXTERN_LEVEL -#define XCHAL_INT3_TYPE XTHAL_INTTYPE_EXTERN_LEVEL -#define XCHAL_INT4_TYPE 
XTHAL_INTTYPE_EXTERN_LEVEL -#define XCHAL_INT5_TYPE XTHAL_INTTYPE_EXTERN_LEVEL -#define XCHAL_INT6_TYPE XTHAL_INTTYPE_EXTERN_LEVEL -#define XCHAL_INT7_TYPE XTHAL_INTTYPE_EXTERN_LEVEL -#define XCHAL_INT8_TYPE XTHAL_INTTYPE_EXTERN_LEVEL -#define XCHAL_INT9_TYPE XTHAL_INTTYPE_EXTERN_LEVEL -#define XCHAL_INT10_TYPE XTHAL_INTTYPE_EXTERN_LEVEL -#define XCHAL_INT11_TYPE XTHAL_INTTYPE_EXTERN_LEVEL -#define XCHAL_INT12_TYPE XTHAL_INTTYPE_EXTERN_LEVEL -#define XCHAL_INT13_TYPE XTHAL_INTTYPE_EXTERN_LEVEL -#define XCHAL_INT14_TYPE XTHAL_INTTYPE_EXTERN_LEVEL -#define XCHAL_INT15_TYPE XTHAL_INTTYPE_EXTERN_LEVEL -#define XCHAL_INT16_TYPE XTHAL_INTTYPE_EXTERN_LEVEL -#define XCHAL_INT17_TYPE XTHAL_INTTYPE_EXTERN_LEVEL -#define XCHAL_INT18_TYPE XTHAL_INTTYPE_EXTERN_LEVEL -#define XCHAL_INT19_TYPE XTHAL_INTTYPE_NMI -#define XCHAL_INT20_TYPE XTHAL_INTTYPE_SOFTWARE -#define XCHAL_INT21_TYPE XTHAL_INTTYPE_SOFTWARE -#define XCHAL_INT22_TYPE XTHAL_INTTYPE_SOFTWARE -#define XCHAL_INT23_TYPE XTHAL_INTTYPE_SOFTWARE -#define XCHAL_INT24_TYPE XTHAL_INTTYPE_TIMER -#define XCHAL_INT25_TYPE XTHAL_INTTYPE_TIMER -#define XCHAL_INT26_TYPE XTHAL_INTTYPE_TIMER - -/* Masks of interrupts for each type of interrupt: */ -#define XCHAL_INTTYPE_MASK_UNCONFIGURED 0xF8000000 -#define XCHAL_INTTYPE_MASK_SOFTWARE 0x00F00000 -#define XCHAL_INTTYPE_MASK_EXTERN_EDGE 0x00000000 -#define XCHAL_INTTYPE_MASK_EXTERN_LEVEL 0x0007FFFF -#define XCHAL_INTTYPE_MASK_TIMER 0x07000000 -#define XCHAL_INTTYPE_MASK_NMI 0x00080000 -#define XCHAL_INTTYPE_MASK_WRITE_ERROR 0x00000000 - -/* Interrupt numbers assigned to specific interrupt sources: */ -#define XCHAL_TIMER0_INTERRUPT 24 /* CCOMPARE0 */ -#define XCHAL_TIMER1_INTERRUPT 25 /* CCOMPARE1 */ -#define XCHAL_TIMER2_INTERRUPT 26 /* CCOMPARE2 */ -#define XCHAL_TIMER3_INTERRUPT XTHAL_TIMER_UNCONFIGURED -#define XCHAL_NMI_INTERRUPT 19 /* non-maskable interrupt */ - -/* Interrupt numbers for levels at which only one interrupt is configured: */ -#define XCHAL_INTLEVEL5_NUM 19 -/* (There are many interrupts each at level(s) 1, 2, 3.) */ - - -/* - * External interrupt vectors/levels. - * These macros describe how Xtensa processor interrupt numbers - * (as numbered internally, eg. in INTERRUPT and INTENABLE registers) - * map to external BInterrupt<n> pins, for those interrupts - * configured as external (level-triggered, edge-triggered, or NMI). - * See the Xtensa processor databook for more details. 
- */ - -/* Core interrupt numbers mapped to each EXTERNAL interrupt number: */ -#define XCHAL_EXTINT0_NUM 0 /* (intlevel 1) */ -#define XCHAL_EXTINT1_NUM 1 /* (intlevel 1) */ -#define XCHAL_EXTINT2_NUM 2 /* (intlevel 1) */ -#define XCHAL_EXTINT3_NUM 3 /* (intlevel 1) */ -#define XCHAL_EXTINT4_NUM 4 /* (intlevel 1) */ -#define XCHAL_EXTINT5_NUM 5 /* (intlevel 1) */ -#define XCHAL_EXTINT6_NUM 6 /* (intlevel 1) */ -#define XCHAL_EXTINT7_NUM 7 /* (intlevel 1) */ -#define XCHAL_EXTINT8_NUM 8 /* (intlevel 1) */ -#define XCHAL_EXTINT9_NUM 9 /* (intlevel 1) */ -#define XCHAL_EXTINT10_NUM 10 /* (intlevel 1) */ -#define XCHAL_EXTINT11_NUM 11 /* (intlevel 1) */ -#define XCHAL_EXTINT12_NUM 12 /* (intlevel 1) */ -#define XCHAL_EXTINT13_NUM 13 /* (intlevel 1) */ -#define XCHAL_EXTINT14_NUM 14 /* (intlevel 1) */ -#define XCHAL_EXTINT15_NUM 15 /* (intlevel 2) */ -#define XCHAL_EXTINT16_NUM 16 /* (intlevel 2) */ -#define XCHAL_EXTINT17_NUM 17 /* (intlevel 3) */ -#define XCHAL_EXTINT18_NUM 18 /* (intlevel 3) */ -#define XCHAL_EXTINT19_NUM 19 /* (intlevel 5) */ - - -/*---------------------------------------------------------------------- - EXCEPTIONS and VECTORS - ----------------------------------------------------------------------*/ - -#define XCHAL_XEA_VERSION 2 /* Xtensa Exception Architecture - number: 1 == XEA1 (old) - 2 == XEA2 (new) - 0 == XEAX (extern) */ -#define XCHAL_HAVE_XEA1 0 /* Exception Architecture 1 */ -#define XCHAL_HAVE_XEA2 1 /* Exception Architecture 2 */ -#define XCHAL_HAVE_XEAX 0 /* External Exception Arch. */ -#define XCHAL_HAVE_EXCEPTIONS 1 /* exception option */ -#define XCHAL_HAVE_MEM_ECC_PARITY 0 /* local memory ECC/parity */ -#define XCHAL_HAVE_VECTOR_SELECT 0 /* relocatable vectors */ -#define XCHAL_HAVE_VECBASE 0 /* relocatable vectors */ - -#define XCHAL_RESET_VECOFS 0x00000000 -#define XCHAL_RESET_VECTOR_VADDR 0x3FFE03D0 -#define XCHAL_RESET_VECTOR_PADDR 0x3FFE03D0 -#define XCHAL_USER_VECOFS 0x00000000 -#define XCHAL_USER_VECTOR_VADDR 0x40000220 -#define XCHAL_USER_VECTOR_PADDR 0x40000220 -#define XCHAL_KERNEL_VECOFS 0x00000000 -#define XCHAL_KERNEL_VECTOR_VADDR 0x40000200 -#define XCHAL_KERNEL_VECTOR_PADDR 0x40000200 -#define XCHAL_DOUBLEEXC_VECOFS 0x00000000 -#define XCHAL_DOUBLEEXC_VECTOR_VADDR 0x400002A0 -#define XCHAL_DOUBLEEXC_VECTOR_PADDR 0x400002A0 -#define XCHAL_WINDOW_OF4_VECOFS 0x00000000 -#define XCHAL_WINDOW_UF4_VECOFS 0x00000040 -#define XCHAL_WINDOW_OF8_VECOFS 0x00000080 -#define XCHAL_WINDOW_UF8_VECOFS 0x000000C0 -#define XCHAL_WINDOW_OF12_VECOFS 0x00000100 -#define XCHAL_WINDOW_UF12_VECOFS 0x00000140 -#define XCHAL_WINDOW_VECTORS_VADDR 0x40000000 -#define XCHAL_WINDOW_VECTORS_PADDR 0x40000000 -#define XCHAL_INTLEVEL2_VECOFS 0x00000000 -#define XCHAL_INTLEVEL2_VECTOR_VADDR 0x40000240 -#define XCHAL_INTLEVEL2_VECTOR_PADDR 0x40000240 -#define XCHAL_INTLEVEL3_VECOFS 0x00000000 -#define XCHAL_INTLEVEL3_VECTOR_VADDR 0x40000260 -#define XCHAL_INTLEVEL3_VECTOR_PADDR 0x40000260 -#define XCHAL_INTLEVEL4_VECOFS 0x00000000 -#define XCHAL_INTLEVEL4_VECTOR_VADDR 0x40000390 -#define XCHAL_INTLEVEL4_VECTOR_PADDR 0x40000390 -#define XCHAL_DEBUG_VECOFS XCHAL_INTLEVEL4_VECOFS -#define XCHAL_DEBUG_VECTOR_VADDR XCHAL_INTLEVEL4_VECTOR_VADDR -#define XCHAL_DEBUG_VECTOR_PADDR XCHAL_INTLEVEL4_VECTOR_PADDR -#define XCHAL_NMI_VECOFS 0x00000000 -#define XCHAL_NMI_VECTOR_VADDR 0x400003B0 -#define XCHAL_NMI_VECTOR_PADDR 0x400003B0 -#define XCHAL_INTLEVEL5_VECOFS XCHAL_NMI_VECOFS -#define XCHAL_INTLEVEL5_VECTOR_VADDR XCHAL_NMI_VECTOR_VADDR -#define XCHAL_INTLEVEL5_VECTOR_PADDR 
XCHAL_NMI_VECTOR_PADDR - - -/*---------------------------------------------------------------------- - DEBUG - ----------------------------------------------------------------------*/ - -#define XCHAL_HAVE_OCD 1 /* OnChipDebug option */ -#define XCHAL_NUM_IBREAK 2 /* number of IBREAKn regs */ -#define XCHAL_NUM_DBREAK 2 /* number of DBREAKn regs */ -#define XCHAL_HAVE_OCD_DIR_ARRAY 1 /* faster OCD option */ - - -/*---------------------------------------------------------------------- - MMU - ----------------------------------------------------------------------*/ - -/* See core-matmap.h header file for more details. */ - -#define XCHAL_HAVE_TLBS 1 /* inverse of HAVE_CACHEATTR */ -#define XCHAL_HAVE_SPANNING_WAY 1 /* one way maps I+D 4GB vaddr */ -#define XCHAL_HAVE_IDENTITY_MAP 1 /* vaddr == paddr always */ -#define XCHAL_HAVE_CACHEATTR 0 /* CACHEATTR register present */ -#define XCHAL_HAVE_MIMIC_CACHEATTR 1 /* region protection */ -#define XCHAL_HAVE_XLT_CACHEATTR 0 /* region prot. w/translation */ -#define XCHAL_HAVE_PTP_MMU 0 /* full MMU (with page table - [autorefill] and protection) - usable for an MMU-based OS */ -/* If none of the above last 4 are set, it's a custom TLB configuration. */ - -#define XCHAL_MMU_ASID_BITS 0 /* number of bits in ASIDs */ -#define XCHAL_MMU_RINGS 1 /* number of rings (1..4) */ -#define XCHAL_MMU_RING_BITS 0 /* num of bits in RING field */ - -#endif /* !XTENSA_HAL_NON_PRIVILEGED_ONLY */ - - -#endif /* _XTENSA_CORE_CONFIGURATION_H */ - diff --git a/arch/xtensa/variants/s6000/include/variant/dmac.h b/arch/xtensa/variants/s6000/include/variant/dmac.h deleted file mode 100644 index 3f88d9fc6897..000000000000 --- a/arch/xtensa/variants/s6000/include/variant/dmac.h +++ /dev/null @@ -1,387 +0,0 @@ -/* - * include/asm-xtensa/variant-s6000/dmac.h - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 2006 Tensilica Inc. 
- * Copyright (C) 2008 Emlix GmbH <info@emlix.com> - * Authors: Fabian Godehardt <fg@emlix.com> - * Oskar Schirmer <oskar@scara.com> - * Daniel Gloeckner <dg@emlix.com> - */ - -#ifndef __ASM_XTENSA_S6000_DMAC_H -#define __ASM_XTENSA_S6000_DMAC_H -#include <linux/io.h> -#include <variant/hardware.h> - -/* DMA global */ - -#define S6_DMA_INTSTAT0 0x000 -#define S6_DMA_INTSTAT1 0x004 -#define S6_DMA_INTENABLE0 0x008 -#define S6_DMA_INTENABLE1 0x00C -#define S6_DMA_INTRAW0 0x010 -#define S6_DMA_INTRAW1 0x014 -#define S6_DMA_INTCLEAR0 0x018 -#define S6_DMA_INTCLEAR1 0x01C -#define S6_DMA_INTSET0 0x020 -#define S6_DMA_INTSET1 0x024 -#define S6_DMA_INT0_UNDER 0 -#define S6_DMA_INT0_OVER 16 -#define S6_DMA_INT1_CHANNEL 0 -#define S6_DMA_INT1_MASTER 16 -#define S6_DMA_INT1_MASTER_MASK 7 -#define S6_DMA_TERMCNTIRQSTAT 0x028 -#define S6_DMA_TERMCNTIRQCLR 0x02C -#define S6_DMA_TERMCNTIRQSET 0x030 -#define S6_DMA_PENDCNTIRQSTAT 0x034 -#define S6_DMA_PENDCNTIRQCLR 0x038 -#define S6_DMA_PENDCNTIRQSET 0x03C -#define S6_DMA_LOWWMRKIRQSTAT 0x040 -#define S6_DMA_LOWWMRKIRQCLR 0x044 -#define S6_DMA_LOWWMRKIRQSET 0x048 -#define S6_DMA_MASTERERRINFO 0x04C -#define S6_DMA_MASTERERR_CHAN(n) (4*(n)) -#define S6_DMA_MASTERERR_CHAN_MASK 0xF -#define S6_DMA_DESCRFIFO0 0x050 -#define S6_DMA_DESCRFIFO1 0x054 -#define S6_DMA_DESCRFIFO2 0x058 -#define S6_DMA_DESCRFIFO2_AUTODISABLE 24 -#define S6_DMA_DESCRFIFO3 0x05C -#define S6_DMA_MASTER0START 0x060 -#define S6_DMA_MASTER0END 0x064 -#define S6_DMA_MASTER1START 0x068 -#define S6_DMA_MASTER1END 0x06C -#define S6_DMA_NEXTFREE 0x070 -#define S6_DMA_NEXTFREE_CHAN 0 -#define S6_DMA_NEXTFREE_CHAN_MASK 0x1F -#define S6_DMA_NEXTFREE_ENA 16 -#define S6_DMA_NEXTFREE_ENA_MASK ((1 << 16) - 1) -#define S6_DMA_DPORTCTRLGRP(p) ((p) * 4 + 0x074) -#define S6_DMA_DPORTCTRLGRP_FRAMEREP 0 -#define S6_DMA_DPORTCTRLGRP_NRCHANS 1 -#define S6_DMA_DPORTCTRLGRP_NRCHANS_1 0 -#define S6_DMA_DPORTCTRLGRP_NRCHANS_3 1 -#define S6_DMA_DPORTCTRLGRP_NRCHANS_4 2 -#define S6_DMA_DPORTCTRLGRP_NRCHANS_2 3 -#define S6_DMA_DPORTCTRLGRP_ENA 31 - - -/* DMA per channel */ - -#define DMA_CHNL(dmac, n) ((dmac) + 0x1000 + (n) * 0x100) -#define DMA_INDEX_CHNL(addr) (((addr) >> 8) & 0xF) -#define DMA_MASK_DMAC(addr) ((addr) & 0xFFFF0000) -#define S6_DMA_CHNCTRL 0x000 -#define S6_DMA_CHNCTRL_ENABLE 0 -#define S6_DMA_CHNCTRL_PAUSE 1 -#define S6_DMA_CHNCTRL_PRIO 2 -#define S6_DMA_CHNCTRL_PRIO_MASK 3 -#define S6_DMA_CHNCTRL_PERIPHXFER 4 -#define S6_DMA_CHNCTRL_PERIPHENA 5 -#define S6_DMA_CHNCTRL_SRCINC 6 -#define S6_DMA_CHNCTRL_DSTINC 7 -#define S6_DMA_CHNCTRL_BURSTLOG 8 -#define S6_DMA_CHNCTRL_BURSTLOG_MASK 7 -#define S6_DMA_CHNCTRL_DESCFIFODEPTH 12 -#define S6_DMA_CHNCTRL_DESCFIFODEPTH_MASK 0x1F -#define S6_DMA_CHNCTRL_DESCFIFOFULL 17 -#define S6_DMA_CHNCTRL_BWCONSEL 18 -#define S6_DMA_CHNCTRL_BWCONENA 19 -#define S6_DMA_CHNCTRL_PENDGCNTSTAT 20 -#define S6_DMA_CHNCTRL_PENDGCNTSTAT_MASK 0x3F -#define S6_DMA_CHNCTRL_LOWWMARK 26 -#define S6_DMA_CHNCTRL_LOWWMARK_MASK 0xF -#define S6_DMA_CHNCTRL_TSTAMP 30 -#define S6_DMA_TERMCNTNB 0x004 -#define S6_DMA_TERMCNTNB_MASK 0xFFFF -#define S6_DMA_TERMCNTTMO 0x008 -#define S6_DMA_TERMCNTSTAT 0x00C -#define S6_DMA_TERMCNTSTAT_MASK 0xFF -#define S6_DMA_CMONCHUNK 0x010 -#define S6_DMA_SRCSKIP 0x014 -#define S6_DMA_DSTSKIP 0x018 -#define S6_DMA_CUR_SRC 0x024 -#define S6_DMA_CUR_DST 0x028 -#define S6_DMA_TIMESTAMP 0x030 - -/* DMA channel lists */ - -#define S6_DPDMA_CHAN(stream, channel) (4 * (stream) + (channel)) -#define S6_DPDMA_NB 16 - -#define S6_HIFDMA_GMACTX 0 -#define 
S6_HIFDMA_GMACRX 1 -#define S6_HIFDMA_I2S0 2 -#define S6_HIFDMA_I2S1 3 -#define S6_HIFDMA_EGIB 4 -#define S6_HIFDMA_PCITX 5 -#define S6_HIFDMA_PCIRX 6 -#define S6_HIFDMA_NB 7 - -#define S6_NIDMA_NB 4 - -#define S6_LMSDMA_NB 12 - -/* controller access */ - -#define S6_DMAC_NB 4 -#define S6_DMAC_INDEX(dmac) (((unsigned)(dmac) >> 18) % S6_DMAC_NB) - -struct s6dmac_ctrl { - u32 dmac; - spinlock_t lock; - u8 chan_nb; -}; - -extern struct s6dmac_ctrl s6dmac_ctrl[S6_DMAC_NB]; - - -/* DMA control, per channel */ - -static inline int s6dmac_fifo_full(u32 dmac, int chan) -{ - return (readl(DMA_CHNL(dmac, chan) + S6_DMA_CHNCTRL) - & (1 << S6_DMA_CHNCTRL_DESCFIFOFULL)) && 1; -} - -static inline int s6dmac_termcnt_irq(u32 dmac, int chan) -{ - u32 m = 1 << chan; - int r = (readl(dmac + S6_DMA_TERMCNTIRQSTAT) & m) && 1; - if (r) - writel(m, dmac + S6_DMA_TERMCNTIRQCLR); - return r; -} - -static inline int s6dmac_pendcnt_irq(u32 dmac, int chan) -{ - u32 m = 1 << chan; - int r = (readl(dmac + S6_DMA_PENDCNTIRQSTAT) & m) && 1; - if (r) - writel(m, dmac + S6_DMA_PENDCNTIRQCLR); - return r; -} - -static inline int s6dmac_lowwmark_irq(u32 dmac, int chan) -{ - int r = (readl(dmac + S6_DMA_LOWWMRKIRQSTAT) & (1 << chan)) ? 1 : 0; - if (r) - writel(1 << chan, dmac + S6_DMA_LOWWMRKIRQCLR); - return r; -} - -static inline u32 s6dmac_pending_count(u32 dmac, int chan) -{ - return (readl(DMA_CHNL(dmac, chan) + S6_DMA_CHNCTRL) - >> S6_DMA_CHNCTRL_PENDGCNTSTAT) - & S6_DMA_CHNCTRL_PENDGCNTSTAT_MASK; -} - -static inline void s6dmac_set_terminal_count(u32 dmac, int chan, u32 n) -{ - n &= S6_DMA_TERMCNTNB_MASK; - n |= readl(DMA_CHNL(dmac, chan) + S6_DMA_TERMCNTNB) - & ~S6_DMA_TERMCNTNB_MASK; - writel(n, DMA_CHNL(dmac, chan) + S6_DMA_TERMCNTNB); -} - -static inline u32 s6dmac_get_terminal_count(u32 dmac, int chan) -{ - return (readl(DMA_CHNL(dmac, chan) + S6_DMA_TERMCNTNB)) - & S6_DMA_TERMCNTNB_MASK; -} - -static inline u32 s6dmac_timestamp(u32 dmac, int chan) -{ - return readl(DMA_CHNL(dmac, chan) + S6_DMA_TIMESTAMP); -} - -static inline u32 s6dmac_cur_src(u32 dmac, int chan) -{ - return readl(DMA_CHNL(dmac, chan) + S6_DMA_CUR_SRC); -} - -static inline u32 s6dmac_cur_dst(u32 dmac, int chan) -{ - return readl(DMA_CHNL(dmac, chan) + S6_DMA_CUR_DST); -} - -static inline void s6dmac_disable_chan(u32 dmac, int chan) -{ - u32 ctrl; - writel(readl(DMA_CHNL(dmac, chan) + S6_DMA_CHNCTRL) - & ~(1 << S6_DMA_CHNCTRL_ENABLE), - DMA_CHNL(dmac, chan) + S6_DMA_CHNCTRL); - do - ctrl = readl(DMA_CHNL(dmac, chan) + S6_DMA_CHNCTRL); - while (ctrl & (1 << S6_DMA_CHNCTRL_ENABLE)); -} - -static inline void s6dmac_set_stride_skip(u32 dmac, int chan, - int comchunk, /* 0: disable scatter/gather */ - int srcskip, int dstskip) -{ - writel(comchunk, DMA_CHNL(dmac, chan) + S6_DMA_CMONCHUNK); - writel(srcskip, DMA_CHNL(dmac, chan) + S6_DMA_SRCSKIP); - writel(dstskip, DMA_CHNL(dmac, chan) + S6_DMA_DSTSKIP); -} - -static inline void s6dmac_enable_chan(u32 dmac, int chan, - int prio, /* 0 (highest) .. 
3 (lowest) */ - int periphxfer, /* <0: disable p.req.line, 0..1: mode */ - int srcinc, int dstinc, /* 0: dont increment src/dst address */ - int comchunk, /* 0: disable scatter/gather */ - int srcskip, int dstskip, - int burstsize, /* 4 for I2S, 7 for everything else */ - int bandwidthconserve, /* <0: disable, 0..1: select */ - int lowwmark, /* 0..15 */ - int timestamp, /* 0: disable timestamp */ - int enable) /* 0: disable for now */ -{ - writel(1, DMA_CHNL(dmac, chan) + S6_DMA_TERMCNTNB); - writel(0, DMA_CHNL(dmac, chan) + S6_DMA_TERMCNTTMO); - writel(lowwmark << S6_DMA_CHNCTRL_LOWWMARK, - DMA_CHNL(dmac, chan) + S6_DMA_CHNCTRL); - s6dmac_set_stride_skip(dmac, chan, comchunk, srcskip, dstskip); - writel(((enable ? 1 : 0) << S6_DMA_CHNCTRL_ENABLE) | - (prio << S6_DMA_CHNCTRL_PRIO) | - (((periphxfer > 0) ? 1 : 0) << S6_DMA_CHNCTRL_PERIPHXFER) | - (((periphxfer < 0) ? 0 : 1) << S6_DMA_CHNCTRL_PERIPHENA) | - ((srcinc ? 1 : 0) << S6_DMA_CHNCTRL_SRCINC) | - ((dstinc ? 1 : 0) << S6_DMA_CHNCTRL_DSTINC) | - (burstsize << S6_DMA_CHNCTRL_BURSTLOG) | - (((bandwidthconserve > 0) ? 1 : 0) << S6_DMA_CHNCTRL_BWCONSEL) | - (((bandwidthconserve < 0) ? 0 : 1) << S6_DMA_CHNCTRL_BWCONENA) | - (lowwmark << S6_DMA_CHNCTRL_LOWWMARK) | - ((timestamp ? 1 : 0) << S6_DMA_CHNCTRL_TSTAMP), - DMA_CHNL(dmac, chan) + S6_DMA_CHNCTRL); -} - - -/* DMA control, per engine */ - -static inline unsigned _dmac_addr_index(u32 dmac) -{ - unsigned i = S6_DMAC_INDEX(dmac); - if (s6dmac_ctrl[i].dmac != dmac) - BUG(); - return i; -} - -static inline void _s6dmac_disable_error_irqs(u32 dmac, u32 mask) -{ - writel(mask, dmac + S6_DMA_TERMCNTIRQCLR); - writel(mask, dmac + S6_DMA_PENDCNTIRQCLR); - writel(mask, dmac + S6_DMA_LOWWMRKIRQCLR); - writel(readl(dmac + S6_DMA_INTENABLE0) - & ~((mask << S6_DMA_INT0_UNDER) | (mask << S6_DMA_INT0_OVER)), - dmac + S6_DMA_INTENABLE0); - writel(readl(dmac + S6_DMA_INTENABLE1) & ~(mask << S6_DMA_INT1_CHANNEL), - dmac + S6_DMA_INTENABLE1); - writel((mask << S6_DMA_INT0_UNDER) | (mask << S6_DMA_INT0_OVER), - dmac + S6_DMA_INTCLEAR0); - writel(mask << S6_DMA_INT1_CHANNEL, dmac + S6_DMA_INTCLEAR1); -} - -/* - * request channel from specified engine - * with chan<0, accept any channel - * further parameters see s6dmac_enable_chan - * returns < 0 upon error, channel nb otherwise - */ -static inline int s6dmac_request_chan(u32 dmac, int chan, - int prio, - int periphxfer, - int srcinc, int dstinc, - int comchunk, - int srcskip, int dstskip, - int burstsize, - int bandwidthconserve, - int lowwmark, - int timestamp, - int enable) -{ - int r = chan; - unsigned long flags; - spinlock_t *spinl = &s6dmac_ctrl[_dmac_addr_index(dmac)].lock; - spin_lock_irqsave(spinl, flags); - if (r < 0) { - r = (readl(dmac + S6_DMA_NEXTFREE) >> S6_DMA_NEXTFREE_CHAN) - & S6_DMA_NEXTFREE_CHAN_MASK; - } - if (r >= s6dmac_ctrl[_dmac_addr_index(dmac)].chan_nb) { - if (chan < 0) - r = -EBUSY; - else - r = -ENXIO; - } else if (((readl(dmac + S6_DMA_NEXTFREE) >> S6_DMA_NEXTFREE_ENA) - >> r) & 1) { - r = -EBUSY; - } else { - s6dmac_enable_chan(dmac, r, prio, periphxfer, - srcinc, dstinc, comchunk, srcskip, dstskip, burstsize, - bandwidthconserve, lowwmark, timestamp, enable); - } - spin_unlock_irqrestore(spinl, flags); - return r; -} - -static inline void s6dmac_put_fifo(u32 dmac, int chan, - u32 src, u32 dst, u32 size) -{ - unsigned long flags; - spinlock_t *spinl = &s6dmac_ctrl[_dmac_addr_index(dmac)].lock; - spin_lock_irqsave(spinl, flags); - writel(src, dmac + S6_DMA_DESCRFIFO0); - writel(dst, dmac + S6_DMA_DESCRFIFO1); - writel(size, 
dmac + S6_DMA_DESCRFIFO2); - writel(chan, dmac + S6_DMA_DESCRFIFO3); - spin_unlock_irqrestore(spinl, flags); -} - -static inline u32 s6dmac_channel_enabled(u32 dmac, int chan) -{ - return readl(DMA_CHNL(dmac, chan) + S6_DMA_CHNCTRL) & - (1 << S6_DMA_CHNCTRL_ENABLE); -} - -/* - * group 1-4 data port channels - * with port=0..3, nrch=1-4 channels, - * frrep=0/1 (dis- or enable frame repeat) - */ -static inline void s6dmac_dp_setup_group(u32 dmac, int port, - int nrch, int frrep) -{ - static const u8 mask[4] = {0, 3, 1, 2}; - BUG_ON(dmac != S6_REG_DPDMA); - if ((port < 0) || (port > 3) || (nrch < 1) || (nrch > 4)) - return; - writel((mask[nrch - 1] << S6_DMA_DPORTCTRLGRP_NRCHANS) - | ((frrep ? 1 : 0) << S6_DMA_DPORTCTRLGRP_FRAMEREP), - dmac + S6_DMA_DPORTCTRLGRP(port)); -} - -static inline void s6dmac_dp_switch_group(u32 dmac, int port, int enable) -{ - u32 tmp; - BUG_ON(dmac != S6_REG_DPDMA); - tmp = readl(dmac + S6_DMA_DPORTCTRLGRP(port)); - if (enable) - tmp |= (1 << S6_DMA_DPORTCTRLGRP_ENA); - else - tmp &= ~(1 << S6_DMA_DPORTCTRLGRP_ENA); - writel(tmp, dmac + S6_DMA_DPORTCTRLGRP(port)); -} - -extern void s6dmac_put_fifo_cache(u32 dmac, int chan, - u32 src, u32 dst, u32 size); -extern void s6dmac_disable_error_irqs(u32 dmac, u32 mask); -extern u32 s6dmac_int_sources(u32 dmac, u32 channel); -extern void s6dmac_release_chan(u32 dmac, int chan); - -#endif /* __ASM_XTENSA_S6000_DMAC_H */ diff --git a/arch/xtensa/variants/s6000/include/variant/gpio.h b/arch/xtensa/variants/s6000/include/variant/gpio.h deleted file mode 100644 index 8484ab0df461..000000000000 --- a/arch/xtensa/variants/s6000/include/variant/gpio.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _XTENSA_VARIANT_S6000_GPIO_H -#define _XTENSA_VARIANT_S6000_GPIO_H - -extern int s6_gpio_init(u32 afsel); - -#endif /* _XTENSA_VARIANT_S6000_GPIO_H */ diff --git a/arch/xtensa/variants/s6000/include/variant/hardware.h b/arch/xtensa/variants/s6000/include/variant/hardware.h deleted file mode 100644 index 5d9ba098d84a..000000000000 --- a/arch/xtensa/variants/s6000/include/variant/hardware.h +++ /dev/null @@ -1,259 +0,0 @@ -#ifndef __XTENSA_S6000_HARDWARE_H -#define __XTENSA_S6000_HARDWARE_H - -#define S6_SCLK 1843200 - -#define S6_MEM_REG 0x20000000 -#define S6_MEM_EFI 0x33F00000 -#define S6_MEM_PCIE_DATARAM1 0x34000000 -#define S6_MEM_XLMI 0x37F80000 -#define S6_MEM_PIF_DATARAM1 0x37FFC000 -#define S6_MEM_GMAC 0x38000000 -#define S6_MEM_I2S 0x3A000000 -#define S6_MEM_EGIB 0x3C000000 -#define S6_MEM_PCIE_CFG 0x3E000000 -#define S6_MEM_PIF_DATARAM 0x3FFE0000 -#define S6_MEM_XLMI_DATARAM 0x3FFF0000 -#define S6_MEM_DDR 0x40000000 -#define S6_MEM_PCIE_APER 0xC0000000 -#define S6_MEM_AUX 0xF0000000 - -/* Device addresses */ - -#define S6_REG_SCB S6_MEM_REG -#define S6_REG_NB (S6_REG_SCB + 0x10000) -#define S6_REG_LMSDMA (S6_REG_SCB + 0x20000) -#define S6_REG_NI (S6_REG_SCB + 0x30000) -#define S6_REG_NIDMA (S6_REG_SCB + 0x40000) -#define S6_REG_NS (S6_REG_SCB + 0x50000) -#define S6_REG_DDR (S6_REG_SCB + 0x60000) -#define S6_REG_GREG1 (S6_REG_SCB + 0x70000) -#define S6_REG_DP (S6_REG_SCB + 0x80000) -#define S6_REG_DPDMA (S6_REG_SCB + 0x90000) -#define S6_REG_EGIB (S6_REG_SCB + 0xA0000) -#define S6_REG_PCIE (S6_REG_SCB + 0xB0000) -#define S6_REG_I2S (S6_REG_SCB + 0xC0000) -#define S6_REG_GMAC (S6_REG_SCB + 0xD0000) -#define S6_REG_HIFDMA (S6_REG_SCB + 0xE0000) -#define S6_REG_GREG2 (S6_REG_SCB + 0xF0000) - -#define S6_REG_APB S6_REG_SCB -#define S6_REG_UART (S6_REG_APB + 0x0000) -#define S6_REG_INTC (S6_REG_APB + 0x2000) -#define S6_REG_SPI (S6_REG_APB + 
0x3000) -#define S6_REG_I2C (S6_REG_APB + 0x4000) -#define S6_REG_GPIO (S6_REG_APB + 0x8000) - -/* Global register block */ - -#define S6_GREG1_PLL_LOCKCLEAR 0x000 -#define S6_GREG1_PLL_LOCK_SYS 0 -#define S6_GREG1_PLL_LOCK_IO 1 -#define S6_GREG1_PLL_LOCK_AIM 2 -#define S6_GREG1_PLL_LOCK_DP0 3 -#define S6_GREG1_PLL_LOCK_DP2 4 -#define S6_GREG1_PLL_LOCK_DDR 5 -#define S6_GREG1_PLL_LOCKSTAT 0x004 -#define S6_GREG1_PLL_LOCKSTAT_CURLOCK 0 -#define S6_GREG1_PLL_LOCKSTAT_EVERUNLCK 8 -#define S6_GREG1_PLLSEL 0x010 -#define S6_GREG1_PLLSEL_AIM 0 -#define S6_GREG1_PLLSEL_AIM_DDR2 0 -#define S6_GREG1_PLLSEL_AIM_300MHZ 1 -#define S6_GREG1_PLLSEL_AIM_240MHZ 2 -#define S6_GREG1_PLLSEL_AIM_200MHZ 3 -#define S6_GREG1_PLLSEL_AIM_150MHZ 4 -#define S6_GREG1_PLLSEL_AIM_120MHZ 5 -#define S6_GREG1_PLLSEL_AIM_40MHZ 6 -#define S6_GREG1_PLLSEL_AIM_PLLAIMREF 7 -#define S6_GREG1_PLLSEL_AIM_MASK 7 -#define S6_GREG1_PLLSEL_DDR 8 -#define S6_GREG1_PLLSEL_DDR_HS 0 -#define S6_GREG1_PLLSEL_DDR_333MHZ 1 -#define S6_GREG1_PLLSEL_DDR_250MHZ 2 -#define S6_GREG1_PLLSEL_DDR_200MHZ 3 -#define S6_GREG1_PLLSEL_DDR_167MHZ 4 -#define S6_GREG1_PLLSEL_DDR_100MHZ 5 -#define S6_GREG1_PLLSEL_DDR_33MHZ 6 -#define S6_GREG1_PLLSEL_DDR_PLLIOREF 7 -#define S6_GREG1_PLLSEL_DDR_MASK 7 -#define S6_GREG1_PLLSEL_GMAC 16 -#define S6_GREG1_PLLSEL_GMAC_125MHZ 0 -#define S6_GREG1_PLLSEL_GMAC_25MHZ 1 -#define S6_GREG1_PLLSEL_GMAC_2500KHZ 2 -#define S6_GREG1_PLLSEL_GMAC_EXTERN 3 -#define S6_GREG1_PLLSEL_GMAC_MASK 3 -#define S6_GREG1_PLLSEL_GMII 18 -#define S6_GREG1_PLLSEL_GMII_111MHZ 0 -#define S6_GREG1_PLLSEL_GMII_IOREF 1 -#define S6_GREG1_PLLSEL_GMII_NONE 2 -#define S6_GREG1_PLLSEL_GMII_125MHZ 3 -#define S6_GREG1_PLLSEL_GMII_MASK 3 -#define S6_GREG1_SYSUNLOCKCNT 0x020 -#define S6_GREG1_IOUNLOCKCNT 0x024 -#define S6_GREG1_AIMUNLOCKCNT 0x028 -#define S6_GREG1_DP0UNLOCKCNT 0x02C -#define S6_GREG1_DP2UNLOCKCNT 0x030 -#define S6_GREG1_DDRUNLOCKCNT 0x034 -#define S6_GREG1_CLKBAL0 0x040 -#define S6_GREG1_CLKBAL0_LSGB 0 -#define S6_GREG1_CLKBAL0_LSPX 8 -#define S6_GREG1_CLKBAL0_MEMDO 16 -#define S6_GREG1_CLKBAL0_HSXT1 24 -#define S6_GREG1_CLKBAL1 0x044 -#define S6_GREG1_CLKBAL1_HSISEF 0 -#define S6_GREG1_CLKBAL1_HSNI 8 -#define S6_GREG1_CLKBAL1_HSNS 16 -#define S6_GREG1_CLKBAL1_HSISEFCFG 24 -#define S6_GREG1_CLKBAL2 0x048 -#define S6_GREG1_CLKBAL2_LSNB 0 -#define S6_GREG1_CLKBAL2_LSSB 8 -#define S6_GREG1_CLKBAL2_LSREST 24 -#define S6_GREG1_CLKBAL3 0x04C -#define S6_GREG1_CLKBAL3_ISEFXAD 0 -#define S6_GREG1_CLKBAL3_ISEFLMS 8 -#define S6_GREG1_CLKBAL3_ISEFISEF 16 -#define S6_GREG1_CLKBAL3_DDRDD 24 -#define S6_GREG1_CLKBAL4 0x050 -#define S6_GREG1_CLKBAL4_DDRDP 0 -#define S6_GREG1_CLKBAL4_DDRDO 8 -#define S6_GREG1_CLKBAL4_DDRNB 16 -#define S6_GREG1_CLKBAL4_DDRLMS 24 -#define S6_GREG1_BLOCKENA 0x100 -#define S6_GREG1_BLOCK_DDR 0 -#define S6_GREG1_BLOCK_DP 1 -#define S6_GREG1_BLOCK_NSNI 2 -#define S6_GREG1_BLOCK_PCIE 3 -#define S6_GREG1_BLOCK_GMAC 4 -#define S6_GREG1_BLOCK_I2S 5 -#define S6_GREG1_BLOCK_EGIB 6 -#define S6_GREG1_BLOCK_SB 7 -#define S6_GREG1_BLOCK_XT1 8 -#define S6_GREG1_CLKGATE 0x104 -#define S6_GREG1_BGATE_AIMNORTH 9 -#define S6_GREG1_BGATE_AIMEAST 10 -#define S6_GREG1_BGATE_AIMWEST 11 -#define S6_GREG1_BGATE_AIMSOUTH 12 -#define S6_GREG1_CHIPRES 0x108 -#define S6_GREG1_CHIPRES_SOFTRES 0 -#define S6_GREG1_CHIPRES_LOSTLOCK 1 -#define S6_GREG1_RESETCAUSE 0x10C -#define S6_GREG1_RESETCAUSE_RESETN 0 -#define S6_GREG1_RESETCAUSE_GLOBAL 1 -#define S6_GREG1_RESETCAUSE_WDOGTIMER 2 -#define S6_GREG1_RESETCAUSE_SWCHIP 3 -#define 
S6_GREG1_RESETCAUSE_PLLSYSLOSS 4 -#define S6_GREG1_RESETCAUSE_PCIE 5 -#define S6_GREG1_RESETCAUSE_CREATEDGLOB 6 -#define S6_GREG1_REFCLOCKCNT 0x110 -#define S6_GREG1_RESETTIMER 0x114 -#define S6_GREG1_NMITIMER 0x118 -#define S6_GREG1_GLOBAL_TIMER 0x11C -#define S6_GREG1_TIMER0 0x180 -#define S6_GREG1_TIMER1 0x184 -#define S6_GREG1_UARTCLOCKSEL 0x204 -#define S6_GREG1_CHIPVERSPACKG 0x208 -#define S6_GREG1_CHIPVERSPACKG_CHIPVID 0 -#define S6_GREG1_CHIPVERSPACKG_PACKSEL 8 -#define S6_GREG1_ONDIETERMCTRL 0x20C -#define S6_GREG1_ONDIETERMCTRL_WEST 0 -#define S6_GREG1_ONDIETERMCTRL_NORTH 2 -#define S6_GREG1_ONDIETERMCTRL_EAST 4 -#define S6_GREG1_ONDIETERMCTRL_SOUTH 6 -#define S6_GREG1_ONDIETERMCTRL_NONE 0 -#define S6_GREG1_ONDIETERMCTRL_75OHM 2 -#define S6_GREG1_ONDIETERMCTRL_MASK 3 -#define S6_GREG1_BOOT_CFG0 0x210 -#define S6_GREG1_BOOT_CFG0_AIMSTRONG 1 -#define S6_GREG1_BOOT_CFG0_MINIBOOTDL 2 -#define S6_GREG1_BOOT_CFG0_OCDGPIO8SET 5 -#define S6_GREG1_BOOT_CFG0_OCDGPIOENA 6 -#define S6_GREG1_BOOT_CFG0_DOWNSTREAM 7 -#define S6_GREG1_BOOT_CFG0_PLLSYSDIV 8 -#define S6_GREG1_BOOT_CFG0_PLLSYSDIV_300MHZ 1 -#define S6_GREG1_BOOT_CFG0_PLLSYSDIV_240MHZ 2 -#define S6_GREG1_BOOT_CFG0_PLLSYSDIV_200MHZ 3 -#define S6_GREG1_BOOT_CFG0_PLLSYSDIV_150MHZ 4 -#define S6_GREG1_BOOT_CFG0_PLLSYSDIV_120MHZ 5 -#define S6_GREG1_BOOT_CFG0_PLLSYSDIV_40MHZ 6 -#define S6_GREG1_BOOT_CFG0_PLLSYSDIV_MASK 7 -#define S6_GREG1_BOOT_CFG0_BALHSLMS 12 -#define S6_GREG1_BOOT_CFG0_BALHSNB 18 -#define S6_GREG1_BOOT_CFG0_BALHSXAD 24 -#define S6_GREG1_BOOT_CFG1 0x214 -#define S6_GREG1_BOOT_CFG1_PCIE1LANE 1 -#define S6_GREG1_BOOT_CFG1_MPLLPRESCALE 2 -#define S6_GREG1_BOOT_CFG1_MPLLNCY 4 -#define S6_GREG1_BOOT_CFG1_MPLLNCY5 9 -#define S6_GREG1_BOOT_CFG1_BALHSREST 14 -#define S6_GREG1_BOOT_CFG1_BALHSPSMEMS 20 -#define S6_GREG1_BOOT_CFG1_BALLSGI 26 -#define S6_GREG1_BOOT_CFG2 0x218 -#define S6_GREG1_BOOT_CFG2_PEID 0 -#define S6_GREG1_BOOT_CFG3 0x21C -#define S6_GREG1_DRAMBUSYHOLDOF 0x220 -#define S6_GREG1_DRAMBUSYHOLDOF_XT0 0 -#define S6_GREG1_DRAMBUSYHOLDOF_XT1 4 -#define S6_GREG1_DRAMBUSYHOLDOF_XT_MASK 7 -#define S6_GREG1_PCIEBAR1SIZE 0x224 -#define S6_GREG1_PCIEBAR2SIZE 0x228 -#define S6_GREG1_PCIEVENDOR 0x22C -#define S6_GREG1_PCIEDEVICE 0x230 -#define S6_GREG1_PCIEREV 0x234 -#define S6_GREG1_PCIECLASS 0x238 -#define S6_GREG1_XT1DCACHEMISS 0x240 -#define S6_GREG1_XT1ICACHEMISS 0x244 -#define S6_GREG1_HWSEMAPHORE(n) (0x400 + 4 * (n)) -#define S6_GREG1_HWSEMAPHORE_NB 16 - -/* peripheral interrupt numbers */ - -#define S6_INTC_GPIO(n) (n) /* 0..3 */ -#define S6_INTC_I2C 4 -#define S6_INTC_SPI 5 -#define S6_INTC_NB_ERR 6 -#define S6_INTC_DMA_LMSERR 7 -#define S6_INTC_DMA_LMSLOWWMRK(n) (8 + (n)) /* 0..11 */ -#define S6_INTC_DMA_LMSPENDCNT(n) (20 + (n)) /* 0..11 */ -#define S6_INTC_DMA HOSTLOWWMRK(n) (32 + (n)) /* 0..6 */ -#define S6_INTC_DMA_HOSTPENDCNT(n) (39 + (n)) /* 0..6 */ -#define S6_INTC_DMA_HOSTERR 46 -#define S6_INTC_UART(n) (47 + (n)) /* 0..1 */ -#define S6_INTC_XAD 49 -#define S6_INTC_NI_ERR 50 -#define S6_INTC_NI_INFIFOFULL 51 -#define S6_INTC_DMA_NIERR 52 -#define S6_INTC_DMA_NILOWWMRK(n) (53 + (n)) /* 0..3 */ -#define S6_INTC_DMA_NIPENDCNT(n) (57 + (n)) /* 0..3 */ -#define S6_INTC_DDR 61 -#define S6_INTC_NS_ERR 62 -#define S6_INTC_EFI_CFGERR 63 -#define S6_INTC_EFI_ISEFTEST 64 -#define S6_INTC_EFI_WRITEERR 65 -#define S6_INTC_NMI_TIMER 66 -#define S6_INTC_PLLLOCK_SYS 67 -#define S6_INTC_PLLLOCK_IO 68 -#define S6_INTC_PLLLOCK_AIM 69 -#define S6_INTC_PLLLOCK_DP0 70 -#define S6_INTC_PLLLOCK_DP2 71 -#define S6_INTC_I2S_ERR 72 
-#define S6_INTC_GMAC_STAT 73 -#define S6_INTC_GMAC_ERR 74 -#define S6_INTC_GIB_ERR 75 -#define S6_INTC_PCIE_ERR 76 -#define S6_INTC_PCIE_MSI(n) (77 + (n)) /* 0..3 */ -#define S6_INTC_PCIE_INTA 81 -#define S6_INTC_PCIE_INTB 82 -#define S6_INTC_PCIE_INTC 83 -#define S6_INTC_PCIE_INTD 84 -#define S6_INTC_SW(n) (85 + (n)) /* 0..9 */ -#define S6_INTC_SW_ENABLE(n) (85 + 256 + (n)) -#define S6_INTC_DMA_DP_ERR 95 -#define S6_INTC_DMA_DPLOWWMRK(n) (96 + (n)) /* 0..3 */ -#define S6_INTC_DMA_DPPENDCNT(n) (100 + (n)) /* 0..3 */ -#define S6_INTC_DMA_DPTERMCNT(n) (104 + (n)) /* 0..3 */ -#define S6_INTC_TIMER0 108 -#define S6_INTC_TIMER1 109 -#define S6_INTC_DMA_HOSTTERMCNT(n) (110 + (n)) /* 0..6 */ - -#endif /* __XTENSA_S6000_HARDWARE_H */ diff --git a/arch/xtensa/variants/s6000/include/variant/irq.h b/arch/xtensa/variants/s6000/include/variant/irq.h deleted file mode 100644 index 39ca751a6255..000000000000 --- a/arch/xtensa/variants/s6000/include/variant/irq.h +++ /dev/null @@ -1,8 +0,0 @@ -#ifndef _XTENSA_S6000_IRQ_H -#define _XTENSA_S6000_IRQ_H - -#define VARIANT_NR_IRQS 8 /* GPIO interrupts */ - -extern void variant_irq_enable(unsigned int irq); - -#endif /* __XTENSA_S6000_IRQ_H */ diff --git a/arch/xtensa/variants/s6000/include/variant/tie-asm.h b/arch/xtensa/variants/s6000/include/variant/tie-asm.h deleted file mode 100644 index f02d0a3a2e20..000000000000 --- a/arch/xtensa/variants/s6000/include/variant/tie-asm.h +++ /dev/null @@ -1,304 +0,0 @@ -/* - * This header file contains assembly-language definitions (assembly - * macros, etc.) for this specific Xtensa processor's TIE extensions - * and options. It is customized to this Xtensa processor configuration. - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 1999-2008 Tensilica Inc. - */ - -#ifndef _XTENSA_CORE_TIE_ASM_H -#define _XTENSA_CORE_TIE_ASM_H - -/* Selection parameter values for save-area save/restore macros: */ -/* Option vs. TIE: */ -#define XTHAL_SAS_TIE 0x0001 /* custom extension or coprocessor */ -#define XTHAL_SAS_OPT 0x0002 /* optional (and not a coprocessor) */ -/* Whether used automatically by compiler: */ -#define XTHAL_SAS_NOCC 0x0004 /* not used by compiler w/o special opts/code */ -#define XTHAL_SAS_CC 0x0008 /* used by compiler without special opts/code */ -/* ABI handling across function calls: */ -#define XTHAL_SAS_CALR 0x0010 /* caller-saved */ -#define XTHAL_SAS_CALE 0x0020 /* callee-saved */ -#define XTHAL_SAS_GLOB 0x0040 /* global across function calls (in thread) */ -/* Misc */ -#define XTHAL_SAS_ALL 0xFFFF /* include all default NCP contents */ - - - -/* Macro to save all non-coprocessor (extra) custom TIE and optional state - * (not including zero-overhead loop registers). - * Save area ptr (clobbered): ptr (16 byte aligned) - * Scratch regs (clobbered): at1..at4 (only first XCHAL_NCP_NUM_ATMPS needed) - */ - .macro xchal_ncp_store ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL - xchal_sa_start \continue, \ofs - .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~\select - xchal_sa_align \ptr, 0, 1024-4, 4, 4 - rsr \at1, BR // boolean option - s32i \at1, \ptr, .Lxchal_ofs_ + 0 - .set .Lxchal_ofs_, .Lxchal_ofs_ + 4 - .endif - .endm // xchal_ncp_store - -/* Macro to save all non-coprocessor (extra) custom TIE and optional state - * (not including zero-overhead loop registers). 
- * Save area ptr (clobbered): ptr (16 byte aligned) - * Scratch regs (clobbered): at1..at4 (only first XCHAL_NCP_NUM_ATMPS needed) - */ - .macro xchal_ncp_load ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL - xchal_sa_start \continue, \ofs - .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~\select - xchal_sa_align \ptr, 0, 1024-4, 4, 4 - l32i \at1, \ptr, .Lxchal_ofs_ + 0 - wsr \at1, BR // boolean option - .set .Lxchal_ofs_, .Lxchal_ofs_ + 4 - .endif - .endm // xchal_ncp_load - - - -#define XCHAL_NCP_NUM_ATMPS 1 - - - -/* Macro to save the state of TIE coprocessor FPU. - * Save area ptr (clobbered): ptr (16 byte aligned) - * Scratch regs (clobbered): at1..at4 (only first XCHAL_CP0_NUM_ATMPS needed) - */ -#define xchal_cp_FPU_store xchal_cp0_store -/* #define xchal_cp_FPU_store_a2 xchal_cp0_store a2 a3 a4 a5 a6 */ - .macro xchal_cp0_store ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL - xchal_sa_start \continue, \ofs - .ifeq (XTHAL_SAS_TIE | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~\select - xchal_sa_align \ptr, 0, 0, 1, 16 - rur232 \at1 // FCR - s32i \at1, \ptr, 0 - rur233 \at1 // FSR - s32i \at1, \ptr, 4 - SSI f0, \ptr, 8 - SSI f1, \ptr, 12 - SSI f2, \ptr, 16 - SSI f3, \ptr, 20 - SSI f4, \ptr, 24 - SSI f5, \ptr, 28 - SSI f6, \ptr, 32 - SSI f7, \ptr, 36 - SSI f8, \ptr, 40 - SSI f9, \ptr, 44 - SSI f10, \ptr, 48 - SSI f11, \ptr, 52 - SSI f12, \ptr, 56 - SSI f13, \ptr, 60 - SSI f14, \ptr, 64 - SSI f15, \ptr, 68 - .set .Lxchal_ofs_, .Lxchal_ofs_ + 72 - .endif - .endm // xchal_cp0_store - -/* Macro to restore the state of TIE coprocessor FPU. - * Save area ptr (clobbered): ptr (16 byte aligned) - * Scratch regs (clobbered): at1..at4 (only first XCHAL_CP0_NUM_ATMPS needed) - */ -#define xchal_cp_FPU_load xchal_cp0_load -/* #define xchal_cp_FPU_load_a2 xchal_cp0_load a2 a3 a4 a5 a6 */ - .macro xchal_cp0_load ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL - xchal_sa_start \continue, \ofs - .ifeq (XTHAL_SAS_TIE | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~\select - xchal_sa_align \ptr, 0, 0, 1, 16 - l32i \at1, \ptr, 0 - wur232 \at1 // FCR - l32i \at1, \ptr, 4 - wur233 \at1 // FSR - LSI f0, \ptr, 8 - LSI f1, \ptr, 12 - LSI f2, \ptr, 16 - LSI f3, \ptr, 20 - LSI f4, \ptr, 24 - LSI f5, \ptr, 28 - LSI f6, \ptr, 32 - LSI f7, \ptr, 36 - LSI f8, \ptr, 40 - LSI f9, \ptr, 44 - LSI f10, \ptr, 48 - LSI f11, \ptr, 52 - LSI f12, \ptr, 56 - LSI f13, \ptr, 60 - LSI f14, \ptr, 64 - LSI f15, \ptr, 68 - .set .Lxchal_ofs_, .Lxchal_ofs_ + 72 - .endif - .endm // xchal_cp0_load - -#define XCHAL_CP0_NUM_ATMPS 1 - -/* Macro to save the state of TIE coprocessor XAD. 
- * Save area ptr (clobbered): ptr (16 byte aligned) - * Scratch regs (clobbered): at1..at4 (only first XCHAL_CP6_NUM_ATMPS needed) - */ -#define xchal_cp_XAD_store xchal_cp6_store -/* #define xchal_cp_XAD_store_a2 xchal_cp6_store a2 a3 a4 a5 a6 */ - .macro xchal_cp6_store ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL - xchal_sa_start \continue, \ofs - .ifeq (XTHAL_SAS_TIE | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~\select - xchal_sa_align \ptr, 0, 0, 1, 16 - rur0 \at1 // LDCBHI - s32i \at1, \ptr, 0 - rur1 \at1 // LDCBLO - s32i \at1, \ptr, 4 - rur2 \at1 // STCBHI - s32i \at1, \ptr, 8 - rur3 \at1 // STCBLO - s32i \at1, \ptr, 12 - rur8 \at1 // LDBRBASE - s32i \at1, \ptr, 16 - rur9 \at1 // LDBROFF - s32i \at1, \ptr, 20 - rur10 \at1 // LDBRINC - s32i \at1, \ptr, 24 - rur11 \at1 // STBRBASE - s32i \at1, \ptr, 28 - rur12 \at1 // STBROFF - s32i \at1, \ptr, 32 - rur13 \at1 // STBRINC - s32i \at1, \ptr, 36 - rur24 \at1 // SCRATCH0 - s32i \at1, \ptr, 40 - rur25 \at1 // SCRATCH1 - s32i \at1, \ptr, 44 - rur26 \at1 // SCRATCH2 - s32i \at1, \ptr, 48 - rur27 \at1 // SCRATCH3 - s32i \at1, \ptr, 52 - WRAS128I wra0, \ptr, 64 - WRAS128I wra1, \ptr, 80 - WRAS128I wra2, \ptr, 96 - WRAS128I wra3, \ptr, 112 - WRAS128I wra4, \ptr, 128 - WRAS128I wra5, \ptr, 144 - WRAS128I wra6, \ptr, 160 - WRAS128I wra7, \ptr, 176 - WRAS128I wra8, \ptr, 192 - WRAS128I wra9, \ptr, 208 - WRAS128I wra10, \ptr, 224 - WRAS128I wra11, \ptr, 240 - WRAS128I wra12, \ptr, 256 - WRAS128I wra13, \ptr, 272 - WRAS128I wra14, \ptr, 288 - WRAS128I wra15, \ptr, 304 - WRBS128I wrb0, \ptr, 320 - WRBS128I wrb1, \ptr, 336 - WRBS128I wrb2, \ptr, 352 - WRBS128I wrb3, \ptr, 368 - WRBS128I wrb4, \ptr, 384 - WRBS128I wrb5, \ptr, 400 - WRBS128I wrb6, \ptr, 416 - WRBS128I wrb7, \ptr, 432 - WRBS128I wrb8, \ptr, 448 - WRBS128I wrb9, \ptr, 464 - WRBS128I wrb10, \ptr, 480 - WRBS128I wrb11, \ptr, 496 - WRBS128I wrb12, \ptr, 512 - WRBS128I wrb13, \ptr, 528 - WRBS128I wrb14, \ptr, 544 - WRBS128I wrb15, \ptr, 560 - .set .Lxchal_ofs_, .Lxchal_ofs_ + 576 - .endif - .endm // xchal_cp6_store - -/* Macro to restore the state of TIE coprocessor XAD. 
- * Save area ptr (clobbered): ptr (16 byte aligned) - * Scratch regs (clobbered): at1..at4 (only first XCHAL_CP6_NUM_ATMPS needed) - */ -#define xchal_cp_XAD_load xchal_cp6_load -/* #define xchal_cp_XAD_load_a2 xchal_cp6_load a2 a3 a4 a5 a6 */ - .macro xchal_cp6_load ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL - xchal_sa_start \continue, \ofs - .ifeq (XTHAL_SAS_TIE | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~\select - xchal_sa_align \ptr, 0, 0, 1, 16 - l32i \at1, \ptr, 0 - wur0 \at1 // LDCBHI - l32i \at1, \ptr, 4 - wur1 \at1 // LDCBLO - l32i \at1, \ptr, 8 - wur2 \at1 // STCBHI - l32i \at1, \ptr, 12 - wur3 \at1 // STCBLO - l32i \at1, \ptr, 16 - wur8 \at1 // LDBRBASE - l32i \at1, \ptr, 20 - wur9 \at1 // LDBROFF - l32i \at1, \ptr, 24 - wur10 \at1 // LDBRINC - l32i \at1, \ptr, 28 - wur11 \at1 // STBRBASE - l32i \at1, \ptr, 32 - wur12 \at1 // STBROFF - l32i \at1, \ptr, 36 - wur13 \at1 // STBRINC - l32i \at1, \ptr, 40 - wur24 \at1 // SCRATCH0 - l32i \at1, \ptr, 44 - wur25 \at1 // SCRATCH1 - l32i \at1, \ptr, 48 - wur26 \at1 // SCRATCH2 - l32i \at1, \ptr, 52 - wur27 \at1 // SCRATCH3 - WRBL128I wrb0, \ptr, 320 - WRBL128I wrb1, \ptr, 336 - WRBL128I wrb2, \ptr, 352 - WRBL128I wrb3, \ptr, 368 - WRBL128I wrb4, \ptr, 384 - WRBL128I wrb5, \ptr, 400 - WRBL128I wrb6, \ptr, 416 - WRBL128I wrb7, \ptr, 432 - WRBL128I wrb8, \ptr, 448 - WRBL128I wrb9, \ptr, 464 - WRBL128I wrb10, \ptr, 480 - WRBL128I wrb11, \ptr, 496 - WRBL128I wrb12, \ptr, 512 - WRBL128I wrb13, \ptr, 528 - WRBL128I wrb14, \ptr, 544 - WRBL128I wrb15, \ptr, 560 - WRAL128I wra0, \ptr, 64 - WRAL128I wra1, \ptr, 80 - WRAL128I wra2, \ptr, 96 - WRAL128I wra3, \ptr, 112 - WRAL128I wra4, \ptr, 128 - WRAL128I wra5, \ptr, 144 - WRAL128I wra6, \ptr, 160 - WRAL128I wra7, \ptr, 176 - WRAL128I wra8, \ptr, 192 - WRAL128I wra9, \ptr, 208 - WRAL128I wra10, \ptr, 224 - WRAL128I wra11, \ptr, 240 - WRAL128I wra12, \ptr, 256 - WRAL128I wra13, \ptr, 272 - WRAL128I wra14, \ptr, 288 - WRAL128I wra15, \ptr, 304 - .set .Lxchal_ofs_, .Lxchal_ofs_ + 576 - .endif - .endm // xchal_cp6_load - -#define XCHAL_CP6_NUM_ATMPS 1 -#define XCHAL_SA_NUM_ATMPS 1 - - /* Empty macros for unconfigured coprocessors: */ - .macro xchal_cp1_store p a b c d continue=0 ofs=-1 select=-1 ; .endm - .macro xchal_cp1_load p a b c d continue=0 ofs=-1 select=-1 ; .endm - .macro xchal_cp2_store p a b c d continue=0 ofs=-1 select=-1 ; .endm - .macro xchal_cp2_load p a b c d continue=0 ofs=-1 select=-1 ; .endm - .macro xchal_cp3_store p a b c d continue=0 ofs=-1 select=-1 ; .endm - .macro xchal_cp3_load p a b c d continue=0 ofs=-1 select=-1 ; .endm - .macro xchal_cp4_store p a b c d continue=0 ofs=-1 select=-1 ; .endm - .macro xchal_cp4_load p a b c d continue=0 ofs=-1 select=-1 ; .endm - .macro xchal_cp5_store p a b c d continue=0 ofs=-1 select=-1 ; .endm - .macro xchal_cp5_load p a b c d continue=0 ofs=-1 select=-1 ; .endm - .macro xchal_cp7_store p a b c d continue=0 ofs=-1 select=-1 ; .endm - .macro xchal_cp7_load p a b c d continue=0 ofs=-1 select=-1 ; .endm - -#endif /*_XTENSA_CORE_TIE_ASM_H*/ - diff --git a/arch/xtensa/variants/s6000/include/variant/tie.h b/arch/xtensa/variants/s6000/include/variant/tie.h deleted file mode 100644 index be7ea843d5df..000000000000 --- a/arch/xtensa/variants/s6000/include/variant/tie.h +++ /dev/null @@ -1,191 +0,0 @@ -/* - * This header file describes this specific Xtensa processor's TIE extensions - * that extend basic Xtensa core functionality. It is customized to this - * Xtensa processor configuration. 
- * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 1999-2008 Tensilica Inc. - */ - -#ifndef _XTENSA_CORE_TIE_H -#define _XTENSA_CORE_TIE_H - -#define XCHAL_CP_NUM 2 /* number of coprocessors */ -#define XCHAL_CP_MAX 7 /* max CP ID + 1 (0 if none) */ -#define XCHAL_CP_MASK 0x41 /* bitmask of all CPs by ID */ -#define XCHAL_CP_PORT_MASK 0x00 /* bitmask of only port CPs */ - -/* Basic parameters of each coprocessor: */ -#define XCHAL_CP0_NAME "FPU" -#define XCHAL_CP0_IDENT FPU -#define XCHAL_CP0_SA_SIZE 72 /* size of state save area */ -#define XCHAL_CP0_SA_ALIGN 4 /* min alignment of save area */ -#define XCHAL_CP_ID_FPU 0 /* coprocessor ID (0..7) */ -#define XCHAL_CP6_NAME "XAD" -#define XCHAL_CP6_IDENT XAD -#define XCHAL_CP6_SA_SIZE 576 /* size of state save area */ -#define XCHAL_CP6_SA_ALIGN 16 /* min alignment of save area */ -#define XCHAL_CP_ID_XAD 6 /* coprocessor ID (0..7) */ - -/* Filler info for unassigned coprocessors, to simplify arrays etc: */ -#define XCHAL_CP1_SA_SIZE 0 -#define XCHAL_CP1_SA_ALIGN 1 -#define XCHAL_CP2_SA_SIZE 0 -#define XCHAL_CP2_SA_ALIGN 1 -#define XCHAL_CP3_SA_SIZE 0 -#define XCHAL_CP3_SA_ALIGN 1 -#define XCHAL_CP4_SA_SIZE 0 -#define XCHAL_CP4_SA_ALIGN 1 -#define XCHAL_CP5_SA_SIZE 0 -#define XCHAL_CP5_SA_ALIGN 1 -#define XCHAL_CP7_SA_SIZE 0 -#define XCHAL_CP7_SA_ALIGN 1 - -/* Save area for non-coprocessor optional and custom (TIE) state: */ -#define XCHAL_NCP_SA_SIZE 4 -#define XCHAL_NCP_SA_ALIGN 4 - -/* Total save area for optional and custom state (NCP + CPn): */ -#define XCHAL_TOTAL_SA_SIZE 672 /* with 16-byte align padding */ -#define XCHAL_TOTAL_SA_ALIGN 16 /* actual minimum alignment */ - -/* - * Detailed contents of save areas. - * NOTE: caller must define the XCHAL_SA_REG macro (not defined here) - * before expanding the XCHAL_xxx_SA_LIST() macros. - * - * XCHAL_SA_REG(s,ccused,abikind,kind,opt,name,galign,align,asize, - * dbnum,base,regnum,bitsz,gapsz,reset,x...) - * - * s = passed from XCHAL_*_LIST(s), eg. to select how to expand - * ccused = set if used by compiler without special options or code - * abikind = 0 (caller-saved), 1 (callee-saved), or 2 (thread-global) - * kind = 0 (special reg), 1 (TIE user reg), or 2 (TIE regfile reg) - * opt = 0 (custom TIE extension or coprocessor), or 1 (optional reg) - * name = lowercase reg name (no quotes) - * galign = group byte alignment (power of 2) (galign >= align) - * align = register byte alignment (power of 2) - * asize = allocated size in bytes (asize*8 == bitsz + gapsz + padsz) - * (not including any pad bytes required to galign this or next reg) - * dbnum = unique target number f/debug (see <xtensa-libdb-macros.h>) - * base = reg shortname w/o index (or sr=special, ur=TIE user reg) - * regnum = reg index in regfile, or special/TIE-user reg number - * bitsz = number of significant bits (regfile width, or ur/sr mask bits) - * gapsz = intervening bits, if bitsz bits not stored contiguously - * (padsz = pad bits at end [TIE regfile] or at msbits [ur,sr] of asize) - * reset = register reset value (or 0 if undefined at reset) - * x = reserved for future use (0 until then) - * - * To filter out certain registers, e.g. to expand only the non-global - * registers used by the compiler, you can do something like this: - * - * #define XCHAL_SA_REG(s,ccused,p...) SELCC##ccused(p) - * #define SELCC0(p...) - * #define SELCC1(abikind,p...) 
SELAK##abikind(p) - * #define SELAK0(p...) REG(p) - * #define SELAK1(p...) REG(p) - * #define SELAK2(p...) - * #define REG(kind,tie,name,galn,aln,asz,csz,dbnum,base,rnum,bsz,rst,x...) \ - * ...what you want to expand... - */ - -#define XCHAL_NCP_SA_NUM 1 -#define XCHAL_NCP_SA_LIST(s) \ - XCHAL_SA_REG(s,0,0,0,1, br, 4, 4, 4,0x0204, sr,4 , 16,0,0,0) - -#define XCHAL_CP0_SA_NUM 18 -#define XCHAL_CP0_SA_LIST(s) \ - XCHAL_SA_REG(s,0,0,1,0, fcr, 4, 4, 4,0x03E8, ur,232, 32,0,0,0) \ - XCHAL_SA_REG(s,0,0,1,0, fsr, 4, 4, 4,0x03E9, ur,233, 32,0,0,0) \ - XCHAL_SA_REG(s,0,0,2,0, f0, 4, 4, 4,0x0030, f,0 , 32,0,0,0) \ - XCHAL_SA_REG(s,0,0,2,0, f1, 4, 4, 4,0x0031, f,1 , 32,0,0,0) \ - XCHAL_SA_REG(s,0,0,2,0, f2, 4, 4, 4,0x0032, f,2 , 32,0,0,0) \ - XCHAL_SA_REG(s,0,0,2,0, f3, 4, 4, 4,0x0033, f,3 , 32,0,0,0) \ - XCHAL_SA_REG(s,0,0,2,0, f4, 4, 4, 4,0x0034, f,4 , 32,0,0,0) \ - XCHAL_SA_REG(s,0,0,2,0, f5, 4, 4, 4,0x0035, f,5 , 32,0,0,0) \ - XCHAL_SA_REG(s,0,0,2,0, f6, 4, 4, 4,0x0036, f,6 , 32,0,0,0) \ - XCHAL_SA_REG(s,0,0,2,0, f7, 4, 4, 4,0x0037, f,7 , 32,0,0,0) \ - XCHAL_SA_REG(s,0,0,2,0, f8, 4, 4, 4,0x0038, f,8 , 32,0,0,0) \ - XCHAL_SA_REG(s,0,0,2,0, f9, 4, 4, 4,0x0039, f,9 , 32,0,0,0) \ - XCHAL_SA_REG(s,0,0,2,0, f10, 4, 4, 4,0x003A, f,10 , 32,0,0,0) \ - XCHAL_SA_REG(s,0,0,2,0, f11, 4, 4, 4,0x003B, f,11 , 32,0,0,0) \ - XCHAL_SA_REG(s,0,0,2,0, f12, 4, 4, 4,0x003C, f,12 , 32,0,0,0) \ - XCHAL_SA_REG(s,0,0,2,0, f13, 4, 4, 4,0x003D, f,13 , 32,0,0,0) \ - XCHAL_SA_REG(s,0,0,2,0, f14, 4, 4, 4,0x003E, f,14 , 32,0,0,0) \ - XCHAL_SA_REG(s,0,0,2,0, f15, 4, 4, 4,0x003F, f,15 , 32,0,0,0) - -#define XCHAL_CP1_SA_NUM 0 -#define XCHAL_CP1_SA_LIST(s) /* empty */ - -#define XCHAL_CP2_SA_NUM 0 -#define XCHAL_CP2_SA_LIST(s) /* empty */ - -#define XCHAL_CP3_SA_NUM 0 -#define XCHAL_CP3_SA_LIST(s) /* empty */ - -#define XCHAL_CP4_SA_NUM 0 -#define XCHAL_CP4_SA_LIST(s) /* empty */ - -#define XCHAL_CP5_SA_NUM 0 -#define XCHAL_CP5_SA_LIST(s) /* empty */ - -#define XCHAL_CP6_SA_NUM 46 -#define XCHAL_CP6_SA_LIST(s) \ - XCHAL_SA_REG(s,0,0,1,0, ldcbhi,16, 4, 4,0x0300, ur,0 , 32,0,0,0) \ - XCHAL_SA_REG(s,0,0,1,0, ldcblo, 4, 4, 4,0x0301, ur,1 , 32,0,0,0) \ - XCHAL_SA_REG(s,0,0,1,0, stcbhi, 4, 4, 4,0x0302, ur,2 , 32,0,0,0) \ - XCHAL_SA_REG(s,0,0,1,0, stcblo, 4, 4, 4,0x0303, ur,3 , 32,0,0,0) \ - XCHAL_SA_REG(s,0,0,1,0, ldbrbase, 4, 4, 4,0x0308, ur,8 , 32,0,0,0) \ - XCHAL_SA_REG(s,0,0,1,0, ldbroff, 4, 4, 4,0x0309, ur,9 , 32,0,0,0) \ - XCHAL_SA_REG(s,0,0,1,0, ldbrinc, 4, 4, 4,0x030A, ur,10 , 32,0,0,0) \ - XCHAL_SA_REG(s,0,0,1,0, stbrbase, 4, 4, 4,0x030B, ur,11 , 32,0,0,0) \ - XCHAL_SA_REG(s,0,0,1,0, stbroff, 4, 4, 4,0x030C, ur,12 , 32,0,0,0) \ - XCHAL_SA_REG(s,0,0,1,0, stbrinc, 4, 4, 4,0x030D, ur,13 , 32,0,0,0) \ - XCHAL_SA_REG(s,0,0,1,0, scratch0, 4, 4, 4,0x0318, ur,24 , 32,0,0,0) \ - XCHAL_SA_REG(s,0,0,1,0, scratch1, 4, 4, 4,0x0319, ur,25 , 32,0,0,0) \ - XCHAL_SA_REG(s,0,0,1,0, scratch2, 4, 4, 4,0x031A, ur,26 , 32,0,0,0) \ - XCHAL_SA_REG(s,0,0,1,0, scratch3, 4, 4, 4,0x031B, ur,27 , 32,0,0,0) \ - XCHAL_SA_REG(s,0,0,2,0, wra0,16,16,16,0x1010, wra,0 ,128,0,0,0) \ - XCHAL_SA_REG(s,0,0,2,0, wra1,16,16,16,0x1011, wra,1 ,128,0,0,0) \ - XCHAL_SA_REG(s,0,0,2,0, wra2,16,16,16,0x1012, wra,2 ,128,0,0,0) \ - XCHAL_SA_REG(s,0,0,2,0, wra3,16,16,16,0x1013, wra,3 ,128,0,0,0) \ - XCHAL_SA_REG(s,0,0,2,0, wra4,16,16,16,0x1014, wra,4 ,128,0,0,0) \ - XCHAL_SA_REG(s,0,0,2,0, wra5,16,16,16,0x1015, wra,5 ,128,0,0,0) \ - XCHAL_SA_REG(s,0,0,2,0, wra6,16,16,16,0x1016, wra,6 ,128,0,0,0) \ - XCHAL_SA_REG(s,0,0,2,0, wra7,16,16,16,0x1017, wra,7 ,128,0,0,0) \ - 
XCHAL_SA_REG(s,0,0,2,0, wra8,16,16,16,0x1018, wra,8 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wra9,16,16,16,0x1019, wra,9 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wra10,16,16,16,0x101A, wra,10 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wra11,16,16,16,0x101B, wra,11 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wra12,16,16,16,0x101C, wra,12 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wra13,16,16,16,0x101D, wra,13 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wra14,16,16,16,0x101E, wra,14 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wra15,16,16,16,0x101F, wra,15 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wrb0,16,16,16,0x1020, wrb,0 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wrb1,16,16,16,0x1021, wrb,1 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wrb2,16,16,16,0x1022, wrb,2 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wrb3,16,16,16,0x1023, wrb,3 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wrb4,16,16,16,0x1024, wrb,4 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wrb5,16,16,16,0x1025, wrb,5 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wrb6,16,16,16,0x1026, wrb,6 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wrb7,16,16,16,0x1027, wrb,7 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wrb8,16,16,16,0x1028, wrb,8 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wrb9,16,16,16,0x1029, wrb,9 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wrb10,16,16,16,0x102A, wrb,10 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wrb11,16,16,16,0x102B, wrb,11 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wrb12,16,16,16,0x102C, wrb,12 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wrb13,16,16,16,0x102D, wrb,13 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wrb14,16,16,16,0x102E, wrb,14 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wrb15,16,16,16,0x102F, wrb,15 ,128,0,0,0)
-
-#define XCHAL_CP7_SA_NUM 0
-#define XCHAL_CP7_SA_LIST(s) /* empty */
-
-/* Byte length of instruction from its first nibble (op0 field), per FLIX. */
-#define XCHAL_OP0_FORMAT_LENGTHS 3,3,3,3,3,3,3,3,2,2,2,2,2,2,8,8
-
-#endif /*_XTENSA_CORE_TIE_H*/
-
diff --git a/arch/xtensa/variants/s6000/irq.c b/arch/xtensa/variants/s6000/irq.c
deleted file mode 100644
index 81a241e79075..000000000000
--- a/arch/xtensa/variants/s6000/irq.c
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * s6000 irq crossbar
- *
- * Copyright (c) 2009 emlix GmbH
- * Authors: Johannes Weiner <hannes@cmpxchg.org>
- *          Oskar Schirmer <oskar@scara.com>
- */
-#include <linux/io.h>
-#include <asm/irq.h>
-#include <variant/hardware.h>
-
-/* S6_REG_INTC */
-#define INTC_STATUS 0x000
-#define INTC_RAW 0x010
-#define INTC_STATUS_AG 0x100
-#define INTC_CFG(n) (0x200 + 4 * (n))
-
-/*
- * The s6000 has a crossbar that multiplexes interrupt output lines
- * from the peripherals to input lines on the xtensa core.
- *
- * We leave the mapping decisions to the platform as it depends on the
- * actually connected peripherals which distribution makes sense.
- */
-extern const signed char *platform_irq_mappings[NR_IRQS];
-
-static unsigned long scp_to_intc_enable[] = {
-#define TO_INTC_ENABLE(n) (((n) << 1) + 1)
-        TO_INTC_ENABLE(0),
-        TO_INTC_ENABLE(1),
-        TO_INTC_ENABLE(2),
-        TO_INTC_ENABLE(3),
-        TO_INTC_ENABLE(4),
-        TO_INTC_ENABLE(5),
-        TO_INTC_ENABLE(6),
-        TO_INTC_ENABLE(7),
-        TO_INTC_ENABLE(8),
-        TO_INTC_ENABLE(9),
-        TO_INTC_ENABLE(10),
-        TO_INTC_ENABLE(11),
-        TO_INTC_ENABLE(12),
-        -1,
-        -1,
-        TO_INTC_ENABLE(13),
-        -1,
-        TO_INTC_ENABLE(14),
-        -1,
-        TO_INTC_ENABLE(15),
-#undef TO_INTC_ENABLE
-};
-
-static void irq_set(unsigned int irq, int enable)
-{
-        unsigned long en;
-        const signed char *m = platform_irq_mappings[irq];
-
-        if (!m)
-                return;
-        en = enable ? scp_to_intc_enable[irq] : 0;
-        while (*m >= 0) {
-                writel(en, S6_REG_INTC + INTC_CFG(*m));
-                m++;
-        }
-}
-
-void variant_irq_enable(unsigned int irq)
-{
-        irq_set(irq, 1);
-}
-
-void variant_irq_disable(unsigned int irq)
-{
-        irq_set(irq, 0);
-}
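
Note on the crossbar code removed just above: irq.c expected the platform to publish platform_irq_mappings[], one negative-terminated list of crossbar slots per core IRQ. The sketch below shows what such a platform table could have looked like; the array names and the routing choices are invented for illustration and are not taken from any s6000 board file.

/* Hypothetical platform-side crossbar table (illustration only).
 * Each per-IRQ list names the S6_INTC_* peripheral lines to steer
 * onto that core input and ends with a negative sentinel, which is
 * the termination the removed irq_set() loop relied on.
 */
#include <asm/irq.h>
#include <variant/hardware.h>

static const signed char uarts_to_core2[] = {
        S6_INTC_UART(0), S6_INTC_UART(1), -1
};
static const signed char gpio0_to_core3[] = {
        S6_INTC_GPIO(0), -1
};

const signed char *platform_irq_mappings[NR_IRQS] = {
        [2] = uarts_to_core2,   /* both UARTs raise core IRQ 2 */
        [3] = gpio0_to_core3,   /* GPIO bank 0 raises core IRQ 3 */
        /* entries left NULL are skipped by irq_set() */
};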
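Note on the save-area lists in the removed tie.h above: the header documents the XCHAL_SA_REG(s,...) field layout and states that the caller must define XCHAL_SA_REG before expanding the XCHAL_xxx_SA_LIST() macros. As a minimal sketch of that contract (assuming the removed header were still on the include path), the expansion below turns the FPU list into a C table of save-area entries; the struct and the macro body are illustrative additions, not part of the original header.

/* Illustrative expansion of the removed XCHAL_CP0_SA_LIST(): keep the
 * name, alignment and allocated-size fields, drop the rest into
 * __VA_ARGS__.
 */
#include <variant/tie.h>        /* the header being removed above */

struct sa_entry {
        const char *name;
        int align;
        int size;
};

#define XCHAL_SA_REG(s, ccused, abikind, kind, opt, name, \
                     galign, align, asize, ...) \
        { #name, align, asize },

static const struct sa_entry fpu_save_area[] = {
        XCHAL_CP0_SA_LIST(0)    /* fcr, fsr, f0..f15: 18 entries */
};
#undef XCHAL_SA_REG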
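Note on the store/load macros removed from tie-asm.h above: they lay the FPU and XAD state out at fixed offsets (FCR/FSR plus f0..f15 at 4-byte steps; fourteen XAD user registers, then the 128-bit wra/wrb files at 16-byte steps starting at offset 64). A quick stand-alone C11 check that those offsets add up to the XCHAL_CP0_SA_SIZE and XCHAL_CP6_SA_SIZE values listed in the removed tie.h; the enum names are made up for the calculation.

#include <assert.h>     /* static_assert */

enum {
        /* FPU: FCR + FSR + f0..f15, 4 bytes each */
        FPU_SA_SIZE = (2 + 16) * 4,
        /* XAD: 14 user regs (56 bytes) padded to 64 so the first
         * 128-bit register lands on a 16-byte boundary, then
         * wra0..15 and wrb0..15 at 16 bytes apiece.
         */
        XAD_SA_SIZE = 64 + 32 * 16,
};

static_assert(FPU_SA_SIZE == 72,  "matches XCHAL_CP0_SA_SIZE");
static_assert(XAD_SA_SIZE == 576, "matches XCHAL_CP6_SA_SIZE");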