Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/Kconfig.debug | 2
-rw-r--r--  arch/arm/boot/compressed/head.S | 20
-rw-r--r--  arch/arm/boot/dts/am335x-evm.dts | 2
-rw-r--r--  arch/arm/boot/dts/am437x-gp-evm.dts | 4
-rw-r--r--  arch/arm/boot/dts/am437x-sk-evm.dts | 4
-rw-r--r--  arch/arm/boot/dts/am43x-epos-evm.dts | 4
-rw-r--r--  arch/arm/boot/dts/exynos5250-snow.dts | 4
-rw-r--r--  arch/arm/boot/dts/exynos5250.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/omap3-evm-common.dtsi | 21
-rw-r--r--  arch/arm/boot/dts/omap3-ldp.dts | 23
-rw-r--r--  arch/arm/boot/dts/omap3.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/r8a7740.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/r8a7790.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/sama5d31.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/sama5d33.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/sama5d34.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/sama5d35.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/sama5d36.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/sama5d3xcm.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/sun6i-a31.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/tegra114-dalmore.dts | 1
-rw-r--r--  arch/arm/boot/dts/tegra114-roth.dts | 9
-rw-r--r--  arch/arm/boot/dts/tegra114-tn7.dts | 5
-rw-r--r--  arch/arm/boot/dts/tegra114.dtsi | 7
-rw-r--r--  arch/arm/boot/dts/tegra124-jetson-tk1.dts | 1
-rw-r--r--  arch/arm/boot/dts/tegra124-nyan-big.dts | 1
-rw-r--r--  arch/arm/boot/dts/tegra124-venice2.dts | 1
-rw-r--r--  arch/arm/boot/dts/tegra124.dtsi | 8
-rw-r--r--  arch/arm/boot/dts/tegra20-harmony.dts | 1
-rw-r--r--  arch/arm/boot/dts/tegra20-iris-512.dts | 5
-rw-r--r--  arch/arm/boot/dts/tegra20-medcom-wide.dts | 4
-rw-r--r--  arch/arm/boot/dts/tegra20-paz00.dts | 2
-rw-r--r--  arch/arm/boot/dts/tegra20-seaboard.dts | 1
-rw-r--r--  arch/arm/boot/dts/tegra20-tamonten.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/tegra20-trimslice.dts | 1
-rw-r--r--  arch/arm/boot/dts/tegra20-ventana.dts | 1
-rw-r--r--  arch/arm/boot/dts/tegra20-whistler.dts | 1
-rw-r--r--  arch/arm/boot/dts/tegra20.dtsi | 8
-rw-r--r--  arch/arm/boot/dts/tegra30-apalis-eval.dts | 4
-rw-r--r--  arch/arm/boot/dts/tegra30-beaver.dts | 1
-rw-r--r--  arch/arm/boot/dts/tegra30-cardhu.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/tegra30-colibri-eval-v3.dts | 3
-rw-r--r--  arch/arm/boot/dts/tegra30.dtsi | 8
-rw-r--r--  arch/arm/boot/dts/vf610-cosmic.dts | 19
-rw-r--r--  arch/arm/boot/dts/zynq-parallella.dts | 4
-rw-r--r--  arch/arm/common/edma.c | 9
-rw-r--r--  arch/arm/configs/exynos_defconfig | 2
-rw-r--r--  arch/arm/configs/imx_v4_v5_defconfig | 1
-rw-r--r--  arch/arm/configs/imx_v6_v7_defconfig | 1
-rw-r--r--  arch/arm/configs/multi_v7_defconfig | 3
-rw-r--r--  arch/arm/configs/omap2plus_defconfig | 4
-rw-r--r--  arch/arm/configs/socfpga_defconfig | 71
-rw-r--r--  arch/arm/include/asm/thread_info.h | 11
-rw-r--r--  arch/arm/kernel/traps.c | 31
-rw-r--r--  arch/arm/kvm/mmu.c | 10
-rw-r--r--  arch/arm/mach-at91/include/mach/atmel-mci.h | 17
-rw-r--r--  arch/arm/mach-ep93xx/dma.c | 12
-rw-r--r--  arch/arm/mach-exynos/pmu.c | 8
-rw-r--r--  arch/arm/mach-imx/clk-vf610.c | 134
-rw-r--r--  arch/arm/mach-imx/common.h | 4
-rw-r--r--  arch/arm/mach-imx/gpc.c | 42
-rw-r--r--  arch/arm/mach-imx/pm-imx6.c | 5
-rw-r--r--  arch/arm/mach-ixp4xx/include/mach/io.h | 4
-rw-r--r--  arch/arm/mach-mvebu/board-v7.c | 2
-rw-r--r--  arch/arm/mach-mvebu/coherency.c | 2
-rw-r--r--  arch/arm/mach-omap2/board-n8x0.c | 2
-rw-r--r--  arch/arm/mach-omap2/board-rx51-peripherals.c | 4
-rw-r--r--  arch/arm/mach-omap2/devices.c | 36
-rw-r--r--  arch/arm/mach-omap2/gpmc.c | 2
-rw-r--r--  arch/arm/mach-omap2/hsmmc.c | 158
-rw-r--r--  arch/arm/mach-omap2/hsmmc.h | 9
-rw-r--r--  arch/arm/mach-omap2/mmc.h | 10
-rw-r--r--  arch/arm/mach-omap2/omap4-common.c | 1
-rw-r--r--  arch/arm/mach-omap2/omap_device.c | 4
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod_2430_data.c | 4
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c | 8
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod_33xx_data.c | 1
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod_3xxx_data.c | 8
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod_44xx_data.c | 4
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod_54xx_data.c | 4
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod_7xx_data.c | 4
-rw-r--r--  arch/arm/mach-omap2/omap_phy_internal.c | 8
-rw-r--r--  arch/arm/mach-omap2/serial.c | 3
-rw-r--r--  arch/arm/mach-pxa/include/mach/addr-map.h | 5
-rw-r--r--  arch/arm/mach-shmobile/clock-r8a7740.c | 9
-rw-r--r--  arch/arm/mach-shmobile/clock-r8a7790.c | 2
-rw-r--r--  arch/arm/mach-shmobile/setup-sh73a0.c | 20
-rw-r--r--  arch/arm/mach-tegra/irq.c | 22
-rw-r--r--  arch/arm/mach-tegra/reset-handler.S | 1
-rw-r--r--  arch/arm/mm/Kconfig | 1
-rw-r--r--  arch/arm/mm/proc-v7.S | 2
-rw-r--r--  arch/arm/mm/proc-xscale.S | 4
-rw-r--r--  arch/arm/plat-orion/gpio.c | 36
-rw-r--r--  arch/arm64/Kconfig | 187
-rw-r--r--  arch/arm64/Kconfig.debug | 12
-rw-r--r--  arch/arm64/boot/dts/apm/apm-storm.dtsi | 10
-rw-r--r--  arch/arm64/configs/defconfig | 26
-rw-r--r--  arch/arm64/crypto/Kconfig | 5
-rw-r--r--  arch/arm64/crypto/aes-ce-ccm-glue.c | 4
-rw-r--r--  arch/arm64/crypto/aes-ce-cipher.c | 112
-rw-r--r--  arch/arm64/crypto/aes-ce-setkey.h | 5
-rw-r--r--  arch/arm64/crypto/aes-glue.c | 18
-rw-r--r--  arch/arm64/include/asm/alternative-asm.h | 29
-rw-r--r--  arch/arm64/include/asm/alternative.h | 44
-rw-r--r--  arch/arm64/include/asm/cache.h | 2
-rw-r--r--  arch/arm64/include/asm/cacheflush.h | 2
-rw-r--r--  arch/arm64/include/asm/cmpxchg.h | 73
-rw-r--r--  arch/arm64/include/asm/compat.h | 7
-rw-r--r--  arch/arm64/include/asm/cpu.h | 2
-rw-r--r--  arch/arm64/include/asm/cpufeature.h | 29
-rw-r--r--  arch/arm64/include/asm/cputype.h | 5
-rw-r--r--  arch/arm64/include/asm/dmi.h | 31
-rw-r--r--  arch/arm64/include/asm/fixmap.h | 8
-rw-r--r--  arch/arm64/include/asm/hwcap.h | 1
-rw-r--r--  arch/arm64/include/asm/insn.h | 10
-rw-r--r--  arch/arm64/include/asm/io.h | 23
-rw-r--r--  arch/arm64/include/asm/irq.h | 3
-rw-r--r--  arch/arm64/include/asm/kvm_arm.h | 21
-rw-r--r--  arch/arm64/include/asm/memory.h | 2
-rw-r--r--  arch/arm64/include/asm/opcodes.h | 1
-rw-r--r--  arch/arm64/include/asm/percpu.h | 215
-rw-r--r--  arch/arm64/include/asm/pgalloc.h | 8
-rw-r--r--  arch/arm64/include/asm/seccomp.h | 25
-rw-r--r--  arch/arm64/include/asm/tlb.h | 67
-rw-r--r--  arch/arm64/include/asm/traps.h | 16
-rw-r--r--  arch/arm64/include/asm/unistd.h | 3
-rw-r--r--  arch/arm64/include/asm/unistd32.h | 5
-rw-r--r--  arch/arm64/kernel/Makefile | 7
-rw-r--r--  arch/arm64/kernel/alternative.c | 85
-rw-r--r--  arch/arm64/kernel/armv8_deprecated.c | 553
-rw-r--r--  arch/arm64/kernel/cpu_errata.c | 111
-rw-r--r--  arch/arm64/kernel/cpuinfo.c | 23
-rw-r--r--  arch/arm64/kernel/efi-entry.S | 30
-rw-r--r--  arch/arm64/kernel/efi.c | 37
-rw-r--r--  arch/arm64/kernel/entry-ftrace.S | 21
-rw-r--r--  arch/arm64/kernel/entry.S | 138
-rw-r--r--  arch/arm64/kernel/head.S | 434
-rw-r--r--  arch/arm64/kernel/insn.c | 31
-rw-r--r--  arch/arm64/kernel/io.c | 66
-rw-r--r--  arch/arm64/kernel/irq.c | 2
-rw-r--r--  arch/arm64/kernel/jump_label.c | 23
-rw-r--r--  arch/arm64/kernel/module.c | 18
-rw-r--r--  arch/arm64/kernel/perf_event.c | 10
-rw-r--r--  arch/arm64/kernel/psci.c | 2
-rw-r--r--  arch/arm64/kernel/ptrace.c | 40
-rw-r--r--  arch/arm64/kernel/setup.c | 108
-rw-r--r--  arch/arm64/kernel/signal32.c | 6
-rw-r--r--  arch/arm64/kernel/sleep.S | 36
-rw-r--r--  arch/arm64/kernel/smp.c | 2
-rw-r--r--  arch/arm64/kernel/suspend.c | 4
-rw-r--r--  arch/arm64/kernel/sys_compat.c | 49
-rw-r--r--  arch/arm64/kernel/topology.c | 7
-rw-r--r--  arch/arm64/kernel/trace-events-emulation.h | 35
-rw-r--r--  arch/arm64/kernel/traps.c | 66
-rw-r--r--  arch/arm64/kernel/vmlinux.lds.S | 33
-rw-r--r--  arch/arm64/kvm/hyp.S | 4
-rw-r--r--  arch/arm64/kvm/sys_regs.c | 9
-rw-r--r--  arch/arm64/lib/clear_user.S | 2
-rw-r--r--  arch/arm64/mm/Makefile | 1
-rw-r--r--  arch/arm64/mm/cache.S | 10
-rw-r--r--  arch/arm64/mm/dump.c | 332
-rw-r--r--  arch/arm64/mm/fault.c | 2
-rw-r--r--  arch/arm64/mm/init.c | 2
-rw-r--r--  arch/arm64/mm/ioremap.c | 93
-rw-r--r--  arch/arm64/mm/mm.h | 1
-rw-r--r--  arch/arm64/mm/mmap.c | 12
-rw-r--r--  arch/arm64/mm/mmu.c | 96
-rw-r--r--  arch/arm64/mm/pgd.c | 4
-rw-r--r--  arch/arm64/net/bpf_jit_comp.c | 13
-rw-r--r--  arch/avr32/mach-at32ap/at32ap700x.c | 2
-rw-r--r--  arch/avr32/mach-at32ap/include/mach/atmel-mci.h | 17
-rw-r--r--  arch/ia64/kvm/kvm-ia64.c | 2
-rw-r--r--  arch/m68k/atari/config.c | 27
-rw-r--r--  arch/m68k/atari/stdma.c | 61
-rw-r--r--  arch/m68k/include/asm/atari_stdma.h | 4
-rw-r--r--  arch/m68k/include/asm/macintosh.h | 4
-rw-r--r--  arch/m68k/include/asm/unistd.h | 2
-rw-r--r--  arch/m68k/include/uapi/asm/unistd.h | 1
-rw-r--r--  arch/m68k/kernel/syscalltable.S | 1
-rw-r--r--  arch/m68k/mac/config.c | 146
-rw-r--r--  arch/m68k/mm/init.c | 1
-rw-r--r--  arch/m68k/sun3/config.c | 60
-rw-r--r--  arch/microblaze/include/asm/tlb.h | 3
-rw-r--r--  arch/mips/Kconfig | 14
-rw-r--r--  arch/mips/Makefile | 9
-rw-r--r--  arch/mips/cavium-octeon/octeon-irq.c | 2
-rw-r--r--  arch/mips/include/asm/asmmacro-32.h | 6
-rw-r--r--  arch/mips/include/asm/asmmacro.h | 18
-rw-r--r--  arch/mips/include/asm/fpregdef.h | 14
-rw-r--r--  arch/mips/include/asm/fpu.h | 4
-rw-r--r--  arch/mips/include/asm/jump_label.h | 8
-rw-r--r--  arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h | 2
-rw-r--r--  arch/mips/include/asm/mipsregs.h | 13
-rw-r--r--  arch/mips/include/asm/r4kcache.h | 4
-rw-r--r--  arch/mips/include/asm/uaccess.h | 18
-rw-r--r--  arch/mips/include/uapi/asm/unistd.h | 15
-rw-r--r--  arch/mips/kernel/bmips_vec.S | 3
-rw-r--r--  arch/mips/kernel/branch.c | 8
-rw-r--r--  arch/mips/kernel/cps-vec.S | 2
-rw-r--r--  arch/mips/kernel/cpu-probe.c | 40
-rw-r--r--  arch/mips/kernel/genex.S | 1
-rw-r--r--  arch/mips/kernel/jump_label.c | 42
-rw-r--r--  arch/mips/kernel/r2300_fpu.S | 6
-rw-r--r--  arch/mips/kernel/r2300_switch.S | 5
-rw-r--r--  arch/mips/kernel/r4k_fpu.S | 27
-rw-r--r--  arch/mips/kernel/r4k_switch.S | 15
-rw-r--r--  arch/mips/kernel/r6000_fpu.S | 5
-rw-r--r--  arch/mips/kernel/rtlx.c | 4
-rw-r--r--  arch/mips/kernel/scall32-o32.S | 1
-rw-r--r--  arch/mips/kernel/scall64-64.S | 1
-rw-r--r--  arch/mips/kernel/scall64-n32.S | 1
-rw-r--r--  arch/mips/kernel/scall64-o32.S | 1
-rw-r--r--  arch/mips/kernel/setup.c | 7
-rw-r--r--  arch/mips/kernel/signal.c | 8
-rw-r--r--  arch/mips/lib/memcpy.S | 1
-rw-r--r--  arch/mips/lib/r3k_dump_tlb.c | 4
-rw-r--r--  arch/mips/lib/strnlen_user.S | 6
-rw-r--r--  arch/mips/loongson/common/Makefile | 3
-rw-r--r--  arch/mips/loongson/loongson-3/numa.c | 1
-rw-r--r--  arch/mips/math-emu/cp1emu.c | 6
-rw-r--r--  arch/mips/mm/tlb-r4k.c | 4
-rw-r--r--  arch/mips/mm/tlbex.c | 10
-rw-r--r--  arch/mips/mti-sead3/sead3-leds.c | 8
-rw-r--r--  arch/mips/netlogic/xlp/Makefile | 12
-rw-r--r--  arch/mips/oprofile/backtrace.c | 2
-rw-r--r--  arch/mips/pci/msi-xlp.c | 4
-rw-r--r--  arch/mips/sgi-ip27/ip27-memory.c | 1
-rw-r--r--  arch/nios2/Kconfig | 206
-rw-r--r--  arch/nios2/Kconfig.debug | 17
-rw-r--r--  arch/nios2/Makefile | 73
-rw-r--r--  arch/nios2/boot/Makefile | 52
-rw-r--r--  arch/nios2/boot/dts/3c120_devboard.dts | 164
-rw-r--r--  arch/nios2/boot/install.sh | 52
-rw-r--r--  arch/nios2/boot/linked_dtb.S | 19
-rw-r--r--  arch/nios2/configs/3c120_defconfig | 77
-rw-r--r--  arch/nios2/include/asm/Kbuild | 66
-rw-r--r--  arch/nios2/include/asm/asm-macros.h | 309
-rw-r--r--  arch/nios2/include/asm/asm-offsets.h | 20
-rw-r--r--  arch/nios2/include/asm/cache.h | 36
-rw-r--r--  arch/nios2/include/asm/cacheflush.h | 52
-rw-r--r--  arch/nios2/include/asm/checksum.h | 78
-rw-r--r--  arch/nios2/include/asm/cmpxchg.h | 61
-rw-r--r--  arch/nios2/include/asm/cpuinfo.h | 57
-rw-r--r--  arch/nios2/include/asm/delay.h | 21
-rw-r--r--  arch/nios2/include/asm/dma-mapping.h | 140
-rw-r--r--  arch/nios2/include/asm/elf.h | 101
-rw-r--r--  arch/nios2/include/asm/entry.h | 120
-rw-r--r--  arch/nios2/include/asm/io.h | 61
-rw-r--r--  arch/nios2/include/asm/irq.h | 28
-rw-r--r--  arch/nios2/include/asm/irqflags.h | 72
-rw-r--r--  arch/nios2/include/asm/linkage.h | 25
-rw-r--r--  arch/nios2/include/asm/mmu.h | 16
-rw-r--r--  arch/nios2/include/asm/mmu_context.h | 66
-rw-r--r--  arch/nios2/include/asm/mutex.h | 1
-rw-r--r--  arch/nios2/include/asm/page.h | 109
-rw-r--r--  arch/nios2/include/asm/pgalloc.h | 86
-rw-r--r--  arch/nios2/include/asm/pgtable-bits.h | 35
-rw-r--r--  arch/nios2/include/asm/pgtable.h | 310
-rw-r--r--  arch/nios2/include/asm/processor.h | 103
-rw-r--r--  arch/nios2/include/asm/ptrace.h | 33
-rw-r--r--  arch/nios2/include/asm/registers.h | 71
-rw-r--r--  arch/nios2/include/asm/setup.h | 38
-rw-r--r--  arch/nios2/include/asm/signal.h | 22
-rw-r--r--  arch/nios2/include/asm/string.h | 24
-rw-r--r--  arch/nios2/include/asm/switch_to.h | 31
-rw-r--r--  arch/nios2/include/asm/syscall.h | 138
-rw-r--r--  arch/nios2/include/asm/syscalls.h | 25
-rw-r--r--  arch/nios2/include/asm/thread_info.h | 120
-rw-r--r--  arch/nios2/include/asm/timex.h | 24
-rw-r--r--  arch/nios2/include/asm/tlb.h | 34
-rw-r--r--  arch/nios2/include/asm/tlbflush.h | 46
-rw-r--r--  arch/nios2/include/asm/traps.h | 19
-rw-r--r--  arch/nios2/include/asm/uaccess.h | 231
-rw-r--r--  arch/nios2/include/asm/ucontext.h | 32
-rw-r--r--  arch/nios2/include/uapi/asm/Kbuild | 4
-rw-r--r--  arch/nios2/include/uapi/asm/byteorder.h | 22
-rw-r--r--  arch/nios2/include/uapi/asm/elf.h | 67
-rw-r--r--  arch/nios2/include/uapi/asm/ptrace.h | 120
-rw-r--r--  arch/nios2/include/uapi/asm/sigcontext.h | 28
-rw-r--r--  arch/nios2/include/uapi/asm/signal.h | 23
-rw-r--r--  arch/nios2/include/uapi/asm/swab.h | 37
-rw-r--r--  arch/nios2/include/uapi/asm/unistd.h | 25
-rw-r--r--  arch/nios2/kernel/Makefile | 24
-rw-r--r--  arch/nios2/kernel/asm-offsets.c | 87
-rw-r--r--  arch/nios2/kernel/cpuinfo.c | 197
-rw-r--r--  arch/nios2/kernel/entry.S | 555
-rw-r--r--  arch/nios2/kernel/head.S | 175
-rw-r--r--  arch/nios2/kernel/insnemu.S | 592
-rw-r--r--  arch/nios2/kernel/irq.c | 93
-rw-r--r--  arch/nios2/kernel/misaligned.c | 256
-rw-r--r--  arch/nios2/kernel/module.c | 138
-rw-r--r--  arch/nios2/kernel/nios2_ksyms.c | 33
-rw-r--r--  arch/nios2/kernel/process.c | 258
-rw-r--r--  arch/nios2/kernel/prom.c | 65
-rw-r--r--  arch/nios2/kernel/ptrace.c | 166
-rw-r--r--  arch/nios2/kernel/setup.c | 218
-rw-r--r--  arch/nios2/kernel/signal.c | 323
-rw-r--r--  arch/nios2/kernel/sys_nios2.c | 53
-rw-r--r--  arch/nios2/kernel/syscall_table.c | 29
-rw-r--r--  arch/nios2/kernel/time.c | 308
-rw-r--r--  arch/nios2/kernel/traps.c | 185
-rw-r--r--  arch/nios2/kernel/vmlinux.lds.S | 75
-rw-r--r--  arch/nios2/lib/Makefile | 8
-rw-r--r--  arch/nios2/lib/delay.c | 52
-rw-r--r--  arch/nios2/lib/memcpy.c | 202
-rw-r--r--  arch/nios2/lib/memmove.c | 82
-rw-r--r--  arch/nios2/lib/memset.c | 81
-rw-r--r--  arch/nios2/mm/Makefile | 14
-rw-r--r--  arch/nios2/mm/cacheflush.c | 271
-rw-r--r--  arch/nios2/mm/dma-mapping.c | 186
-rw-r--r--  arch/nios2/mm/extable.c | 25
-rw-r--r--  arch/nios2/mm/fault.c | 251
-rw-r--r--  arch/nios2/mm/init.c | 142
-rw-r--r--  arch/nios2/mm/ioremap.c | 187
-rw-r--r--  arch/nios2/mm/mmu_context.c | 116
-rw-r--r--  arch/nios2/mm/pgtable.c | 74
-rw-r--r--  arch/nios2/mm/tlb.c | 275
-rw-r--r--  arch/nios2/mm/uaccess.c | 163
-rw-r--r--  arch/nios2/platform/Kconfig.platform | 129
-rw-r--r--  arch/nios2/platform/Makefile | 1
-rw-r--r--  arch/nios2/platform/platform.c | 46
-rw-r--r--  arch/parisc/include/asm/uaccess.h | 19
-rw-r--r--  arch/parisc/include/uapi/asm/bitsperlong.h | 8
-rw-r--r--  arch/parisc/include/uapi/asm/msgbuf.h | 8
-rw-r--r--  arch/parisc/include/uapi/asm/sembuf.h | 6
-rw-r--r--  arch/parisc/include/uapi/asm/shmbuf.h | 35
-rw-r--r--  arch/parisc/include/uapi/asm/signal.h | 2
-rw-r--r--  arch/parisc/include/uapi/asm/unistd.h | 3
-rw-r--r--  arch/parisc/kernel/syscall_table.S | 9
-rw-r--r--  arch/powerpc/include/asm/fadump.h | 52
-rw-r--r--  arch/powerpc/include/asm/pci-bridge.h | 2
-rw-r--r--  arch/powerpc/include/asm/pgalloc.h | 3
-rw-r--r--  arch/powerpc/include/asm/tlb.h | 1
-rw-r--r--  arch/powerpc/kernel/eeh_sysfs.c | 2
-rw-r--r--  arch/powerpc/kernel/entry_64.S | 6
-rw-r--r--  arch/powerpc/kernel/fadump.c | 114
-rw-r--r--  arch/powerpc/kernel/pci_64.c | 10
-rw-r--r--  arch/powerpc/kernel/vdso32/getcpu.S | 4
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c | 2
-rw-r--r--  arch/powerpc/mm/init_32.c | 2
-rw-r--r--  arch/powerpc/platforms/powernv/opal-hmi.c | 2
-rw-r--r--  arch/powerpc/platforms/powernv/opal-lpc.c | 59
-rw-r--r--  arch/powerpc/platforms/powernv/opal-sensor.c | 20
-rw-r--r--  arch/powerpc/platforms/powernv/pci-ioda.c | 5
-rw-r--r--  arch/powerpc/platforms/powernv/pci.c | 3
-rw-r--r--  arch/powerpc/platforms/pseries/dlpar.c | 4
-rw-r--r--  arch/powerpc/platforms/pseries/lpar.c | 14
-rw-r--r--  arch/powerpc/platforms/pseries/msi.c | 2
-rw-r--r--  arch/powerpc/sysdev/fsl_msi.c | 2
-rw-r--r--  arch/powerpc/xmon/xmon.c | 6
-rw-r--r--  arch/s390/configs/default_defconfig | 36
-rw-r--r--  arch/s390/configs/gcov_defconfig | 25
-rw-r--r--  arch/s390/configs/performance_defconfig | 30
-rw-r--r--  arch/s390/configs/zfcpdump_defconfig | 10
-rw-r--r--  arch/s390/defconfig | 5
-rw-r--r--  arch/s390/kernel/ftrace.c | 2
-rw-r--r--  arch/s390/kernel/nmi.c | 8
-rw-r--r--  arch/s390/kernel/vdso32/clock_gettime.S | 12
-rw-r--r--  arch/s390/kernel/vdso32/gettimeofday.S | 14
-rw-r--r--  arch/s390/kernel/vdso64/clock_gettime.S | 13
-rw-r--r--  arch/s390/kernel/vdso64/gettimeofday.S | 6
-rw-r--r--  arch/s390/kernel/vtime.c | 4
-rw-r--r--  arch/sparc/include/asm/atomic_32.h | 2
-rw-r--r--  arch/sparc/include/asm/cmpxchg_32.h | 12
-rw-r--r--  arch/sparc/include/asm/dma-mapping.h | 8
-rw-r--r--  arch/sparc/include/uapi/asm/swab.h | 12
-rw-r--r--  arch/sparc/kernel/pci_schizo.c | 6
-rw-r--r--  arch/sparc/kernel/smp_64.c | 4
-rw-r--r--  arch/sparc/lib/atomic32.c | 27
-rw-r--r--  arch/x86/Kconfig | 2
-rw-r--r--  arch/x86/boot/compressed/Makefile | 4
-rw-r--r--  arch/x86/boot/compressed/head_32.S | 5
-rw-r--r--  arch/x86/boot/compressed/head_64.S | 5
-rw-r--r--  arch/x86/boot/compressed/misc.c | 13
-rw-r--r--  arch/x86/boot/compressed/mkpiggy.c | 9
-rw-r--r--  arch/x86/include/asm/page_32_types.h | 1
-rw-r--r--  arch/x86/include/asm/page_64_types.h | 11
-rw-r--r--  arch/x86/include/asm/smp.h | 1
-rw-r--r--  arch/x86/include/asm/thread_info.h | 2
-rw-r--r--  arch/x86/include/asm/traps.h | 1
-rw-r--r--  arch/x86/kernel/amd_nb.c | 2
-rw-r--r--  arch/x86/kernel/cpu/common.c | 2
-rw-r--r--  arch/x86/kernel/cpu/microcode/amd_early.c | 33
-rw-r--r--  arch/x86/kernel/cpu/microcode/core.c | 10
-rw-r--r--  arch/x86/kernel/cpu/microcode/core_early.c | 2
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c | 49
-rw-r--r--  arch/x86/kernel/dumpstack_64.c | 1
-rw-r--r--  arch/x86/kernel/entry_64.S | 81
-rw-r--r--  arch/x86/kernel/ptrace.c | 2
-rw-r--r--  arch/x86/kernel/smpboot.c | 15
-rw-r--r--  arch/x86/kernel/traps.c | 71
-rw-r--r--  arch/x86/kvm/emulate.c | 8
-rw-r--r--  arch/x86/kvm/mmu.c | 6
-rw-r--r--  arch/x86/lib/csum-wrappers_64.c | 5
-rw-r--r--  arch/x86/mm/init_64.c | 11
-rw-r--r--  arch/x86/tools/calc_run_size.pl | 39
-rw-r--r--  arch/x86/xen/smp.c | 3
-rw-r--r--  arch/xtensa/Kconfig | 4
-rw-r--r--  arch/xtensa/boot/dts/lx200mx.dts | 16
-rw-r--r--  arch/xtensa/configs/generic_kc705_defconfig | 131
-rw-r--r--  arch/xtensa/configs/smp_lx200_defconfig | 135
-rw-r--r--  arch/xtensa/include/asm/pgtable.h | 2
-rw-r--r--  arch/xtensa/include/uapi/asm/unistd.h | 12
403 files changed, 14922 insertions, 1615 deletions
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 03dc4c1a8736..d8f6a2ec3d4e 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -1187,7 +1187,7 @@ config DEBUG_UART_VIRT
default 0xf1c28000 if DEBUG_SUNXI_UART0
default 0xf1c28400 if DEBUG_SUNXI_UART1
default 0xf1f02800 if DEBUG_SUNXI_R_UART
- default 0xf2100000 if DEBUG_PXA_UART1
+ default 0xf6200000 if DEBUG_PXA_UART1
default 0xf4090000 if ARCH_LPC32XX
default 0xf4200000 if ARCH_GEMINI
default 0xf7000000 if DEBUG_S3C24XX_UART && (DEBUG_S3C_UART0 || \
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 413fd94b5301..68be9017593d 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -397,8 +397,7 @@ dtb_check_done:
add sp, sp, r6
#endif
- tst r4, #1
- bleq cache_clean_flush
+ bl cache_clean_flush
adr r0, BSYM(restart)
add r0, r0, r6
@@ -1047,6 +1046,8 @@ cache_clean_flush:
b call_cache_fn
__armv4_mpu_cache_flush:
+ tst r4, #1
+ movne pc, lr
mov r2, #1
mov r3, #0
mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
@@ -1064,6 +1065,8 @@ __armv4_mpu_cache_flush:
mov pc, lr
__fa526_cache_flush:
+ tst r4, #1
+ movne pc, lr
mov r1, #0
mcr p15, 0, r1, c7, c14, 0 @ clean and invalidate D cache
mcr p15, 0, r1, c7, c5, 0 @ flush I cache
@@ -1072,13 +1075,16 @@ __fa526_cache_flush:
__armv6_mmu_cache_flush:
mov r1, #0
- mcr p15, 0, r1, c7, c14, 0 @ clean+invalidate D
+ tst r4, #1
+ mcreq p15, 0, r1, c7, c14, 0 @ clean+invalidate D
mcr p15, 0, r1, c7, c5, 0 @ invalidate I+BTB
- mcr p15, 0, r1, c7, c15, 0 @ clean+invalidate unified
+ mcreq p15, 0, r1, c7, c15, 0 @ clean+invalidate unified
mcr p15, 0, r1, c7, c10, 4 @ drain WB
mov pc, lr
__armv7_mmu_cache_flush:
+ tst r4, #1
+ bne iflush
mrc p15, 0, r10, c0, c1, 5 @ read ID_MMFR1
tst r10, #0xf << 16 @ hierarchical cache (ARMv7)
mov r10, #0
@@ -1139,6 +1145,8 @@ iflush:
mov pc, lr
__armv5tej_mmu_cache_flush:
+ tst r4, #1
+ movne pc, lr
1: mrc p15, 0, r15, c7, c14, 3 @ test,clean,invalidate D cache
bne 1b
mcr p15, 0, r0, c7, c5, 0 @ flush I cache
@@ -1146,6 +1154,8 @@ __armv5tej_mmu_cache_flush:
mov pc, lr
__armv4_mmu_cache_flush:
+ tst r4, #1
+ movne pc, lr
mov r2, #64*1024 @ default: 32K dcache size (*2)
mov r11, #32 @ default: 32 byte line size
mrc p15, 0, r3, c0, c0, 1 @ read cache type
@@ -1179,6 +1189,8 @@ no_cache_id:
__armv3_mmu_cache_flush:
__armv3_mpu_cache_flush:
+ tst r4, #1
+ movne pc, lr
mov r1, #0
mcr p15, 0, r1, c7, c0, 0 @ invalidate whole cache v3
mov pc, lr
diff --git a/arch/arm/boot/dts/am335x-evm.dts b/arch/arm/boot/dts/am335x-evm.dts
index e2156a583de7..c4b968f0feb5 100644
--- a/arch/arm/boot/dts/am335x-evm.dts
+++ b/arch/arm/boot/dts/am335x-evm.dts
@@ -489,7 +489,7 @@
reg = <0x00060000 0x00020000>;
};
partition@4 {
- label = "NAND.u-boot-spl";
+ label = "NAND.u-boot-spl-os";
reg = <0x00080000 0x00040000>;
};
partition@5 {
diff --git a/arch/arm/boot/dts/am437x-gp-evm.dts b/arch/arm/boot/dts/am437x-gp-evm.dts
index e7ac47fa6615..a521ac0a7d5a 100644
--- a/arch/arm/boot/dts/am437x-gp-evm.dts
+++ b/arch/arm/boot/dts/am437x-gp-evm.dts
@@ -291,8 +291,8 @@
dcdc3: regulator-dcdc3 {
compatible = "ti,tps65218-dcdc3";
regulator-name = "vdcdc3";
- regulator-min-microvolt = <1350000>;
- regulator-max-microvolt = <1350000>;
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1500000>;
regulator-boot-on;
regulator-always-on;
};
diff --git a/arch/arm/boot/dts/am437x-sk-evm.dts b/arch/arm/boot/dts/am437x-sk-evm.dts
index 859ff3d620ee..87aa4f3b8b3d 100644
--- a/arch/arm/boot/dts/am437x-sk-evm.dts
+++ b/arch/arm/boot/dts/am437x-sk-evm.dts
@@ -363,8 +363,8 @@
dcdc3: regulator-dcdc3 {
compatible = "ti,tps65218-dcdc3";
regulator-name = "vdds_ddr";
- regulator-min-microvolt = <1350000>;
- regulator-max-microvolt = <1350000>;
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1500000>;
regulator-boot-on;
regulator-always-on;
};
diff --git a/arch/arm/boot/dts/am43x-epos-evm.dts b/arch/arm/boot/dts/am43x-epos-evm.dts
index ac3e4859935f..f7e9bba10bd6 100644
--- a/arch/arm/boot/dts/am43x-epos-evm.dts
+++ b/arch/arm/boot/dts/am43x-epos-evm.dts
@@ -358,8 +358,8 @@
dcdc3: regulator-dcdc3 {
compatible = "ti,tps65218-dcdc3";
regulator-name = "vdcdc3";
- regulator-min-microvolt = <1350000>;
- regulator-max-microvolt = <1350000>;
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1500000>;
regulator-boot-on;
regulator-always-on;
};
diff --git a/arch/arm/boot/dts/exynos5250-snow.dts b/arch/arm/boot/dts/exynos5250-snow.dts
index e51fcef884a4..60429ad1c5d8 100644
--- a/arch/arm/boot/dts/exynos5250-snow.dts
+++ b/arch/arm/boot/dts/exynos5250-snow.dts
@@ -624,4 +624,8 @@
num-cs = <1>;
};
+&usbdrd_dwc3 {
+ dr_mode = "host";
+};
+
#include "cros-ec-keyboard.dtsi"
diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
index f21b9aa00fbb..d55c1a2eb798 100644
--- a/arch/arm/boot/dts/exynos5250.dtsi
+++ b/arch/arm/boot/dts/exynos5250.dtsi
@@ -555,7 +555,7 @@
#size-cells = <1>;
ranges;
- dwc3 {
+ usbdrd_dwc3: dwc3 {
compatible = "synopsys,dwc3";
reg = <0x12000000 0x10000>;
interrupts = <0 72 0>;
diff --git a/arch/arm/boot/dts/omap3-evm-common.dtsi b/arch/arm/boot/dts/omap3-evm-common.dtsi
index c8747c7f1cc8..127f3e7c10c4 100644
--- a/arch/arm/boot/dts/omap3-evm-common.dtsi
+++ b/arch/arm/boot/dts/omap3-evm-common.dtsi
@@ -2,6 +2,7 @@
* Common support for omap3 EVM boards
*/
+#include <dt-bindings/input/input.h>
#include "omap-gpmc-smsc911x.dtsi"
/ {
@@ -111,6 +112,26 @@
ti,use-leds;
};
+&twl_keypad {
+ linux,keymap = <
+ MATRIX_KEY(2, 2, KEY_1)
+ MATRIX_KEY(1, 1, KEY_2)
+ MATRIX_KEY(0, 0, KEY_3)
+ MATRIX_KEY(3, 2, KEY_4)
+ MATRIX_KEY(2, 1, KEY_5)
+ MATRIX_KEY(1, 0, KEY_6)
+ MATRIX_KEY(1, 3, KEY_7)
+ MATRIX_KEY(3, 1, KEY_8)
+ MATRIX_KEY(2, 0, KEY_9)
+ MATRIX_KEY(2, 3, KEY_KPASTERISK)
+ MATRIX_KEY(0, 2, KEY_0)
+ MATRIX_KEY(3, 0, KEY_KPDOT)
+ /* s4 not wired */
+ MATRIX_KEY(1, 2, KEY_BACKSPACE)
+ MATRIX_KEY(0, 1, KEY_ENTER)
+ >;
+};
+
&usb_otg_hs {
interface-type = <0>;
usb-phy = <&usb2_phy>;
diff --git a/arch/arm/boot/dts/omap3-ldp.dts b/arch/arm/boot/dts/omap3-ldp.dts
index 72dca0b7904d..77fee3fb7515 100644
--- a/arch/arm/boot/dts/omap3-ldp.dts
+++ b/arch/arm/boot/dts/omap3-ldp.dts
@@ -7,6 +7,7 @@
*/
/dts-v1/;
+#include <dt-bindings/input/input.h>
#include "omap34xx.dtsi"
#include "omap-gpmc-smsc911x.dtsi"
@@ -141,7 +142,7 @@
};
partition@2000000 {
label = "Filesystem";
- reg = <0x2000000 0xe000000>;
+ reg = <0x2000000 0x6000000>;
};
};
@@ -263,6 +264,26 @@
};
};
+&twl_keypad {
+ linux,keymap = <MATRIX_KEY(0, 0, KEY_1)
+ MATRIX_KEY(0, 1, KEY_2)
+ MATRIX_KEY(0, 2, KEY_3)
+ MATRIX_KEY(1, 0, KEY_4)
+ MATRIX_KEY(1, 1, KEY_5)
+ MATRIX_KEY(1, 2, KEY_6)
+ MATRIX_KEY(1, 3, KEY_F5)
+ MATRIX_KEY(2, 0, KEY_7)
+ MATRIX_KEY(2, 1, KEY_8)
+ MATRIX_KEY(2, 2, KEY_9)
+ MATRIX_KEY(2, 3, KEY_F6)
+ MATRIX_KEY(3, 0, KEY_F7)
+ MATRIX_KEY(3, 1, KEY_0)
+ MATRIX_KEY(3, 2, KEY_F8)
+ MATRIX_KEY(5, 4, KEY_RESERVED)
+ MATRIX_KEY(4, 4, KEY_VOLUMEUP)
+ MATRIX_KEY(5, 5, KEY_VOLUMEDOWN)>;
+};
+
&uart3 {
interrupts-extended = <&intc 74 &omap3_pmx_core OMAP3_UART3_RX>;
};
diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi
index d0e884d3a737..e602e75ce5b7 100644
--- a/arch/arm/boot/dts/omap3.dtsi
+++ b/arch/arm/boot/dts/omap3.dtsi
@@ -79,7 +79,7 @@
* hierarchy.
*/
ocp {
- compatible = "simple-bus";
+ compatible = "ti,omap3-l3-smx", "simple-bus";
reg = <0x68000000 0x10000>;
interrupts = <9 10>;
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/r8a7740.dtsi b/arch/arm/boot/dts/r8a7740.dtsi
index d46c213a17ad..eed697a6bd6b 100644
--- a/arch/arm/boot/dts/r8a7740.dtsi
+++ b/arch/arm/boot/dts/r8a7740.dtsi
@@ -433,7 +433,7 @@
clocks = <&cpg_clocks R8A7740_CLK_S>,
<&cpg_clocks R8A7740_CLK_S>, <&sub_clk>,
<&cpg_clocks R8A7740_CLK_B>,
- <&sub_clk>, <&sub_clk>,
+ <&cpg_clocks R8A7740_CLK_HPP>, <&sub_clk>,
<&cpg_clocks R8A7740_CLK_B>;
#clock-cells = <1>;
renesas,clock-indices = <
diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi
index b7c59b7b06b0..0c20c90d8c06 100644
--- a/arch/arm/boot/dts/r8a7790.dtsi
+++ b/arch/arm/boot/dts/r8a7790.dtsi
@@ -756,9 +756,9 @@
#clock-cells = <0>;
clock-output-names = "sd2";
};
- sd3_clk: sd3_clk@e615007c {
+ sd3_clk: sd3_clk@e615026c {
compatible = "renesas,r8a7790-div6-clock", "renesas,cpg-div6-clock";
- reg = <0 0xe615007c 0 4>;
+ reg = <0 0xe615026c 0 4>;
clocks = <&pll1_div2_clk>;
#clock-cells = <0>;
clock-output-names = "sd3";
diff --git a/arch/arm/boot/dts/sama5d31.dtsi b/arch/arm/boot/dts/sama5d31.dtsi
index 7997dc9863ed..883878b32971 100644
--- a/arch/arm/boot/dts/sama5d31.dtsi
+++ b/arch/arm/boot/dts/sama5d31.dtsi
@@ -12,5 +12,5 @@
#include "sama5d3_uart.dtsi"
/ {
- compatible = "atmel,samad31", "atmel,sama5d3", "atmel,sama5";
+ compatible = "atmel,sama5d31", "atmel,sama5d3", "atmel,sama5";
};
diff --git a/arch/arm/boot/dts/sama5d33.dtsi b/arch/arm/boot/dts/sama5d33.dtsi
index 39f832253caf..4b4434aca351 100644
--- a/arch/arm/boot/dts/sama5d33.dtsi
+++ b/arch/arm/boot/dts/sama5d33.dtsi
@@ -10,5 +10,5 @@
#include "sama5d3_gmac.dtsi"
/ {
- compatible = "atmel,samad33", "atmel,sama5d3", "atmel,sama5";
+ compatible = "atmel,sama5d33", "atmel,sama5d3", "atmel,sama5";
};
diff --git a/arch/arm/boot/dts/sama5d34.dtsi b/arch/arm/boot/dts/sama5d34.dtsi
index 89cda2c0da39..aa01573fdee9 100644
--- a/arch/arm/boot/dts/sama5d34.dtsi
+++ b/arch/arm/boot/dts/sama5d34.dtsi
@@ -12,5 +12,5 @@
#include "sama5d3_mci2.dtsi"
/ {
- compatible = "atmel,samad34", "atmel,sama5d3", "atmel,sama5";
+ compatible = "atmel,sama5d34", "atmel,sama5d3", "atmel,sama5";
};
diff --git a/arch/arm/boot/dts/sama5d35.dtsi b/arch/arm/boot/dts/sama5d35.dtsi
index d20cd71b5f0e..16c39f4c96a4 100644
--- a/arch/arm/boot/dts/sama5d35.dtsi
+++ b/arch/arm/boot/dts/sama5d35.dtsi
@@ -14,5 +14,5 @@
#include "sama5d3_tcb1.dtsi"
/ {
- compatible = "atmel,samad35", "atmel,sama5d3", "atmel,sama5";
+ compatible = "atmel,sama5d35", "atmel,sama5d3", "atmel,sama5";
};
diff --git a/arch/arm/boot/dts/sama5d36.dtsi b/arch/arm/boot/dts/sama5d36.dtsi
index db58cad6acd3..e85139ef40af 100644
--- a/arch/arm/boot/dts/sama5d36.dtsi
+++ b/arch/arm/boot/dts/sama5d36.dtsi
@@ -16,5 +16,5 @@
#include "sama5d3_uart.dtsi"
/ {
- compatible = "atmel,samad36", "atmel,sama5d3", "atmel,sama5";
+ compatible = "atmel,sama5d36", "atmel,sama5d3", "atmel,sama5";
};
diff --git a/arch/arm/boot/dts/sama5d3xcm.dtsi b/arch/arm/boot/dts/sama5d3xcm.dtsi
index 962dc28dc37b..cfcd200b0c17 100644
--- a/arch/arm/boot/dts/sama5d3xcm.dtsi
+++ b/arch/arm/boot/dts/sama5d3xcm.dtsi
@@ -8,7 +8,7 @@
*/
/ {
- compatible = "atmel,samad3xcm", "atmel,sama5d3", "atmel,sama5";
+ compatible = "atmel,sama5d3xcm", "atmel,sama5d3", "atmel,sama5";
chosen {
bootargs = "console=ttyS0,115200 rootfstype=ubifs ubi.mtd=5 root=ubi0:rootfs";
diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi
index 543f895d18d3..2e652e2339e9 100644
--- a/arch/arm/boot/dts/sun6i-a31.dtsi
+++ b/arch/arm/boot/dts/sun6i-a31.dtsi
@@ -361,6 +361,10 @@
clocks = <&ahb1_gates 6>;
resets = <&ahb1_rst 6>;
#dma-cells = <1>;
+
+ /* DMA controller requires AHB1 clocked from PLL6 */
+ assigned-clocks = <&ahb1_mux>;
+ assigned-clock-parents = <&pll6>;
};
mmc0: mmc@01c0f000 {
diff --git a/arch/arm/boot/dts/tegra114-dalmore.dts b/arch/arm/boot/dts/tegra114-dalmore.dts
index 5c21d216515a..8b7aa0dcdc6e 100644
--- a/arch/arm/boot/dts/tegra114-dalmore.dts
+++ b/arch/arm/boot/dts/tegra114-dalmore.dts
@@ -15,6 +15,7 @@
aliases {
rtc0 = "/i2c@7000d000/tps65913@58";
rtc1 = "/rtc@7000e000";
+ serial0 = &uartd;
};
memory {
diff --git a/arch/arm/boot/dts/tegra114-roth.dts b/arch/arm/boot/dts/tegra114-roth.dts
index c7c6825f11fb..38acf78d7815 100644
--- a/arch/arm/boot/dts/tegra114-roth.dts
+++ b/arch/arm/boot/dts/tegra114-roth.dts
@@ -15,6 +15,10 @@
linux,initrd-end = <0x82800000>;
};
+ aliases {
+ serial0 = &uartd;
+ };
+
firmware {
trusted-foundations {
compatible = "tlm,trusted-foundations";
@@ -916,8 +920,6 @@
regulator-name = "vddio-sdmmc3";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
- regulator-always-on;
- regulator-boot-on;
};
ldousb {
@@ -962,7 +964,7 @@
sdhci@78000400 {
status = "okay";
bus-width = <4>;
- vmmc-supply = <&vddio_sdmmc3>;
+ vqmmc-supply = <&vddio_sdmmc3>;
cd-gpios = <&gpio TEGRA_GPIO(V, 2) GPIO_ACTIVE_LOW>;
power-gpios = <&gpio TEGRA_GPIO(H, 0) GPIO_ACTIVE_HIGH>;
};
@@ -971,7 +973,6 @@
sdhci@78000600 {
status = "okay";
bus-width = <8>;
- vmmc-supply = <&vdd_1v8>;
non-removable;
};
diff --git a/arch/arm/boot/dts/tegra114-tn7.dts b/arch/arm/boot/dts/tegra114-tn7.dts
index 963662145635..f91c2c9b2f94 100644
--- a/arch/arm/boot/dts/tegra114-tn7.dts
+++ b/arch/arm/boot/dts/tegra114-tn7.dts
@@ -15,6 +15,10 @@
linux,initrd-end = <0x82800000>;
};
+ aliases {
+ serial0 = &uartd;
+ };
+
firmware {
trusted-foundations {
compatible = "tlm,trusted-foundations";
@@ -240,7 +244,6 @@
sdhci@78000600 {
status = "okay";
bus-width = <8>;
- vmmc-supply = <&vdd_1v8>;
non-removable;
};
diff --git a/arch/arm/boot/dts/tegra114.dtsi b/arch/arm/boot/dts/tegra114.dtsi
index 2ca9c1807f72..222f3b3f4dd5 100644
--- a/arch/arm/boot/dts/tegra114.dtsi
+++ b/arch/arm/boot/dts/tegra114.dtsi
@@ -9,13 +9,6 @@
compatible = "nvidia,tegra114";
interrupt-parent = <&gic>;
- aliases {
- serial0 = &uarta;
- serial1 = &uartb;
- serial2 = &uartc;
- serial3 = &uartd;
- };
-
host1x@50000000 {
compatible = "nvidia,tegra114-host1x", "simple-bus";
reg = <0x50000000 0x00028000>;
diff --git a/arch/arm/boot/dts/tegra124-jetson-tk1.dts b/arch/arm/boot/dts/tegra124-jetson-tk1.dts
index 029c9a021541..51b373ff1065 100644
--- a/arch/arm/boot/dts/tegra124-jetson-tk1.dts
+++ b/arch/arm/boot/dts/tegra124-jetson-tk1.dts
@@ -10,6 +10,7 @@
aliases {
rtc0 = "/i2c@0,7000d000/pmic@40";
rtc1 = "/rtc@0,7000e000";
+ serial0 = &uartd;
};
memory {
diff --git a/arch/arm/boot/dts/tegra124-nyan-big.dts b/arch/arm/boot/dts/tegra124-nyan-big.dts
index 7d0784ce4c74..53181d310247 100644
--- a/arch/arm/boot/dts/tegra124-nyan-big.dts
+++ b/arch/arm/boot/dts/tegra124-nyan-big.dts
@@ -10,6 +10,7 @@
aliases {
rtc0 = "/i2c@0,7000d000/pmic@40";
rtc1 = "/rtc@0,7000e000";
+ serial0 = &uarta;
};
memory {
diff --git a/arch/arm/boot/dts/tegra124-venice2.dts b/arch/arm/boot/dts/tegra124-venice2.dts
index 13008858e967..5c3f7813360d 100644
--- a/arch/arm/boot/dts/tegra124-venice2.dts
+++ b/arch/arm/boot/dts/tegra124-venice2.dts
@@ -10,6 +10,7 @@
aliases {
rtc0 = "/i2c@0,7000d000/pmic@40";
rtc1 = "/rtc@0,7000e000";
+ serial0 = &uarta;
};
memory {
diff --git a/arch/arm/boot/dts/tegra124.dtsi b/arch/arm/boot/dts/tegra124.dtsi
index 478c555ebd96..df2b06b29985 100644
--- a/arch/arm/boot/dts/tegra124.dtsi
+++ b/arch/arm/boot/dts/tegra124.dtsi
@@ -286,7 +286,7 @@
* the APB DMA based serial driver, the comptible is
* "nvidia,tegra124-hsuart", "nvidia,tegra30-hsuart".
*/
- serial@0,70006000 {
+ uarta: serial@0,70006000 {
compatible = "nvidia,tegra124-uart", "nvidia,tegra20-uart";
reg = <0x0 0x70006000 0x0 0x40>;
reg-shift = <2>;
@@ -299,7 +299,7 @@
status = "disabled";
};
- serial@0,70006040 {
+ uartb: serial@0,70006040 {
compatible = "nvidia,tegra124-uart", "nvidia,tegra20-uart";
reg = <0x0 0x70006040 0x0 0x40>;
reg-shift = <2>;
@@ -312,7 +312,7 @@
status = "disabled";
};
- serial@0,70006200 {
+ uartc: serial@0,70006200 {
compatible = "nvidia,tegra124-uart", "nvidia,tegra20-uart";
reg = <0x0 0x70006200 0x0 0x40>;
reg-shift = <2>;
@@ -325,7 +325,7 @@
status = "disabled";
};
- serial@0,70006300 {
+ uartd: serial@0,70006300 {
compatible = "nvidia,tegra124-uart", "nvidia,tegra20-uart";
reg = <0x0 0x70006300 0x0 0x40>;
reg-shift = <2>;
diff --git a/arch/arm/boot/dts/tegra20-harmony.dts b/arch/arm/boot/dts/tegra20-harmony.dts
index a37279af687c..b926a07b9443 100644
--- a/arch/arm/boot/dts/tegra20-harmony.dts
+++ b/arch/arm/boot/dts/tegra20-harmony.dts
@@ -10,6 +10,7 @@
aliases {
rtc0 = "/i2c@7000d000/tps6586x@34";
rtc1 = "/rtc@7000e000";
+ serial0 = &uartd;
};
memory {
diff --git a/arch/arm/boot/dts/tegra20-iris-512.dts b/arch/arm/boot/dts/tegra20-iris-512.dts
index 8cfb83f42e1f..1dd7d7bfdfcc 100644
--- a/arch/arm/boot/dts/tegra20-iris-512.dts
+++ b/arch/arm/boot/dts/tegra20-iris-512.dts
@@ -6,6 +6,11 @@
model = "Toradex Colibri T20 512MB on Iris";
compatible = "toradex,iris", "toradex,colibri_t20-512", "nvidia,tegra20";
+ aliases {
+ serial0 = &uarta;
+ serial1 = &uartd;
+ };
+
host1x@50000000 {
hdmi@54280000 {
status = "okay";
diff --git a/arch/arm/boot/dts/tegra20-medcom-wide.dts b/arch/arm/boot/dts/tegra20-medcom-wide.dts
index 1b7c56b33aca..9b87526ab0b7 100644
--- a/arch/arm/boot/dts/tegra20-medcom-wide.dts
+++ b/arch/arm/boot/dts/tegra20-medcom-wide.dts
@@ -6,6 +6,10 @@
model = "Avionic Design Medcom-Wide board";
compatible = "ad,medcom-wide", "ad,tamonten", "nvidia,tegra20";
+ aliases {
+ serial0 = &uartd;
+ };
+
pwm@7000a000 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/tegra20-paz00.dts b/arch/arm/boot/dts/tegra20-paz00.dts
index d4438e30de45..ed7e1009326c 100644
--- a/arch/arm/boot/dts/tegra20-paz00.dts
+++ b/arch/arm/boot/dts/tegra20-paz00.dts
@@ -10,6 +10,8 @@
aliases {
rtc0 = "/i2c@7000d000/tps6586x@34";
rtc1 = "/rtc@7000e000";
+ serial0 = &uarta;
+ serial1 = &uartc;
};
memory {
diff --git a/arch/arm/boot/dts/tegra20-seaboard.dts b/arch/arm/boot/dts/tegra20-seaboard.dts
index a1d4bf9895d7..ea282c7c0ca5 100644
--- a/arch/arm/boot/dts/tegra20-seaboard.dts
+++ b/arch/arm/boot/dts/tegra20-seaboard.dts
@@ -10,6 +10,7 @@
aliases {
rtc0 = "/i2c@7000d000/tps6586x@34";
rtc1 = "/rtc@7000e000";
+ serial0 = &uartd;
};
memory {
diff --git a/arch/arm/boot/dts/tegra20-tamonten.dtsi b/arch/arm/boot/dts/tegra20-tamonten.dtsi
index 80e7d386ce34..13d4e6185275 100644
--- a/arch/arm/boot/dts/tegra20-tamonten.dtsi
+++ b/arch/arm/boot/dts/tegra20-tamonten.dtsi
@@ -7,6 +7,7 @@
aliases {
rtc0 = "/i2c@7000d000/tps6586x@34";
rtc1 = "/rtc@7000e000";
+ serial0 = &uartd;
};
memory {
diff --git a/arch/arm/boot/dts/tegra20-trimslice.dts b/arch/arm/boot/dts/tegra20-trimslice.dts
index 5ad87979ab13..d99af4ef9c64 100644
--- a/arch/arm/boot/dts/tegra20-trimslice.dts
+++ b/arch/arm/boot/dts/tegra20-trimslice.dts
@@ -10,6 +10,7 @@
aliases {
rtc0 = "/i2c@7000c500/rtc@56";
rtc1 = "/rtc@7000e000";
+ serial0 = &uarta;
};
memory {
diff --git a/arch/arm/boot/dts/tegra20-ventana.dts b/arch/arm/boot/dts/tegra20-ventana.dts
index ca8484cccddc..04c58e9ca490 100644
--- a/arch/arm/boot/dts/tegra20-ventana.dts
+++ b/arch/arm/boot/dts/tegra20-ventana.dts
@@ -10,6 +10,7 @@
aliases {
rtc0 = "/i2c@7000d000/tps6586x@34";
rtc1 = "/rtc@7000e000";
+ serial0 = &uartd;
};
memory {
diff --git a/arch/arm/boot/dts/tegra20-whistler.dts b/arch/arm/boot/dts/tegra20-whistler.dts
index 1843725785c9..340d81108df1 100644
--- a/arch/arm/boot/dts/tegra20-whistler.dts
+++ b/arch/arm/boot/dts/tegra20-whistler.dts
@@ -10,6 +10,7 @@
aliases {
rtc0 = "/i2c@7000d000/max8907@3c";
rtc1 = "/rtc@7000e000";
+ serial0 = &uarta;
};
memory {
diff --git a/arch/arm/boot/dts/tegra20.dtsi b/arch/arm/boot/dts/tegra20.dtsi
index 3b374c49d04d..8acf5d85c99d 100644
--- a/arch/arm/boot/dts/tegra20.dtsi
+++ b/arch/arm/boot/dts/tegra20.dtsi
@@ -9,14 +9,6 @@
compatible = "nvidia,tegra20";
interrupt-parent = <&intc>;
- aliases {
- serial0 = &uarta;
- serial1 = &uartb;
- serial2 = &uartc;
- serial3 = &uartd;
- serial4 = &uarte;
- };
-
host1x@50000000 {
compatible = "nvidia,tegra20-host1x", "simple-bus";
reg = <0x50000000 0x00024000>;
diff --git a/arch/arm/boot/dts/tegra30-apalis-eval.dts b/arch/arm/boot/dts/tegra30-apalis-eval.dts
index 45d40f024585..6236bdecb48b 100644
--- a/arch/arm/boot/dts/tegra30-apalis-eval.dts
+++ b/arch/arm/boot/dts/tegra30-apalis-eval.dts
@@ -11,6 +11,10 @@
rtc0 = "/i2c@7000c000/rtc@68";
rtc1 = "/i2c@7000d000/tps65911@2d";
rtc2 = "/rtc@7000e000";
+ serial0 = &uarta;
+ serial1 = &uartb;
+ serial2 = &uartc;
+ serial3 = &uartd;
};
pcie-controller@00003000 {
diff --git a/arch/arm/boot/dts/tegra30-beaver.dts b/arch/arm/boot/dts/tegra30-beaver.dts
index cee8f2246fdb..6b157eeabcc5 100644
--- a/arch/arm/boot/dts/tegra30-beaver.dts
+++ b/arch/arm/boot/dts/tegra30-beaver.dts
@@ -9,6 +9,7 @@
aliases {
rtc0 = "/i2c@7000d000/tps65911@2d";
rtc1 = "/rtc@7000e000";
+ serial0 = &uarta;
};
memory {
diff --git a/arch/arm/boot/dts/tegra30-cardhu.dtsi b/arch/arm/boot/dts/tegra30-cardhu.dtsi
index 206379546244..a1b682ea01bd 100644
--- a/arch/arm/boot/dts/tegra30-cardhu.dtsi
+++ b/arch/arm/boot/dts/tegra30-cardhu.dtsi
@@ -30,6 +30,8 @@
aliases {
rtc0 = "/i2c@7000d000/tps65911@2d";
rtc1 = "/rtc@7000e000";
+ serial0 = &uarta;
+ serial1 = &uartc;
};
memory {
diff --git a/arch/arm/boot/dts/tegra30-colibri-eval-v3.dts b/arch/arm/boot/dts/tegra30-colibri-eval-v3.dts
index 7793abd5bef1..4d3ddc585641 100644
--- a/arch/arm/boot/dts/tegra30-colibri-eval-v3.dts
+++ b/arch/arm/boot/dts/tegra30-colibri-eval-v3.dts
@@ -10,6 +10,9 @@
rtc0 = "/i2c@7000c000/rtc@68";
rtc1 = "/i2c@7000d000/tps65911@2d";
rtc2 = "/rtc@7000e000";
+ serial0 = &uarta;
+ serial1 = &uartb;
+ serial2 = &uartd;
};
host1x@50000000 {
diff --git a/arch/arm/boot/dts/tegra30.dtsi b/arch/arm/boot/dts/tegra30.dtsi
index aa6ccea13d30..b270b9e3d455 100644
--- a/arch/arm/boot/dts/tegra30.dtsi
+++ b/arch/arm/boot/dts/tegra30.dtsi
@@ -9,14 +9,6 @@
compatible = "nvidia,tegra30";
interrupt-parent = <&intc>;
- aliases {
- serial0 = &uarta;
- serial1 = &uartb;
- serial2 = &uartc;
- serial3 = &uartd;
- serial4 = &uarte;
- };
-
pcie-controller@00003000 {
compatible = "nvidia,tegra30-pcie";
device_type = "pci";
diff --git a/arch/arm/boot/dts/vf610-cosmic.dts b/arch/arm/boot/dts/vf610-cosmic.dts
index 3fd1b74e1216..de1b453c2932 100644
--- a/arch/arm/boot/dts/vf610-cosmic.dts
+++ b/arch/arm/boot/dts/vf610-cosmic.dts
@@ -33,6 +33,13 @@
};
+&esdhc1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_esdhc1>;
+ bus-width = <4>;
+ status = "okay";
+};
+
&fec1 {
phy-mode = "rmii";
pinctrl-names = "default";
@@ -42,6 +49,18 @@
&iomuxc {
vf610-cosmic {
+ pinctrl_esdhc1: esdhc1grp {
+ fsl,pins = <
+ VF610_PAD_PTA24__ESDHC1_CLK 0x31ef
+ VF610_PAD_PTA25__ESDHC1_CMD 0x31ef
+ VF610_PAD_PTA26__ESDHC1_DAT0 0x31ef
+ VF610_PAD_PTA27__ESDHC1_DAT1 0x31ef
+ VF610_PAD_PTA28__ESDHC1_DATA2 0x31ef
+ VF610_PAD_PTA29__ESDHC1_DAT3 0x31ef
+ VF610_PAD_PTB28__GPIO_98 0x219d
+ >;
+ };
+
pinctrl_fec1: fec1grp {
fsl,pins = <
VF610_PAD_PTC9__ENET_RMII1_MDC 0x30d2
diff --git a/arch/arm/boot/dts/zynq-parallella.dts b/arch/arm/boot/dts/zynq-parallella.dts
index e1f51ca127fe..0429bbd89fba 100644
--- a/arch/arm/boot/dts/zynq-parallella.dts
+++ b/arch/arm/boot/dts/zynq-parallella.dts
@@ -34,6 +34,10 @@
};
};
+&clkc {
+ fclk-enable = <0xf>;
+};
+
&gem0 {
status = "okay";
phy-mode = "rgmii-id";
diff --git a/arch/arm/common/edma.c b/arch/arm/common/edma.c
index d86771abbf57..72041f002b7e 100644
--- a/arch/arm/common/edma.c
+++ b/arch/arm/common/edma.c
@@ -26,6 +26,7 @@
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/edma.h>
+#include <linux/dma-mapping.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
@@ -1623,6 +1624,11 @@ static int edma_probe(struct platform_device *pdev)
struct device_node *node = pdev->dev.of_node;
struct device *dev = &pdev->dev;
int ret;
+ struct platform_device_info edma_dev_info = {
+ .name = "edma-dma-engine",
+ .dma_mask = DMA_BIT_MASK(32),
+ .parent = &pdev->dev,
+ };
if (node) {
/* Check if this is a second instance registered */
@@ -1793,6 +1799,9 @@ static int edma_probe(struct platform_device *pdev)
edma_write_array(j, EDMA_QRAE, i, 0x0);
}
arch_num_cc++;
+
+ edma_dev_info.id = j;
+ platform_device_register_full(&edma_dev_info);
}
return 0;
diff --git a/arch/arm/configs/exynos_defconfig b/arch/arm/configs/exynos_defconfig
index 72058b8a6f4d..e21ef830a483 100644
--- a/arch/arm/configs/exynos_defconfig
+++ b/arch/arm/configs/exynos_defconfig
@@ -142,11 +142,13 @@ CONFIG_MMC_DW_IDMAC=y
CONFIG_MMC_DW_EXYNOS=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_MAX77686=y
+CONFIG_RTC_DRV_MAX77802=y
CONFIG_RTC_DRV_S5M=y
CONFIG_RTC_DRV_S3C=y
CONFIG_DMADEVICES=y
CONFIG_PL330_DMA=y
CONFIG_COMMON_CLK_MAX77686=y
+CONFIG_COMMON_CLK_MAX77802=y
CONFIG_COMMON_CLK_S2MPS11=y
CONFIG_EXYNOS_IOMMU=y
CONFIG_IIO=y
diff --git a/arch/arm/configs/imx_v4_v5_defconfig b/arch/arm/configs/imx_v4_v5_defconfig
index e688741c89aa..e6b0007355f8 100644
--- a/arch/arm/configs/imx_v4_v5_defconfig
+++ b/arch/arm/configs/imx_v4_v5_defconfig
@@ -97,6 +97,7 @@ CONFIG_SERIAL_IMX_CONSOLE=y
# CONFIG_HW_RANDOM is not set
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_IMX=y
+CONFIG_SPI=y
CONFIG_SPI_IMX=y
CONFIG_SPI_SPIDEV=y
CONFIG_GPIO_SYSFS=y
diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
index 8fca6e276b69..6790f1b3f3a1 100644
--- a/arch/arm/configs/imx_v6_v7_defconfig
+++ b/arch/arm/configs/imx_v6_v7_defconfig
@@ -158,6 +158,7 @@ CONFIG_I2C_CHARDEV=y
CONFIG_I2C_ALGOPCF=m
CONFIG_I2C_ALGOPCA=m
CONFIG_I2C_IMX=y
+CONFIG_SPI=y
CONFIG_SPI_IMX=y
CONFIG_GPIO_SYSFS=y
CONFIG_GPIO_MC9S08DZ60=y
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index f1dc7fc668f3..9d7a32f93fcf 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -217,6 +217,7 @@ CONFIG_I2C_CADENCE=y
CONFIG_I2C_DESIGNWARE_PLATFORM=y
CONFIG_I2C_EXYNOS5=y
CONFIG_I2C_MV64XXX=y
+CONFIG_I2C_S3C2410=y
CONFIG_I2C_SIRF=y
CONFIG_I2C_TEGRA=y
CONFIG_I2C_ST=y
@@ -235,6 +236,7 @@ CONFIG_SPI_TEGRA20_SLINK=y
CONFIG_SPI_XILINX=y
CONFIG_PINCTRL_AS3722=y
CONFIG_PINCTRL_PALMAS=y
+CONFIG_PINCTRL_APQ8084=y
CONFIG_GPIO_SYSFS=y
CONFIG_GPIO_GENERIC_PLATFORM=y
CONFIG_GPIO_DWAPB=y
@@ -411,6 +413,7 @@ CONFIG_NVEC_POWER=y
CONFIG_NVEC_PAZ00=y
CONFIG_QCOM_GSBI=y
CONFIG_COMMON_CLK_QCOM=y
+CONFIG_APQ_MMCC_8084=y
CONFIG_MSM_GCC_8660=y
CONFIG_MSM_MMCC_8960=y
CONFIG_MSM_MMCC_8974=y
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index 16e719c268dd..b3f86670d2eb 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -86,7 +86,6 @@ CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_IP_PNP_RARP=y
# CONFIG_INET_LRO is not set
-CONFIG_IPV6=y
CONFIG_NETFILTER=y
CONFIG_CAN=m
CONFIG_CAN_C_CAN=m
@@ -112,6 +111,7 @@ CONFIG_MTD_OOPS=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_ECC_BCH=y
CONFIG_MTD_NAND_OMAP2=y
CONFIG_MTD_ONENAND=y
CONFIG_MTD_ONENAND_VERIFY_WRITE=y
@@ -317,7 +317,7 @@ CONFIG_EXT4_FS=y
CONFIG_FANOTIFY=y
CONFIG_QUOTA=y
CONFIG_QFMT_V2=y
-CONFIG_AUTOFS4_FS=y
+CONFIG_AUTOFS4_FS=m
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
diff --git a/arch/arm/configs/socfpga_defconfig b/arch/arm/configs/socfpga_defconfig
index d7a5855a5db8..a2956c3112f1 100644
--- a/arch/arm/configs/socfpga_defconfig
+++ b/arch/arm/configs/socfpga_defconfig
@@ -1,5 +1,6 @@
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
+CONFIG_FHANDLE=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
@@ -11,23 +12,17 @@ CONFIG_PROFILING=y
CONFIG_OPROFILE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
-CONFIG_HOTPLUG=y
# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_SOCFPGA=y
-CONFIG_MACH_SOCFPGA_CYCLONE5=y
CONFIG_ARM_THUMBEE=y
-# CONFIG_ARCH_VEXPRESS_CORTEX_A5_A9_ERRATA is not set
-# CONFIG_CACHE_L2X0 is not set
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_SMP=y
CONFIG_NR_CPUS=2
CONFIG_AEABI=y
CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE=""
CONFIG_VFP=y
CONFIG_NEON=y
CONFIG_NET=y
@@ -41,38 +36,30 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_IP_PNP_RARP=y
+CONFIG_IPV6=y
+CONFIG_NETWORK_PHY_TIMESTAMPING=y
+CONFIG_VLAN_8021Q=y
+CONFIG_VLAN_8021Q_GVRP=y
CONFIG_CAN=y
-CONFIG_CAN_RAW=y
-CONFIG_CAN_BCM=y
-CONFIG_CAN_GW=y
-CONFIG_CAN_DEV=y
-CONFIG_CAN_CALC_BITTIMING=y
CONFIG_CAN_C_CAN=y
CONFIG_CAN_C_CAN_PLATFORM=y
CONFIG_CAN_DEBUG_DEVICES=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
-CONFIG_PROC_DEVICETREE=y
+CONFIG_DEVTMPFS_MOUNT=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=2
CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_SRAM=y
CONFIG_SCSI=y
# CONFIG_SCSI_PROC_FS is not set
CONFIG_BLK_DEV_SD=y
# CONFIG_SCSI_LOWLEVEL is not set
CONFIG_NETDEVICES=y
CONFIG_STMMAC_ETH=y
+CONFIG_DWMAC_SOCFPGA=y
CONFIG_MICREL_PHY=y
-# CONFIG_STMMAC_PHY_ID_ZERO_WORKAROUND is not set
CONFIG_INPUT_EVDEV=y
-CONFIG_DWMAC_SOCFPGA=y
-CONFIG_PPS=y
-CONFIG_NETWORK_PHY_TIMESTAMPING=y
-CONFIG_PTP_1588_CLOCK=y
-CONFIG_VLAN_8021Q=y
-CONFIG_VLAN_8021Q_GVRP=y
-CONFIG_GARP=y
-CONFIG_IPV6=y
# CONFIG_SERIO_SERPORT is not set
CONFIG_SERIO_AMBAKMI=y
CONFIG_LEGACY_PTY_COUNT=16
@@ -81,45 +68,43 @@ CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=2
CONFIG_SERIAL_8250_RUNTIME_UARTS=2
CONFIG_SERIAL_8250_DW=y
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_DESIGNWARE_PLATFORM=y
CONFIG_GPIOLIB=y
CONFIG_GPIO_SYSFS=y
CONFIG_GPIO_DWAPB=y
-# CONFIG_RTC_HCTOSYS is not set
+CONFIG_PMBUS=y
+CONFIG_SENSORS_LTC2978=y
+CONFIG_SENSORS_LTC2978_REGULATOR=y
CONFIG_WATCHDOG=y
CONFIG_DW_WATCHDOG=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_USB=y
+CONFIG_USB_DWC2=y
+CONFIG_USB_DWC2_HOST=y
+CONFIG_MMC=y
+CONFIG_MMC_DW=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
CONFIG_EXT3_FS=y
-CONFIG_NFS_FS=y
-CONFIG_ROOT_NFS=y
-# CONFIG_DNOTIFY is not set
-# CONFIG_INOTIFY_USER is not set
-CONFIG_FHANDLE=y
+CONFIG_EXT4_FS=y
CONFIG_VFAT_FS=y
CONFIG_NTFS_FS=y
CONFIG_NTFS_RW=y
CONFIG_TMPFS=y
-CONFIG_JFFS2_FS=y
+CONFIG_CONFIGFS_FS=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_SCHED_DEBUG is not set
-CONFIG_DEBUG_INFO=y
CONFIG_ENABLE_DEFAULT_TRACERS=y
CONFIG_DEBUG_USER=y
CONFIG_XZ_DEC=y
-CONFIG_I2C=y
-CONFIG_I2C_DESIGNWARE_CORE=y
-CONFIG_I2C_DESIGNWARE_PLATFORM=y
-CONFIG_I2C_CHARDEV=y
-CONFIG_MMC=y
-CONFIG_MMC_DW=y
-CONFIG_PM=y
-CONFIG_SUSPEND=y
-CONFIG_MMC_UNSAFE_RESUME=y
-CONFIG_USB=y
-CONFIG_USB_DWC2=y
-CONFIG_USB_DWC2_HOST=y
-CONFIG_USB_DWC2_PLATFORM=y
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index fc44d3761f9e..ce73ab635414 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -44,16 +44,6 @@ struct cpu_context_save {
__u32 extra[2]; /* Xscale 'acc' register, etc */
};
-struct arm_restart_block {
- union {
- /* For user cache flushing */
- struct {
- unsigned long start;
- unsigned long end;
- } cache;
- };
-};
-
/*
* low level task data that entry.S needs immediate access to.
* __switch_to() assumes cpu_context follows immediately after cpu_domain.
@@ -79,7 +69,6 @@ struct thread_info {
unsigned long thumbee_state; /* ThumbEE Handler Base register */
#endif
struct restart_block restart_block;
- struct arm_restart_block arm_restart_block;
};
#define INIT_THREAD_INFO(tsk) \
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 0c8b10801d36..9f5d81881eb6 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -533,8 +533,6 @@ static int bad_syscall(int n, struct pt_regs *regs)
return regs->ARM_r0;
}
-static long do_cache_op_restart(struct restart_block *);
-
static inline int
__do_cache_op(unsigned long start, unsigned long end)
{
@@ -543,24 +541,8 @@ __do_cache_op(unsigned long start, unsigned long end)
do {
unsigned long chunk = min(PAGE_SIZE, end - start);
- if (signal_pending(current)) {
- struct thread_info *ti = current_thread_info();
-
- ti->restart_block = (struct restart_block) {
- .fn = do_cache_op_restart,
- };
-
- ti->arm_restart_block = (struct arm_restart_block) {
- {
- .cache = {
- .start = start,
- .end = end,
- },
- },
- };
-
- return -ERESTART_RESTARTBLOCK;
- }
+ if (fatal_signal_pending(current))
+ return 0;
ret = flush_cache_user_range(start, start + chunk);
if (ret)
@@ -573,15 +555,6 @@ __do_cache_op(unsigned long start, unsigned long end)
return 0;
}
-static long do_cache_op_restart(struct restart_block *unused)
-{
- struct arm_restart_block *restart_block;
-
- restart_block = &current_thread_info()->arm_restart_block;
- return __do_cache_op(restart_block->cache.start,
- restart_block->cache.end);
-}
-
static inline int
do_cache_op(unsigned long start, unsigned long end, int flags)
{
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 57a403a5c22b..8664ff17cbbe 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -197,7 +197,8 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
pgd = pgdp + pgd_index(addr);
do {
next = kvm_pgd_addr_end(addr, end);
- unmap_puds(kvm, pgd, addr, next);
+ if (!pgd_none(*pgd))
+ unmap_puds(kvm, pgd, addr, next);
} while (pgd++, addr = next, addr != end);
}
@@ -834,6 +835,11 @@ static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
return kvm_vcpu_dabt_iswrite(vcpu);
}
+static bool kvm_is_device_pfn(unsigned long pfn)
+{
+ return !pfn_valid(pfn);
+}
+
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
struct kvm_memory_slot *memslot, unsigned long hva,
unsigned long fault_status)
@@ -904,7 +910,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
if (is_error_pfn(pfn))
return -EFAULT;
- if (kvm_is_mmio_pfn(pfn))
+ if (kvm_is_device_pfn(pfn))
mem_type = PAGE_S2_DEVICE;
spin_lock(&kvm->mmu_lock);
diff --git a/arch/arm/mach-at91/include/mach/atmel-mci.h b/arch/arm/mach-at91/include/mach/atmel-mci.h
deleted file mode 100644
index 3069e4135573..000000000000
--- a/arch/arm/mach-at91/include/mach/atmel-mci.h
+++ /dev/null
@@ -1,17 +0,0 @@
-#ifndef __MACH_ATMEL_MCI_H
-#define __MACH_ATMEL_MCI_H
-
-#include <linux/platform_data/dma-atmel.h>
-
-/**
- * struct mci_dma_data - DMA data for MCI interface
- */
-struct mci_dma_data {
- struct at_dma_slave sdata;
-};
-
-/* accessor macros */
-#define slave_data_ptr(s) (&(s)->sdata)
-#define find_slave_dev(s) ((s)->sdata.dma_dev)
-
-#endif /* __MACH_ATMEL_MCI_H */
diff --git a/arch/arm/mach-ep93xx/dma.c b/arch/arm/mach-ep93xx/dma.c
index d8bfd02f5047..88a4c9b089a5 100644
--- a/arch/arm/mach-ep93xx/dma.c
+++ b/arch/arm/mach-ep93xx/dma.c
@@ -66,11 +66,15 @@ static struct ep93xx_dma_platform_data ep93xx_dma_m2p_data = {
.num_channels = ARRAY_SIZE(ep93xx_dma_m2p_channels),
};
+static u64 ep93xx_dma_m2p_mask = DMA_BIT_MASK(32);
+
static struct platform_device ep93xx_dma_m2p_device = {
.name = "ep93xx-dma-m2p",
.id = -1,
.dev = {
- .platform_data = &ep93xx_dma_m2p_data,
+ .platform_data = &ep93xx_dma_m2p_data,
+ .dma_mask = &ep93xx_dma_m2p_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
},
};
@@ -93,11 +97,15 @@ static struct ep93xx_dma_platform_data ep93xx_dma_m2m_data = {
.num_channels = ARRAY_SIZE(ep93xx_dma_m2m_channels),
};
+static u64 ep93xx_dma_m2m_mask = DMA_BIT_MASK(32);
+
static struct platform_device ep93xx_dma_m2m_device = {
.name = "ep93xx-dma-m2m",
.id = -1,
.dev = {
- .platform_data = &ep93xx_dma_m2m_data,
+ .platform_data = &ep93xx_dma_m2m_data,
+ .dma_mask = &ep93xx_dma_m2m_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
},
};
diff --git a/arch/arm/mach-exynos/pmu.c b/arch/arm/mach-exynos/pmu.c
index ff9d23f0a7d9..d8fa0337db73 100644
--- a/arch/arm/mach-exynos/pmu.c
+++ b/arch/arm/mach-exynos/pmu.c
@@ -329,7 +329,7 @@ static unsigned int const exynos5_list_both_cnt_feed[] = {
EXYNOS5_TOP_PWR_SYSMEM_OPTION,
};
-static unsigned int const exynos5_list_diable_wfi_wfe[] = {
+static unsigned int const exynos5_list_disable_wfi_wfe[] = {
EXYNOS5_ARM_CORE1_OPTION,
EXYNOS5_FSYS_ARM_OPTION,
EXYNOS5_ISP_ARM_OPTION,
@@ -360,11 +360,11 @@ static void exynos5_init_pmu(void)
/*
* Disable WFI/WFE on XXX_OPTION
*/
- for (i = 0 ; i < ARRAY_SIZE(exynos5_list_diable_wfi_wfe) ; i++) {
- tmp = pmu_raw_readl(exynos5_list_diable_wfi_wfe[i]);
+ for (i = 0 ; i < ARRAY_SIZE(exynos5_list_disable_wfi_wfe) ; i++) {
+ tmp = pmu_raw_readl(exynos5_list_disable_wfi_wfe[i]);
tmp &= ~(EXYNOS5_OPTION_USE_STANDBYWFE |
EXYNOS5_OPTION_USE_STANDBYWFI);
- pmu_raw_writel(tmp, exynos5_list_diable_wfi_wfe[i]);
+ pmu_raw_writel(tmp, exynos5_list_disable_wfi_wfe[i]);
}
}
diff --git a/arch/arm/mach-imx/clk-vf610.c b/arch/arm/mach-imx/clk-vf610.c
index a17818475050..409637254594 100644
--- a/arch/arm/mach-imx/clk-vf610.c
+++ b/arch/arm/mach-imx/clk-vf610.c
@@ -58,8 +58,14 @@
#define PFD_PLL1_BASE (anatop_base + 0x2b0)
#define PFD_PLL2_BASE (anatop_base + 0x100)
#define PFD_PLL3_BASE (anatop_base + 0xf0)
+#define PLL1_CTRL (anatop_base + 0x270)
+#define PLL2_CTRL (anatop_base + 0x30)
#define PLL3_CTRL (anatop_base + 0x10)
+#define PLL4_CTRL (anatop_base + 0x70)
+#define PLL5_CTRL (anatop_base + 0xe0)
+#define PLL6_CTRL (anatop_base + 0xa0)
#define PLL7_CTRL (anatop_base + 0x20)
+#define ANA_MISC1 (anatop_base + 0x160)
static void __iomem *anatop_base;
static void __iomem *ccm_base;
@@ -67,25 +73,34 @@ static void __iomem *ccm_base;
/* sources for multiplexer clocks, this is used multiple times */
static const char *fast_sels[] = { "firc", "fxosc", };
static const char *slow_sels[] = { "sirc_32k", "sxosc", };
-static const char *pll1_sels[] = { "pll1_main", "pll1_pfd1", "pll1_pfd2", "pll1_pfd3", "pll1_pfd4", };
-static const char *pll2_sels[] = { "pll2_main", "pll2_pfd1", "pll2_pfd2", "pll2_pfd3", "pll2_pfd4", };
-static const char *sys_sels[] = { "fast_clk_sel", "slow_clk_sel", "pll2_pfd_sel", "pll2_main", "pll1_pfd_sel", "pll3_main", };
+static const char *pll1_sels[] = { "pll1_sys", "pll1_pfd1", "pll1_pfd2", "pll1_pfd3", "pll1_pfd4", };
+static const char *pll2_sels[] = { "pll2_bus", "pll2_pfd1", "pll2_pfd2", "pll2_pfd3", "pll2_pfd4", };
+static const char *pll_bypass_src_sels[] = { "fast_clk_sel", "lvds1_in", };
+static const char *pll1_bypass_sels[] = { "pll1", "pll1_bypass_src", };
+static const char *pll2_bypass_sels[] = { "pll2", "pll2_bypass_src", };
+static const char *pll3_bypass_sels[] = { "pll3", "pll3_bypass_src", };
+static const char *pll4_bypass_sels[] = { "pll4", "pll4_bypass_src", };
+static const char *pll5_bypass_sels[] = { "pll5", "pll5_bypass_src", };
+static const char *pll6_bypass_sels[] = { "pll6", "pll6_bypass_src", };
+static const char *pll7_bypass_sels[] = { "pll7", "pll7_bypass_src", };
+static const char *sys_sels[] = { "fast_clk_sel", "slow_clk_sel", "pll2_pfd_sel", "pll2_bus", "pll1_pfd_sel", "pll3_usb_otg", };
static const char *ddr_sels[] = { "pll2_pfd2", "sys_sel", };
static const char *rmii_sels[] = { "enet_ext", "audio_ext", "enet_50m", "enet_25m", };
static const char *enet_ts_sels[] = { "enet_ext", "fxosc", "audio_ext", "usb", "enet_ts", "enet_25m", "enet_50m", };
-static const char *esai_sels[] = { "audio_ext", "mlb", "spdif_rx", "pll4_main_div", };
-static const char *sai_sels[] = { "audio_ext", "mlb", "spdif_rx", "pll4_main_div", };
+static const char *esai_sels[] = { "audio_ext", "mlb", "spdif_rx", "pll4_audio_div", };
+static const char *sai_sels[] = { "audio_ext", "mlb", "spdif_rx", "pll4_audio_div", };
static const char *nfc_sels[] = { "platform_bus", "pll1_pfd1", "pll3_pfd1", "pll3_pfd3", };
-static const char *qspi_sels[] = { "pll3_main", "pll3_pfd4", "pll2_pfd4", "pll1_pfd4", };
-static const char *esdhc_sels[] = { "pll3_main", "pll3_pfd3", "pll1_pfd3", "platform_bus", };
-static const char *dcu_sels[] = { "pll1_pfd2", "pll3_main", };
+static const char *qspi_sels[] = { "pll3_usb_otg", "pll3_pfd4", "pll2_pfd4", "pll1_pfd4", };
+static const char *esdhc_sels[] = { "pll3_usb_otg", "pll3_pfd3", "pll1_pfd3", "platform_bus", };
+static const char *dcu_sels[] = { "pll1_pfd2", "pll3_usb_otg", };
static const char *gpu_sels[] = { "pll2_pfd2", "pll3_pfd2", };
-static const char *vadc_sels[] = { "pll6_main_div", "pll3_main_div", "pll3_main", };
+static const char *vadc_sels[] = { "pll6_video_div", "pll3_usb_otg_div", "pll3_usb_otg", };
/* FTM counter clock source, not module clock */
static const char *ftm_ext_sels[] = {"sirc_128k", "sxosc", "fxosc_half", "audio_ext", };
static const char *ftm_fix_sels[] = { "sxosc", "ipg_bus", };
-static struct clk_div_table pll4_main_div_table[] = {
+
+static struct clk_div_table pll4_audio_div_table[] = {
{ .val = 0, .div = 1 },
{ .val = 1, .div = 2 },
{ .val = 2, .div = 6 },
@@ -120,6 +135,9 @@ static void __init vf610_clocks_init(struct device_node *ccm_node)
clk[VF610_CLK_AUDIO_EXT] = imx_obtain_fixed_clock("audio_ext", 0);
clk[VF610_CLK_ENET_EXT] = imx_obtain_fixed_clock("enet_ext", 0);
+	/* Clock source from external clock via LVDS pad */
+ clk[VF610_CLK_ANACLK1] = imx_obtain_fixed_clock("anaclk1", 0);
+
clk[VF610_CLK_FXOSC_HALF] = imx_clk_fixed_factor("fxosc_half", "fxosc", 1, 2);
np = of_find_compatible_node(NULL, NULL, "fsl,vf610-anatop");
@@ -133,31 +151,63 @@ static void __init vf610_clocks_init(struct device_node *ccm_node)
clk[VF610_CLK_SLOW_CLK_SEL] = imx_clk_mux("slow_clk_sel", CCM_CCSR, 4, 1, slow_sels, ARRAY_SIZE(slow_sels));
clk[VF610_CLK_FASK_CLK_SEL] = imx_clk_mux("fast_clk_sel", CCM_CCSR, 5, 1, fast_sels, ARRAY_SIZE(fast_sels));
- clk[VF610_CLK_PLL1_MAIN] = imx_clk_fixed_factor("pll1_main", "fast_clk_sel", 22, 1);
- clk[VF610_CLK_PLL1_PFD1] = imx_clk_pfd("pll1_pfd1", "pll1_main", PFD_PLL1_BASE, 0);
- clk[VF610_CLK_PLL1_PFD2] = imx_clk_pfd("pll1_pfd2", "pll1_main", PFD_PLL1_BASE, 1);
- clk[VF610_CLK_PLL1_PFD3] = imx_clk_pfd("pll1_pfd3", "pll1_main", PFD_PLL1_BASE, 2);
- clk[VF610_CLK_PLL1_PFD4] = imx_clk_pfd("pll1_pfd4", "pll1_main", PFD_PLL1_BASE, 3);
-
- clk[VF610_CLK_PLL2_MAIN] = imx_clk_fixed_factor("pll2_main", "fast_clk_sel", 22, 1);
- clk[VF610_CLK_PLL2_PFD1] = imx_clk_pfd("pll2_pfd1", "pll2_main", PFD_PLL2_BASE, 0);
- clk[VF610_CLK_PLL2_PFD2] = imx_clk_pfd("pll2_pfd2", "pll2_main", PFD_PLL2_BASE, 1);
- clk[VF610_CLK_PLL2_PFD3] = imx_clk_pfd("pll2_pfd3", "pll2_main", PFD_PLL2_BASE, 2);
- clk[VF610_CLK_PLL2_PFD4] = imx_clk_pfd("pll2_pfd4", "pll2_main", PFD_PLL2_BASE, 3);
-
- clk[VF610_CLK_PLL3_MAIN] = imx_clk_fixed_factor("pll3_main", "fast_clk_sel", 20, 1);
- clk[VF610_CLK_PLL3_PFD1] = imx_clk_pfd("pll3_pfd1", "pll3_main", PFD_PLL3_BASE, 0);
- clk[VF610_CLK_PLL3_PFD2] = imx_clk_pfd("pll3_pfd2", "pll3_main", PFD_PLL3_BASE, 1);
- clk[VF610_CLK_PLL3_PFD3] = imx_clk_pfd("pll3_pfd3", "pll3_main", PFD_PLL3_BASE, 2);
- clk[VF610_CLK_PLL3_PFD4] = imx_clk_pfd("pll3_pfd4", "pll3_main", PFD_PLL3_BASE, 3);
-
- clk[VF610_CLK_PLL4_MAIN] = imx_clk_fixed_factor("pll4_main", "fast_clk_sel", 25, 1);
- /* Enet pll: fixed 50Mhz */
- clk[VF610_CLK_PLL5_MAIN] = imx_clk_fixed_factor("pll5_main", "fast_clk_sel", 125, 6);
- /* pll6: default 960Mhz */
- clk[VF610_CLK_PLL6_MAIN] = imx_clk_fixed_factor("pll6_main", "fast_clk_sel", 40, 1);
- /* pll7: USB1 PLL at 480MHz */
- clk[VF610_CLK_PLL7_MAIN] = imx_clk_pllv3(IMX_PLLV3_USB, "pll7_main", "fast_clk_sel", PLL7_CTRL, 0x2);
+ clk[VF610_CLK_PLL1_BYPASS_SRC] = imx_clk_mux("pll1_bypass_src", PLL1_CTRL, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
+ clk[VF610_CLK_PLL2_BYPASS_SRC] = imx_clk_mux("pll2_bypass_src", PLL2_CTRL, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
+ clk[VF610_CLK_PLL3_BYPASS_SRC] = imx_clk_mux("pll3_bypass_src", PLL3_CTRL, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
+ clk[VF610_CLK_PLL4_BYPASS_SRC] = imx_clk_mux("pll4_bypass_src", PLL4_CTRL, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
+ clk[VF610_CLK_PLL5_BYPASS_SRC] = imx_clk_mux("pll5_bypass_src", PLL5_CTRL, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
+ clk[VF610_CLK_PLL6_BYPASS_SRC] = imx_clk_mux("pll6_bypass_src", PLL6_CTRL, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
+ clk[VF610_CLK_PLL7_BYPASS_SRC] = imx_clk_mux("pll7_bypass_src", PLL7_CTRL, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
+
+ clk[VF610_CLK_PLL1] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll1", "pll1_bypass_src", PLL1_CTRL, 0x1);
+ clk[VF610_CLK_PLL2] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll2", "pll2_bypass_src", PLL2_CTRL, 0x1);
+ clk[VF610_CLK_PLL3] = imx_clk_pllv3(IMX_PLLV3_USB, "pll3", "pll3_bypass_src", PLL3_CTRL, 0x1);
+ clk[VF610_CLK_PLL4] = imx_clk_pllv3(IMX_PLLV3_AV, "pll4", "pll4_bypass_src", PLL4_CTRL, 0x7f);
+ clk[VF610_CLK_PLL5] = imx_clk_pllv3(IMX_PLLV3_ENET, "pll5", "pll5_bypass_src", PLL5_CTRL, 0x3);
+ clk[VF610_CLK_PLL6] = imx_clk_pllv3(IMX_PLLV3_AV, "pll6", "pll6_bypass_src", PLL6_CTRL, 0x7f);
+ clk[VF610_CLK_PLL7] = imx_clk_pllv3(IMX_PLLV3_USB, "pll7", "pll7_bypass_src", PLL7_CTRL, 0x1);
+
+ clk[VF610_PLL1_BYPASS] = imx_clk_mux_flags("pll1_bypass", PLL1_CTRL, 16, 1, pll1_bypass_sels, ARRAY_SIZE(pll1_bypass_sels), CLK_SET_RATE_PARENT);
+ clk[VF610_PLL2_BYPASS] = imx_clk_mux_flags("pll2_bypass", PLL2_CTRL, 16, 1, pll2_bypass_sels, ARRAY_SIZE(pll2_bypass_sels), CLK_SET_RATE_PARENT);
+ clk[VF610_PLL3_BYPASS] = imx_clk_mux_flags("pll3_bypass", PLL3_CTRL, 16, 1, pll3_bypass_sels, ARRAY_SIZE(pll3_bypass_sels), CLK_SET_RATE_PARENT);
+ clk[VF610_PLL4_BYPASS] = imx_clk_mux_flags("pll4_bypass", PLL4_CTRL, 16, 1, pll4_bypass_sels, ARRAY_SIZE(pll4_bypass_sels), CLK_SET_RATE_PARENT);
+ clk[VF610_PLL5_BYPASS] = imx_clk_mux_flags("pll5_bypass", PLL5_CTRL, 16, 1, pll5_bypass_sels, ARRAY_SIZE(pll5_bypass_sels), CLK_SET_RATE_PARENT);
+ clk[VF610_PLL6_BYPASS] = imx_clk_mux_flags("pll6_bypass", PLL6_CTRL, 16, 1, pll6_bypass_sels, ARRAY_SIZE(pll6_bypass_sels), CLK_SET_RATE_PARENT);
+ clk[VF610_PLL7_BYPASS] = imx_clk_mux_flags("pll7_bypass", PLL7_CTRL, 16, 1, pll7_bypass_sels, ARRAY_SIZE(pll7_bypass_sels), CLK_SET_RATE_PARENT);
+
+ /* Do not bypass PLLs initially */
+ clk_set_parent(clk[VF610_PLL1_BYPASS], clk[VF610_CLK_PLL1]);
+ clk_set_parent(clk[VF610_PLL2_BYPASS], clk[VF610_CLK_PLL2]);
+ clk_set_parent(clk[VF610_PLL3_BYPASS], clk[VF610_CLK_PLL3]);
+ clk_set_parent(clk[VF610_PLL4_BYPASS], clk[VF610_CLK_PLL4]);
+ clk_set_parent(clk[VF610_PLL5_BYPASS], clk[VF610_CLK_PLL5]);
+ clk_set_parent(clk[VF610_PLL6_BYPASS], clk[VF610_CLK_PLL6]);
+ clk_set_parent(clk[VF610_PLL7_BYPASS], clk[VF610_CLK_PLL7]);
+
+ clk[VF610_CLK_PLL1_SYS] = imx_clk_gate("pll1_sys", "pll1_bypass", PLL1_CTRL, 13);
+ clk[VF610_CLK_PLL2_BUS] = imx_clk_gate("pll2_bus", "pll2_bypass", PLL2_CTRL, 13);
+ clk[VF610_CLK_PLL3_USB_OTG] = imx_clk_gate("pll3_usb_otg", "pll3_bypass", PLL3_CTRL, 13);
+ clk[VF610_CLK_PLL4_AUDIO] = imx_clk_gate("pll4_audio", "pll4_bypass", PLL4_CTRL, 13);
+ clk[VF610_CLK_PLL5_ENET] = imx_clk_gate("pll5_enet", "pll5_bypass", PLL5_CTRL, 13);
+ clk[VF610_CLK_PLL6_VIDEO] = imx_clk_gate("pll6_video", "pll6_bypass", PLL6_CTRL, 13);
+ clk[VF610_CLK_PLL7_USB_HOST] = imx_clk_gate("pll7_usb_host", "pll7_bypass", PLL7_CTRL, 13);
+
+ clk[VF610_CLK_LVDS1_IN] = imx_clk_gate_exclusive("lvds1_in", "anaclk1", ANA_MISC1, 12, BIT(10));
+
+ clk[VF610_CLK_PLL1_PFD1] = imx_clk_pfd("pll1_pfd1", "pll1_sys", PFD_PLL1_BASE, 0);
+ clk[VF610_CLK_PLL1_PFD2] = imx_clk_pfd("pll1_pfd2", "pll1_sys", PFD_PLL1_BASE, 1);
+ clk[VF610_CLK_PLL1_PFD3] = imx_clk_pfd("pll1_pfd3", "pll1_sys", PFD_PLL1_BASE, 2);
+ clk[VF610_CLK_PLL1_PFD4] = imx_clk_pfd("pll1_pfd4", "pll1_sys", PFD_PLL1_BASE, 3);
+
+ clk[VF610_CLK_PLL2_PFD1] = imx_clk_pfd("pll2_pfd1", "pll2_bus", PFD_PLL2_BASE, 0);
+ clk[VF610_CLK_PLL2_PFD2] = imx_clk_pfd("pll2_pfd2", "pll2_bus", PFD_PLL2_BASE, 1);
+ clk[VF610_CLK_PLL2_PFD3] = imx_clk_pfd("pll2_pfd3", "pll2_bus", PFD_PLL2_BASE, 2);
+ clk[VF610_CLK_PLL2_PFD4] = imx_clk_pfd("pll2_pfd4", "pll2_bus", PFD_PLL2_BASE, 3);
+
+ clk[VF610_CLK_PLL3_PFD1] = imx_clk_pfd("pll3_pfd1", "pll3_usb_otg", PFD_PLL3_BASE, 0);
+ clk[VF610_CLK_PLL3_PFD2] = imx_clk_pfd("pll3_pfd2", "pll3_usb_otg", PFD_PLL3_BASE, 1);
+ clk[VF610_CLK_PLL3_PFD3] = imx_clk_pfd("pll3_pfd3", "pll3_usb_otg", PFD_PLL3_BASE, 2);
+ clk[VF610_CLK_PLL3_PFD4] = imx_clk_pfd("pll3_pfd4", "pll3_usb_otg", PFD_PLL3_BASE, 3);
clk[VF610_CLK_PLL1_PFD_SEL] = imx_clk_mux("pll1_pfd_sel", CCM_CCSR, 16, 3, pll1_sels, 5);
clk[VF610_CLK_PLL2_PFD_SEL] = imx_clk_mux("pll2_pfd_sel", CCM_CCSR, 19, 3, pll2_sels, 5);
@@ -167,12 +217,12 @@ static void __init vf610_clocks_init(struct device_node *ccm_node)
clk[VF610_CLK_PLATFORM_BUS] = imx_clk_divider("platform_bus", "sys_bus", CCM_CACRR, 3, 3);
clk[VF610_CLK_IPG_BUS] = imx_clk_divider("ipg_bus", "platform_bus", CCM_CACRR, 11, 2);
- clk[VF610_CLK_PLL3_MAIN_DIV] = imx_clk_divider("pll3_main_div", "pll3_main", CCM_CACRR, 20, 1);
- clk[VF610_CLK_PLL4_MAIN_DIV] = clk_register_divider_table(NULL, "pll4_main_div", "pll4_main", 0, CCM_CACRR, 6, 3, 0, pll4_main_div_table, &imx_ccm_lock);
- clk[VF610_CLK_PLL6_MAIN_DIV] = imx_clk_divider("pll6_main_div", "pll6_main", CCM_CACRR, 21, 1);
+ clk[VF610_CLK_PLL3_MAIN_DIV] = imx_clk_divider("pll3_usb_otg_div", "pll3_usb_otg", CCM_CACRR, 20, 1);
+ clk[VF610_CLK_PLL4_MAIN_DIV] = clk_register_divider_table(NULL, "pll4_audio_div", "pll4_audio", 0, CCM_CACRR, 6, 3, 0, pll4_audio_div_table, &imx_ccm_lock);
+ clk[VF610_CLK_PLL6_MAIN_DIV] = imx_clk_divider("pll6_video_div", "pll6_video", CCM_CACRR, 21, 1);
- clk[VF610_CLK_USBPHY0] = imx_clk_gate("usbphy0", "pll3_main", PLL3_CTRL, 6);
- clk[VF610_CLK_USBPHY1] = imx_clk_gate("usbphy1", "pll7_main", PLL7_CTRL, 6);
+ clk[VF610_CLK_USBPHY0] = imx_clk_gate("usbphy0", "pll3_usb_otg", PLL3_CTRL, 6);
+ clk[VF610_CLK_USBPHY1] = imx_clk_gate("usbphy1", "pll7_usb_host", PLL7_CTRL, 6);
clk[VF610_CLK_USBC0] = imx_clk_gate2("usbc0", "ipg_bus", CCM_CCGR1, CCM_CCGRx_CGn(4));
clk[VF610_CLK_USBC1] = imx_clk_gate2("usbc1", "ipg_bus", CCM_CCGR7, CCM_CCGRx_CGn(4));
@@ -191,8 +241,8 @@ static void __init vf610_clocks_init(struct device_node *ccm_node)
clk[VF610_CLK_QSPI1_X1_DIV] = imx_clk_divider("qspi1_x1", "qspi1_x2", CCM_CSCDR3, 11, 1);
clk[VF610_CLK_QSPI1] = imx_clk_gate2("qspi1", "qspi1_x1", CCM_CCGR8, CCM_CCGRx_CGn(4));
- clk[VF610_CLK_ENET_50M] = imx_clk_fixed_factor("enet_50m", "pll5_main", 1, 10);
- clk[VF610_CLK_ENET_25M] = imx_clk_fixed_factor("enet_25m", "pll5_main", 1, 20);
+ clk[VF610_CLK_ENET_50M] = imx_clk_fixed_factor("enet_50m", "pll5_enet", 1, 10);
+ clk[VF610_CLK_ENET_25M] = imx_clk_fixed_factor("enet_25m", "pll5_enet", 1, 20);
clk[VF610_CLK_ENET_SEL] = imx_clk_mux("enet_sel", CCM_CSCMR2, 4, 2, rmii_sels, 4);
clk[VF610_CLK_ENET_TS_SEL] = imx_clk_mux("enet_ts_sel", CCM_CSCMR2, 0, 3, enet_ts_sels, 7);
clk[VF610_CLK_ENET] = imx_clk_gate("enet", "enet_sel", CCM_CSCDR1, 24);
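
The hunk above replaces the fixed-factor PLL approximations with real pllv3 clocks and adds a bypass mux per PLL, un-bypassing each one at init via clk_set_parent(). As a rough, illustrative sketch only (not part of the patch; the device and clock-lookup names are hypothetical), code holding references to one of these bypass muxes could route it back to the bypass source through the common clock framework like this:

    /* Illustrative only: route a hypothetical PLL bypass mux to its bypass source. */
    #include <linux/clk.h>
    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/errno.h>

    static int example_force_pll_bypass(struct device *dev)
    {
            struct clk *bypass, *src;

            bypass = devm_clk_get(dev, "pll4_bypass");      /* hypothetical lookup names */
            src = devm_clk_get(dev, "pll4_bypass_src");
            if (IS_ERR(bypass) || IS_ERR(src))
                    return -ENODEV;

            /* The mux output then feeds the downstream pll4_audio gate and divider. */
            return clk_set_parent(bypass, src);
    }
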
diff --git a/arch/arm/mach-imx/common.h b/arch/arm/mach-imx/common.h
index a6147f9fce7f..3d383cb5e6d1 100644
--- a/arch/arm/mach-imx/common.h
+++ b/arch/arm/mach-imx/common.h
@@ -107,8 +107,8 @@ void imx_gpc_pre_suspend(bool arm_power_off);
void imx_gpc_post_resume(void);
void imx_gpc_mask_all(void);
void imx_gpc_restore_all(void);
-void imx_gpc_irq_mask(struct irq_data *d);
-void imx_gpc_irq_unmask(struct irq_data *d);
+void imx_gpc_hwirq_mask(unsigned int hwirq);
+void imx_gpc_hwirq_unmask(unsigned int hwirq);
void imx_anatop_init(void);
void imx_anatop_pre_suspend(void);
void imx_anatop_post_resume(void);
diff --git a/arch/arm/mach-imx/gpc.c b/arch/arm/mach-imx/gpc.c
index 82ea74e68482..5f3602ec74fa 100644
--- a/arch/arm/mach-imx/gpc.c
+++ b/arch/arm/mach-imx/gpc.c
@@ -56,14 +56,14 @@ void imx_gpc_post_resume(void)
static int imx_gpc_irq_set_wake(struct irq_data *d, unsigned int on)
{
- unsigned int idx = d->irq / 32 - 1;
+ unsigned int idx = d->hwirq / 32 - 1;
u32 mask;
/* Sanity check for SPI irq */
- if (d->irq < 32)
+ if (d->hwirq < 32)
return -EINVAL;
- mask = 1 << d->irq % 32;
+ mask = 1 << d->hwirq % 32;
gpc_wake_irqs[idx] = on ? gpc_wake_irqs[idx] | mask :
gpc_wake_irqs[idx] & ~mask;
@@ -91,34 +91,44 @@ void imx_gpc_restore_all(void)
writel_relaxed(gpc_saved_imrs[i], reg_imr1 + i * 4);
}
-void imx_gpc_irq_unmask(struct irq_data *d)
+void imx_gpc_hwirq_unmask(unsigned int hwirq)
{
void __iomem *reg;
u32 val;
- /* Sanity check for SPI irq */
- if (d->irq < 32)
- return;
-
- reg = gpc_base + GPC_IMR1 + (d->irq / 32 - 1) * 4;
+ reg = gpc_base + GPC_IMR1 + (hwirq / 32 - 1) * 4;
val = readl_relaxed(reg);
- val &= ~(1 << d->irq % 32);
+ val &= ~(1 << hwirq % 32);
writel_relaxed(val, reg);
}
-void imx_gpc_irq_mask(struct irq_data *d)
+void imx_gpc_hwirq_mask(unsigned int hwirq)
{
void __iomem *reg;
u32 val;
+ reg = gpc_base + GPC_IMR1 + (hwirq / 32 - 1) * 4;
+ val = readl_relaxed(reg);
+ val |= 1 << (hwirq % 32);
+ writel_relaxed(val, reg);
+}
+
+static void imx_gpc_irq_unmask(struct irq_data *d)
+{
+ /* Sanity check for SPI irq */
+ if (d->hwirq < 32)
+ return;
+
+ imx_gpc_hwirq_unmask(d->hwirq);
+}
+
+static void imx_gpc_irq_mask(struct irq_data *d)
+{
/* Sanity check for SPI irq */
- if (d->irq < 32)
+ if (d->hwirq < 32)
return;
- reg = gpc_base + GPC_IMR1 + (d->irq / 32 - 1) * 4;
- val = readl_relaxed(reg);
- val |= 1 << (d->irq % 32);
- writel_relaxed(val, reg);
+ imx_gpc_hwirq_mask(d->hwirq);
}
void __init imx_gpc_init(void)
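
The conversion above keys the GPC mask-register lookup off the hardware interrupt number instead of the Linux virq, and splits out imx_gpc_hwirq_mask()/imx_gpc_hwirq_unmask() so callers that only know the hwirq (such as the IOMUXC wakeup at hwirq 32 in pm-imx6.c below) no longer need an irq_data. The indexing the helpers keep amounts to this small sketch:

    /* Sketch of the IMR indexing used above; hwirq >= 32 (SPI range) is assumed. */
    static inline void gpc_imr_pos(unsigned int hwirq,
                                   unsigned int *reg_idx, unsigned int *bit)
    {
            *reg_idx = hwirq / 32 - 1;      /* 32..63 -> GPC_IMR1, 64..95 -> GPC_IMR2, ... */
            *bit = hwirq % 32;              /* bit position within that IMR register */
    }
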
diff --git a/arch/arm/mach-imx/pm-imx6.c b/arch/arm/mach-imx/pm-imx6.c
index 5c3af8f993d0..d815d1ba27a5 100644
--- a/arch/arm/mach-imx/pm-imx6.c
+++ b/arch/arm/mach-imx/pm-imx6.c
@@ -261,7 +261,6 @@ static void imx6q_enable_wb(bool enable)
int imx6q_set_lpm(enum mxc_cpu_pwr_mode mode)
{
- struct irq_data *iomuxc_irq_data = irq_get_irq_data(32);
u32 val = readl_relaxed(ccm_base + CLPCR);
val &= ~BM_CLPCR_LPM;
@@ -316,9 +315,9 @@ int imx6q_set_lpm(enum mxc_cpu_pwr_mode mode)
* 3) Software should mask IRQ #32 right after CCM Low-Power mode
* is set (set bits 0-1 of CCM_CLPCR).
*/
- imx_gpc_irq_unmask(iomuxc_irq_data);
+ imx_gpc_hwirq_unmask(32);
writel_relaxed(val, ccm_base + CLPCR);
- imx_gpc_irq_mask(iomuxc_irq_data);
+ imx_gpc_hwirq_mask(32);
return 0;
}
diff --git a/arch/arm/mach-ixp4xx/include/mach/io.h b/arch/arm/mach-ixp4xx/include/mach/io.h
index 559c69a47731..7d11979da030 100644
--- a/arch/arm/mach-ixp4xx/include/mach/io.h
+++ b/arch/arm/mach-ixp4xx/include/mach/io.h
@@ -76,7 +76,7 @@ static inline void __indirect_writeb(u8 value, volatile void __iomem *p)
u32 n, byte_enables, data;
if (!is_pci_memory(addr)) {
- __raw_writeb(value, addr);
+ __raw_writeb(value, p);
return;
}
@@ -141,7 +141,7 @@ static inline unsigned char __indirect_readb(const volatile void __iomem *p)
u32 n, byte_enables, data;
if (!is_pci_memory(addr))
- return __raw_readb(addr);
+ return __raw_readb(p);
n = addr % 4;
byte_enables = (0xf & ~BIT(n)) << IXP4XX_PCI_NP_CBE_BESL;
diff --git a/arch/arm/mach-mvebu/board-v7.c b/arch/arm/mach-mvebu/board-v7.c
index 6478626e3ff6..d0d39f150fab 100644
--- a/arch/arm/mach-mvebu/board-v7.c
+++ b/arch/arm/mach-mvebu/board-v7.c
@@ -188,7 +188,7 @@ static void __init thermal_quirk(void)
static void __init mvebu_dt_init(void)
{
- if (of_machine_is_compatible("plathome,openblocks-ax3-4"))
+ if (of_machine_is_compatible("marvell,armadaxp"))
i2c_quirk();
if (of_machine_is_compatible("marvell,a375-db")) {
external_abort_quirk();
diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
index 2bdc3233abe2..044b51185fcc 100644
--- a/arch/arm/mach-mvebu/coherency.c
+++ b/arch/arm/mach-mvebu/coherency.c
@@ -400,6 +400,8 @@ int __init coherency_init(void)
type == COHERENCY_FABRIC_TYPE_ARMADA_380)
armada_375_380_coherency_init(np);
+ of_node_put(np);
+
return 0;
}
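
The added of_node_put() balances the reference taken by the earlier OF lookup that located the coherency-fabric node. The general pattern, shown here as a minimal sketch with a hypothetical compatible string, is:

    /* Minimal sketch: every successful of_find_*() lookup takes a node reference. */
    #include <linux/errno.h>
    #include <linux/of.h>

    static int __init example_fabric_init(void)
    {
            struct device_node *np;

            np = of_find_compatible_node(NULL, NULL, "vendor,example-fabric");
            if (!np)
                    return -ENODEV;

            /* ... read properties, map registers, etc. ... */

            of_node_put(np);        /* drop the reference taken by the lookup */
            return 0;
    }
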
diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
index 97767a27ca9d..e0ad64fde20e 100644
--- a/arch/arm/mach-omap2/board-n8x0.c
+++ b/arch/arm/mach-omap2/board-n8x0.c
@@ -21,8 +21,10 @@
#include <linux/i2c.h>
#include <linux/spi/spi.h>
#include <linux/usb/musb.h>
+#include <linux/mmc/host.h>
#include <linux/platform_data/spi-omap2-mcspi.h>
#include <linux/platform_data/mtd-onenand-omap2.h>
+#include <linux/platform_data/mmc-omap.h>
#include <linux/mfd/menelaus.h>
#include <sound/tlv320aic3x.h>
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
index ddfc8df83c6a..3d5040f82e90 100644
--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
+++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
@@ -484,7 +484,7 @@ static struct omap_mux_partition *partition;
* Current flows to eMMC when eMMC is off and the data lines are pulled up,
* so pull them down. N.B. we pull 8 lines because we are using 8 lines.
*/
-static void rx51_mmc2_remux(struct device *dev, int slot, int power_on)
+static void rx51_mmc2_remux(struct device *dev, int power_on)
{
if (power_on)
omap_mux_write_array(partition, rx51_mmc2_on_mux);
@@ -500,7 +500,6 @@ static struct omap2_hsmmc_info mmc[] __initdata = {
.cover_only = true,
.gpio_cd = 160,
.gpio_wp = -EINVAL,
- .power_saving = true,
},
{
.name = "internal",
@@ -510,7 +509,6 @@ static struct omap2_hsmmc_info mmc[] __initdata = {
.gpio_cd = -EINVAL,
.gpio_wp = -EINVAL,
.nonremovable = true,
- .power_saving = true,
.remux = rx51_mmc2_remux,
},
{} /* Terminator */
diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c
index 324f02bf8a51..492ef1607115 100644
--- a/arch/arm/mach-omap2/devices.c
+++ b/arch/arm/mach-omap2/devices.c
@@ -49,7 +49,7 @@ static int __init omap3_l3_init(void)
* To avoid code running on other OMAPs in
* multi-omap builds
*/
- if (!(cpu_is_omap34xx()))
+ if (!(cpu_is_omap34xx()) || of_have_populated_dt())
return -ENODEV;
snprintf(oh_name, L3_MODULES_MAX_LEN, "l3_main");
@@ -67,40 +67,6 @@ static int __init omap3_l3_init(void)
}
omap_postcore_initcall(omap3_l3_init);
-static int __init omap4_l3_init(void)
-{
- int i;
- struct omap_hwmod *oh[3];
- struct platform_device *pdev;
- char oh_name[L3_MODULES_MAX_LEN];
-
- /* If dtb is there, the devices will be created dynamically */
- if (of_have_populated_dt())
- return -ENODEV;
-
- /*
- * To avoid code running on other OMAPs in
- * multi-omap builds
- */
- if (!cpu_is_omap44xx() && !soc_is_omap54xx())
- return -ENODEV;
-
- for (i = 0; i < L3_MODULES; i++) {
- snprintf(oh_name, L3_MODULES_MAX_LEN, "l3_main_%d", i+1);
-
- oh[i] = omap_hwmod_lookup(oh_name);
- if (!(oh[i]))
- pr_err("could not look up %s\n", oh_name);
- }
-
- pdev = omap_device_build_ss("omap_l3_noc", 0, oh, 3, NULL, 0);
-
- WARN(IS_ERR(pdev), "could not build omap_device for %s\n", oh_name);
-
- return PTR_RET(pdev);
-}
-omap_postcore_initcall(omap4_l3_init);
-
#if defined(CONFIG_VIDEO_OMAP2) || defined(CONFIG_VIDEO_OMAP2_MODULE)
static struct resource omap2cam_resources[] = {
diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
index 5fa3755261ce..8fb5bbce102f 100644
--- a/arch/arm/mach-omap2/gpmc.c
+++ b/arch/arm/mach-omap2/gpmc.c
@@ -1394,8 +1394,6 @@ static int gpmc_probe_nand_child(struct platform_device *pdev,
if (gpmc_nand_data->elm_of_node == NULL)
gpmc_nand_data->elm_of_node =
of_parse_phandle(child, "elm_id", 0);
- if (gpmc_nand_data->elm_of_node == NULL)
- pr_warn("%s: ti,elm-id property not found\n", __func__);
/* select ecc-scheme for NAND */
if (of_property_read_string(child, "ti,nand-ecc-opt", &s)) {
diff --git a/arch/arm/mach-omap2/hsmmc.c b/arch/arm/mach-omap2/hsmmc.c
index 07d4c7b35754..dc6e79c4484a 100644
--- a/arch/arm/mach-omap2/hsmmc.c
+++ b/arch/arm/mach-omap2/hsmmc.c
@@ -14,14 +14,15 @@
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/gpio.h>
+#include <linux/mmc/host.h>
#include <linux/platform_data/gpio-omap.h>
+#include <linux/platform_data/hsmmc-omap.h>
#include "soc.h"
#include "omap_device.h"
#include "omap-pm.h"
#include "mux.h"
-#include "mmc.h"
#include "hsmmc.h"
#include "control.h"
@@ -32,25 +33,14 @@ static u16 control_devconf1_offset;
#define HSMMC_NAME_LEN 9
-#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_PM)
-
-static int hsmmc_get_context_loss(struct device *dev)
-{
- return omap_pm_get_dev_context_loss_count(dev);
-}
-
-#else
-#define hsmmc_get_context_loss NULL
-#endif
-
-static void omap_hsmmc1_before_set_reg(struct device *dev, int slot,
- int power_on, int vdd)
+static void omap_hsmmc1_before_set_reg(struct device *dev,
+ int power_on, int vdd)
{
u32 reg, prog_io;
- struct omap_mmc_platform_data *mmc = dev->platform_data;
+ struct omap_hsmmc_platform_data *mmc = dev->platform_data;
- if (mmc->slots[0].remux)
- mmc->slots[0].remux(dev, slot, power_on);
+ if (mmc->remux)
+ mmc->remux(dev, power_on);
/*
* Assume we power both OMAP VMMC1 (for CMD, CLK, DAT0..3) and the
@@ -72,7 +62,7 @@ static void omap_hsmmc1_before_set_reg(struct device *dev, int slot,
omap_ctrl_writel(reg, OMAP243X_CONTROL_DEVCONF1);
}
- if (mmc->slots[0].internal_clock) {
+ if (mmc->internal_clock) {
reg = omap_ctrl_readl(OMAP2_CONTROL_DEVCONF0);
reg |= OMAP2_MMCSDIO1ADPCLKISEL;
omap_ctrl_writel(reg, OMAP2_CONTROL_DEVCONF0);
@@ -96,8 +86,7 @@ static void omap_hsmmc1_before_set_reg(struct device *dev, int slot,
}
}
-static void omap_hsmmc1_after_set_reg(struct device *dev, int slot,
- int power_on, int vdd)
+static void omap_hsmmc1_after_set_reg(struct device *dev, int power_on, int vdd)
{
u32 reg;
@@ -120,34 +109,32 @@ static void omap_hsmmc1_after_set_reg(struct device *dev, int slot,
}
}
-static void hsmmc2_select_input_clk_src(struct omap_mmc_platform_data *mmc)
+static void hsmmc2_select_input_clk_src(struct omap_hsmmc_platform_data *mmc)
{
u32 reg;
reg = omap_ctrl_readl(control_devconf1_offset);
- if (mmc->slots[0].internal_clock)
+ if (mmc->internal_clock)
reg |= OMAP2_MMCSDIO2ADPCLKISEL;
else
reg &= ~OMAP2_MMCSDIO2ADPCLKISEL;
omap_ctrl_writel(reg, control_devconf1_offset);
}
-static void hsmmc2_before_set_reg(struct device *dev, int slot,
- int power_on, int vdd)
+static void hsmmc2_before_set_reg(struct device *dev, int power_on, int vdd)
{
- struct omap_mmc_platform_data *mmc = dev->platform_data;
+ struct omap_hsmmc_platform_data *mmc = dev->platform_data;
- if (mmc->slots[0].remux)
- mmc->slots[0].remux(dev, slot, power_on);
+ if (mmc->remux)
+ mmc->remux(dev, power_on);
if (power_on)
hsmmc2_select_input_clk_src(mmc);
}
-static int am35x_hsmmc2_set_power(struct device *dev, int slot,
- int power_on, int vdd)
+static int am35x_hsmmc2_set_power(struct device *dev, int power_on, int vdd)
{
- struct omap_mmc_platform_data *mmc = dev->platform_data;
+ struct omap_hsmmc_platform_data *mmc = dev->platform_data;
if (power_on)
hsmmc2_select_input_clk_src(mmc);
@@ -155,23 +142,22 @@ static int am35x_hsmmc2_set_power(struct device *dev, int slot,
return 0;
}
-static int nop_mmc_set_power(struct device *dev, int slot, int power_on,
- int vdd)
+static int nop_mmc_set_power(struct device *dev, int power_on, int vdd)
{
return 0;
}
-static inline void omap_hsmmc_mux(struct omap_mmc_platform_data *mmc_controller,
- int controller_nr)
+static inline void omap_hsmmc_mux(struct omap_hsmmc_platform_data
+ *mmc_controller, int controller_nr)
{
- if (gpio_is_valid(mmc_controller->slots[0].switch_pin) &&
- (mmc_controller->slots[0].switch_pin < OMAP_MAX_GPIO_LINES))
- omap_mux_init_gpio(mmc_controller->slots[0].switch_pin,
- OMAP_PIN_INPUT_PULLUP);
- if (gpio_is_valid(mmc_controller->slots[0].gpio_wp) &&
- (mmc_controller->slots[0].gpio_wp < OMAP_MAX_GPIO_LINES))
- omap_mux_init_gpio(mmc_controller->slots[0].gpio_wp,
- OMAP_PIN_INPUT_PULLUP);
+ if (gpio_is_valid(mmc_controller->switch_pin) &&
+ (mmc_controller->switch_pin < OMAP_MAX_GPIO_LINES))
+ omap_mux_init_gpio(mmc_controller->switch_pin,
+ OMAP_PIN_INPUT_PULLUP);
+ if (gpio_is_valid(mmc_controller->gpio_wp) &&
+ (mmc_controller->gpio_wp < OMAP_MAX_GPIO_LINES))
+ omap_mux_init_gpio(mmc_controller->gpio_wp,
+ OMAP_PIN_INPUT_PULLUP);
if (cpu_is_omap34xx()) {
if (controller_nr == 0) {
omap_mux_init_signal("sdmmc1_clk",
@@ -180,7 +166,7 @@ static inline void omap_hsmmc_mux(struct omap_mmc_platform_data *mmc_controller,
OMAP_PIN_INPUT_PULLUP);
omap_mux_init_signal("sdmmc1_dat0",
OMAP_PIN_INPUT_PULLUP);
- if (mmc_controller->slots[0].caps &
+ if (mmc_controller->caps &
(MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA)) {
omap_mux_init_signal("sdmmc1_dat1",
OMAP_PIN_INPUT_PULLUP);
@@ -189,7 +175,7 @@ static inline void omap_hsmmc_mux(struct omap_mmc_platform_data *mmc_controller,
omap_mux_init_signal("sdmmc1_dat3",
OMAP_PIN_INPUT_PULLUP);
}
- if (mmc_controller->slots[0].caps &
+ if (mmc_controller->caps &
MMC_CAP_8_BIT_DATA) {
omap_mux_init_signal("sdmmc1_dat4",
OMAP_PIN_INPUT_PULLUP);
@@ -214,7 +200,7 @@ static inline void omap_hsmmc_mux(struct omap_mmc_platform_data *mmc_controller,
* For 8 wire configurations, Lines DAT4, 5, 6 and 7
* need to be muxed in the board-*.c files
*/
- if (mmc_controller->slots[0].caps &
+ if (mmc_controller->caps &
(MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA)) {
omap_mux_init_signal("sdmmc2_dat1",
OMAP_PIN_INPUT_PULLUP);
@@ -223,7 +209,7 @@ static inline void omap_hsmmc_mux(struct omap_mmc_platform_data *mmc_controller,
omap_mux_init_signal("sdmmc2_dat3",
OMAP_PIN_INPUT_PULLUP);
}
- if (mmc_controller->slots[0].caps &
+ if (mmc_controller->caps &
MMC_CAP_8_BIT_DATA) {
omap_mux_init_signal("sdmmc2_dat4.sdmmc2_dat4",
OMAP_PIN_INPUT_PULLUP);
@@ -243,7 +229,7 @@ static inline void omap_hsmmc_mux(struct omap_mmc_platform_data *mmc_controller,
}
static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
- struct omap_mmc_platform_data *mmc)
+ struct omap_hsmmc_platform_data *mmc)
{
char *hc_name;
@@ -259,38 +245,22 @@ static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
else
snprintf(hc_name, (HSMMC_NAME_LEN + 1), "mmc%islot%i",
c->mmc, 1);
- mmc->slots[0].name = hc_name;
- mmc->nr_slots = 1;
- mmc->slots[0].caps = c->caps;
- mmc->slots[0].pm_caps = c->pm_caps;
- mmc->slots[0].internal_clock = !c->ext_clock;
- mmc->max_freq = c->max_freq;
+ mmc->name = hc_name;
+ mmc->caps = c->caps;
+ mmc->internal_clock = !c->ext_clock;
mmc->reg_offset = 0;
- mmc->get_context_loss_count = hsmmc_get_context_loss;
- mmc->slots[0].switch_pin = c->gpio_cd;
- mmc->slots[0].gpio_wp = c->gpio_wp;
+ mmc->switch_pin = c->gpio_cd;
+ mmc->gpio_wp = c->gpio_wp;
- mmc->slots[0].remux = c->remux;
- mmc->slots[0].init_card = c->init_card;
+ mmc->remux = c->remux;
+ mmc->init_card = c->init_card;
if (c->cover_only)
- mmc->slots[0].cover = 1;
+ mmc->cover = 1;
if (c->nonremovable)
- mmc->slots[0].nonremovable = 1;
-
- if (c->power_saving)
- mmc->slots[0].power_saving = 1;
-
- if (c->no_off)
- mmc->slots[0].no_off = 1;
-
- if (c->no_off_init)
- mmc->slots[0].no_regulator_off_init = c->no_off_init;
-
- if (c->vcc_aux_disable_is_sleep)
- mmc->slots[0].vcc_aux_disable_is_sleep = 1;
+ mmc->nonremovable = 1;
/*
* NOTE: MMC slots should have a Vcc regulator set up.
@@ -300,42 +270,42 @@ static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
* temporary HACK: ocr_mask instead of fixed supply
*/
if (soc_is_am35xx())
- mmc->slots[0].ocr_mask = MMC_VDD_165_195 |
+ mmc->ocr_mask = MMC_VDD_165_195 |
MMC_VDD_26_27 |
MMC_VDD_27_28 |
MMC_VDD_29_30 |
MMC_VDD_30_31 |
MMC_VDD_31_32;
else
- mmc->slots[0].ocr_mask = c->ocr_mask;
+ mmc->ocr_mask = c->ocr_mask;
if (!soc_is_am35xx())
- mmc->slots[0].features |= HSMMC_HAS_PBIAS;
+ mmc->features |= HSMMC_HAS_PBIAS;
switch (c->mmc) {
case 1:
- if (mmc->slots[0].features & HSMMC_HAS_PBIAS) {
+ if (mmc->features & HSMMC_HAS_PBIAS) {
/* on-chip level shifting via PBIAS0/PBIAS1 */
- mmc->slots[0].before_set_reg =
+ mmc->before_set_reg =
omap_hsmmc1_before_set_reg;
- mmc->slots[0].after_set_reg =
+ mmc->after_set_reg =
omap_hsmmc1_after_set_reg;
}
if (soc_is_am35xx())
- mmc->slots[0].set_power = nop_mmc_set_power;
+ mmc->set_power = nop_mmc_set_power;
/* OMAP3630 HSMMC1 supports only 4-bit */
if (cpu_is_omap3630() &&
(c->caps & MMC_CAP_8_BIT_DATA)) {
c->caps &= ~MMC_CAP_8_BIT_DATA;
c->caps |= MMC_CAP_4_BIT_DATA;
- mmc->slots[0].caps = c->caps;
+ mmc->caps = c->caps;
}
break;
case 2:
if (soc_is_am35xx())
- mmc->slots[0].set_power = am35x_hsmmc2_set_power;
+ mmc->set_power = am35x_hsmmc2_set_power;
if (c->ext_clock)
c->transceiver = 1;
@@ -343,17 +313,17 @@ static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
c->caps &= ~MMC_CAP_8_BIT_DATA;
c->caps |= MMC_CAP_4_BIT_DATA;
}
- if (mmc->slots[0].features & HSMMC_HAS_PBIAS) {
+ if (mmc->features & HSMMC_HAS_PBIAS) {
/* off-chip level shifting, or none */
- mmc->slots[0].before_set_reg = hsmmc2_before_set_reg;
- mmc->slots[0].after_set_reg = NULL;
+ mmc->before_set_reg = hsmmc2_before_set_reg;
+ mmc->after_set_reg = NULL;
}
break;
case 3:
case 4:
case 5:
- mmc->slots[0].before_set_reg = NULL;
- mmc->slots[0].after_set_reg = NULL;
+ mmc->before_set_reg = NULL;
+ mmc->after_set_reg = NULL;
break;
default:
pr_err("MMC%d configuration not supported!\n", c->mmc);
@@ -368,7 +338,7 @@ static int omap_hsmmc_done;
void omap_hsmmc_late_init(struct omap2_hsmmc_info *c)
{
struct platform_device *pdev;
- struct omap_mmc_platform_data *mmc_pdata;
+ struct omap_hsmmc_platform_data *mmc_pdata;
int res;
if (omap_hsmmc_done != 1)
@@ -388,8 +358,8 @@ void omap_hsmmc_late_init(struct omap2_hsmmc_info *c)
if (!mmc_pdata)
continue;
- mmc_pdata->slots[0].switch_pin = c->gpio_cd;
- mmc_pdata->slots[0].gpio_wp = c->gpio_wp;
+ mmc_pdata->switch_pin = c->gpio_cd;
+ mmc_pdata->gpio_wp = c->gpio_wp;
res = omap_device_register(pdev);
if (res)
@@ -408,12 +378,12 @@ static void __init omap_hsmmc_init_one(struct omap2_hsmmc_info *hsmmcinfo,
struct omap_device *od;
struct platform_device *pdev;
char oh_name[MAX_OMAP_MMC_HWMOD_NAME_LEN];
- struct omap_mmc_platform_data *mmc_data;
- struct omap_mmc_dev_attr *mmc_dev_attr;
+ struct omap_hsmmc_platform_data *mmc_data;
+ struct omap_hsmmc_dev_attr *mmc_dev_attr;
char *name;
int res;
- mmc_data = kzalloc(sizeof(struct omap_mmc_platform_data), GFP_KERNEL);
+ mmc_data = kzalloc(sizeof(*mmc_data), GFP_KERNEL);
if (!mmc_data) {
pr_err("Cannot allocate memory for mmc device!\n");
return;
@@ -463,7 +433,7 @@ static void __init omap_hsmmc_init_one(struct omap2_hsmmc_info *hsmmcinfo,
}
res = platform_device_add_data(pdev, mmc_data,
- sizeof(struct omap_mmc_platform_data));
+ sizeof(struct omap_hsmmc_platform_data));
if (res) {
pr_err("Could not add pdata for %s\n", name);
goto put_pdev;
@@ -489,7 +459,7 @@ put_pdev:
platform_device_put(pdev);
free_name:
- kfree(mmc_data->slots[0].name);
+ kfree(mmc_data->name);
free_mmc:
kfree(mmc_data);
diff --git a/arch/arm/mach-omap2/hsmmc.h b/arch/arm/mach-omap2/hsmmc.h
index 7f2e790e0929..148cd9b15499 100644
--- a/arch/arm/mach-omap2/hsmmc.h
+++ b/arch/arm/mach-omap2/hsmmc.h
@@ -12,25 +12,18 @@ struct omap2_hsmmc_info {
u8 mmc; /* controller 1/2/3 */
u32 caps; /* 4/8 wires and any additional host
* capabilities OR'd (ref. linux/mmc/host.h) */
- u32 pm_caps; /* PM capabilities */
bool transceiver; /* MMC-2 option */
bool ext_clock; /* use external pin for input clock */
bool cover_only; /* No card detect - just cover switch */
bool nonremovable; /* Nonremovable e.g. eMMC */
- bool power_saving; /* Try to sleep or power off when possible */
- bool no_off; /* power_saving and power is not to go off */
- bool no_off_init; /* no power off when not in MMC sleep state */
- bool vcc_aux_disable_is_sleep; /* Regulator off remapped to sleep */
bool deferred; /* mmc needs a deferred probe */
int gpio_cd; /* or -EINVAL */
int gpio_wp; /* or -EINVAL */
char *name; /* or NULL for default */
struct platform_device *pdev; /* mmc controller instance */
int ocr_mask; /* temporary HACK */
- int max_freq; /* maximum clock, if constrained by external
- * circuitry, or 0 for default */
/* Remux (pad configuration) when powering on/off */
- void (*remux)(struct device *dev, int slot, int power_on);
+ void (*remux)(struct device *dev, int power_on);
/* init some special card */
void (*init_card)(struct mmc_card *card);
};
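
With the per-slot array removed and the remux hook losing its slot argument, board files now describe each controller directly through omap2_hsmmc_info, as the rx51 entries earlier in this series already show. A hedged sketch of a minimal board entry under the trimmed layout (the GPIO numbers are hypothetical):

    /* Hypothetical board table matching the trimmed omap2_hsmmc_info above. */
    static struct omap2_hsmmc_info board_mmc[] __initdata = {
            {
                    .mmc            = 1,
                    .caps           = MMC_CAP_4_BIT_DATA,
                    .gpio_cd        = 110,          /* hypothetical card-detect GPIO */
                    .gpio_wp        = -EINVAL,      /* no write-protect line */
            },
            {}      /* terminator */
    };

    /* Registered from board init code with omap_hsmmc_init(board_mmc); */
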
diff --git a/arch/arm/mach-omap2/mmc.h b/arch/arm/mach-omap2/mmc.h
index 0cd4b089da9c..30d39b97e7dd 100644
--- a/arch/arm/mach-omap2/mmc.h
+++ b/arch/arm/mach-omap2/mmc.h
@@ -1,5 +1,3 @@
-#include <linux/mmc/host.h>
-#include <linux/platform_data/mmc-omap.h>
#define OMAP24XX_NR_MMC 2
#define OMAP2420_MMC_SIZE OMAP1_MMC_SIZE
@@ -7,14 +5,6 @@
#define OMAP4_MMC_REG_OFFSET 0x100
-#if defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE)
-void omap242x_init_mmc(struct omap_mmc_platform_data **mmc_data);
-#else
-static inline void omap242x_init_mmc(struct omap_mmc_platform_data **mmc_data)
-{
-}
-#endif
-
struct omap_hwmod;
int omap_msdi_reset(struct omap_hwmod *oh);
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
index 16b20cedc38d..b7cb44abe49b 100644
--- a/arch/arm/mach-omap2/omap4-common.c
+++ b/arch/arm/mach-omap2/omap4-common.c
@@ -36,7 +36,6 @@
#include "soc.h"
#include "iomap.h"
#include "common.h"
-#include "mmc.h"
#include "prminst44xx.h"
#include "prcm_mpu44xx.h"
#include "omap4-sar-layout.h"
diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
index d22c30d3ccfa..8c58b71c2727 100644
--- a/arch/arm/mach-omap2/omap_device.c
+++ b/arch/arm/mach-omap2/omap_device.c
@@ -917,6 +917,10 @@ static int __init omap_device_late_idle(struct device *dev, void *data)
static int __init omap_device_late_init(void)
{
bus_for_each_dev(&platform_bus_type, NULL, NULL, omap_device_late_idle);
+
+ WARN(!of_have_populated_dt(),
+ "legacy booting deprecated, please update to boot with .dts\n");
+
return 0;
}
omap_late_initcall_sync(omap_device_late_init);
diff --git a/arch/arm/mach-omap2/omap_hwmod_2430_data.c b/arch/arm/mach-omap2/omap_hwmod_2430_data.c
index c2555cb95e71..79127b35fe60 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2430_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2430_data.c
@@ -15,12 +15,12 @@
#include <linux/i2c-omap.h>
#include <linux/platform_data/asoc-ti-mcbsp.h>
+#include <linux/platform_data/hsmmc-omap.h>
#include <linux/platform_data/spi-omap2-mcspi.h>
#include <linux/omap-dma.h>
#include <plat/dmtimer.h>
#include "omap_hwmod.h"
-#include "mmc.h"
#include "l3_2xxx.h"
#include "soc.h"
@@ -372,7 +372,7 @@ static struct omap_hwmod_opt_clk omap2430_mmc1_opt_clks[] = {
{ .role = "dbck", .clk = "mmchsdb1_fck" },
};
-static struct omap_mmc_dev_attr mmc1_dev_attr = {
+static struct omap_hsmmc_dev_attr mmc1_dev_attr = {
.flags = OMAP_HSMMC_SUPPORTS_DUAL_VOLT,
};
diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c
index a579b89ce9b7..cabc5695b504 100644
--- a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c
@@ -15,10 +15,10 @@
*/
#include <linux/platform_data/gpio-omap.h>
+#include <linux/platform_data/hsmmc-omap.h>
#include <linux/platform_data/spi-omap2-mcspi.h>
#include "omap_hwmod.h"
#include "i2c.h"
-#include "mmc.h"
#include "wd_timer.h"
#include "cm33xx.h"
#include "prm33xx.h"
@@ -836,7 +836,7 @@ static struct omap_hwmod_class am33xx_mmc_hwmod_class = {
};
/* mmc0 */
-static struct omap_mmc_dev_attr am33xx_mmc0_dev_attr = {
+static struct omap_hsmmc_dev_attr am33xx_mmc0_dev_attr = {
.flags = OMAP_HSMMC_SUPPORTS_DUAL_VOLT,
};
@@ -854,7 +854,7 @@ struct omap_hwmod am33xx_mmc0_hwmod = {
};
/* mmc1 */
-static struct omap_mmc_dev_attr am33xx_mmc1_dev_attr = {
+static struct omap_hsmmc_dev_attr am33xx_mmc1_dev_attr = {
.flags = OMAP_HSMMC_SUPPORTS_DUAL_VOLT,
};
@@ -872,7 +872,7 @@ struct omap_hwmod am33xx_mmc1_hwmod = {
};
/* mmc2 */
-static struct omap_mmc_dev_attr am33xx_mmc2_dev_attr = {
+static struct omap_hsmmc_dev_attr am33xx_mmc2_dev_attr = {
.flags = OMAP_HSMMC_SUPPORTS_DUAL_VOLT,
};
struct omap_hwmod am33xx_mmc2_hwmod = {
diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
index 6b406ca4bd3b..0cf7b563dcd1 100644
--- a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
@@ -27,7 +27,6 @@
#include "prm33xx.h"
#include "prm-regbits-33xx.h"
#include "i2c.h"
-#include "mmc.h"
#include "wd_timer.h"
#include "omap_hwmod_33xx_43xx_common_data.h"
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index 2a78b093c0ce..11468eea3871 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -18,6 +18,7 @@
#include <linux/i2c-omap.h>
#include <linux/power/smartreflex.h>
#include <linux/platform_data/gpio-omap.h>
+#include <linux/platform_data/hsmmc-omap.h>
#include <linux/omap-dma.h>
#include "l3_3xxx.h"
@@ -37,7 +38,6 @@
#include "cm-regbits-34xx.h"
#include "i2c.h"
-#include "mmc.h"
#include "wd_timer.h"
#include "serial.h"
@@ -1786,12 +1786,12 @@ static struct omap_hwmod_opt_clk omap34xx_mmc1_opt_clks[] = {
{ .role = "dbck", .clk = "omap_32k_fck", },
};
-static struct omap_mmc_dev_attr mmc1_dev_attr = {
+static struct omap_hsmmc_dev_attr mmc1_dev_attr = {
.flags = OMAP_HSMMC_SUPPORTS_DUAL_VOLT,
};
/* See 35xx errata 2.1.1.128 in SPRZ278F */
-static struct omap_mmc_dev_attr mmc1_pre_es3_dev_attr = {
+static struct omap_hsmmc_dev_attr mmc1_pre_es3_dev_attr = {
.flags = (OMAP_HSMMC_SUPPORTS_DUAL_VOLT |
OMAP_HSMMC_BROKEN_MULTIBLOCK_READ),
};
@@ -1854,7 +1854,7 @@ static struct omap_hwmod_opt_clk omap34xx_mmc2_opt_clks[] = {
};
/* See 35xx errata 2.1.1.128 in SPRZ278F */
-static struct omap_mmc_dev_attr mmc2_pre_es3_dev_attr = {
+static struct omap_hsmmc_dev_attr mmc2_pre_es3_dev_attr = {
.flags = OMAP_HSMMC_BROKEN_MULTIBLOCK_READ,
};
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index 44e5634bba34..d8a3cf1c1787 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -22,6 +22,7 @@
#include <linux/io.h>
#include <linux/platform_data/gpio-omap.h>
+#include <linux/platform_data/hsmmc-omap.h>
#include <linux/power/smartreflex.h>
#include <linux/i2c-omap.h>
@@ -39,7 +40,6 @@
#include "prm44xx.h"
#include "prm-regbits-44xx.h"
#include "i2c.h"
-#include "mmc.h"
#include "wd_timer.h"
/* Base offset for all OMAP4 interrupts external to MPUSS */
@@ -1952,7 +1952,7 @@ static struct omap_hwmod_dma_info omap44xx_mmc1_sdma_reqs[] = {
};
/* mmc1 dev_attr */
-static struct omap_mmc_dev_attr mmc1_dev_attr = {
+static struct omap_hsmmc_dev_attr mmc1_dev_attr = {
.flags = OMAP_HSMMC_SUPPORTS_DUAL_VOLT,
};
diff --git a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
index 1103aa0e0d29..5ec786a76d3c 100644
--- a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
@@ -19,6 +19,7 @@
#include <linux/io.h>
#include <linux/platform_data/gpio-omap.h>
+#include <linux/platform_data/hsmmc-omap.h>
#include <linux/power/smartreflex.h>
#include <linux/i2c-omap.h>
@@ -33,7 +34,6 @@
#include "cm2_54xx.h"
#include "prm54xx.h"
#include "i2c.h"
-#include "mmc.h"
#include "wd_timer.h"
/* Base offset for all OMAP5 interrupts external to MPUSS */
@@ -1269,7 +1269,7 @@ static struct omap_hwmod_opt_clk mmc1_opt_clks[] = {
};
/* mmc1 dev_attr */
-static struct omap_mmc_dev_attr mmc1_dev_attr = {
+static struct omap_hsmmc_dev_attr mmc1_dev_attr = {
.flags = OMAP_HSMMC_SUPPORTS_DUAL_VOLT,
};
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
index 5684f112654b..711c97e90990 100644
--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
@@ -19,6 +19,7 @@
#include <linux/io.h>
#include <linux/platform_data/gpio-omap.h>
+#include <linux/platform_data/hsmmc-omap.h>
#include <linux/power/smartreflex.h>
#include <linux/i2c-omap.h>
@@ -33,7 +34,6 @@
#include "cm2_7xx.h"
#include "prm7xx.h"
#include "i2c.h"
-#include "mmc.h"
#include "wd_timer.h"
#include "soc.h"
@@ -1301,7 +1301,7 @@ static struct omap_hwmod_opt_clk mmc1_opt_clks[] = {
};
/* mmc1 dev_attr */
-static struct omap_mmc_dev_attr mmc1_dev_attr = {
+static struct omap_hsmmc_dev_attr mmc1_dev_attr = {
.flags = OMAP_HSMMC_SUPPORTS_DUAL_VOLT,
};
diff --git a/arch/arm/mach-omap2/omap_phy_internal.c b/arch/arm/mach-omap2/omap_phy_internal.c
index 50640b38f0bf..1a19fa096bab 100644
--- a/arch/arm/mach-omap2/omap_phy_internal.c
+++ b/arch/arm/mach-omap2/omap_phy_internal.c
@@ -21,6 +21,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/clk.h>
@@ -97,13 +99,13 @@ void am35x_musb_phy_power(u8 on)
omap_ctrl_writel(devconf2, AM35XX_CONTROL_DEVCONF2);
- pr_info(KERN_INFO "Waiting for PHY clock good...\n");
+ pr_info("Waiting for PHY clock good...\n");
while (!(omap_ctrl_readl(AM35XX_CONTROL_DEVCONF2)
& CONF2_PHYCLKGD)) {
cpu_relax();
if (time_after(jiffies, timeout)) {
- pr_err(KERN_ERR "musb PHY clock good timed out\n");
+ pr_err("musb PHY clock good timed out\n");
break;
}
}
@@ -145,7 +147,7 @@ void am35x_set_mode(u8 musb_mode)
devconf2 |= CONF2_NO_OVERRIDE;
break;
default:
- pr_info(KERN_INFO "Unsupported mode %u\n", musb_mode);
+ pr_info("Unsupported mode %u\n", musb_mode);
}
omap_ctrl_writel(devconf2, AM35XX_CONTROL_DEVCONF2);
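
pr_info() and pr_err() already supply their log level, so embedding KERN_INFO/KERN_ERR in the format string only printed the level marker literally; the added pr_fmt() definition additionally prefixes every message with the module name. Roughly, after this change:

    /* With the pr_fmt() above, this line effectively expands to:
     *   printk(KERN_INFO KBUILD_MODNAME ": " "Waiting for PHY clock good...\n");
     */
    pr_info("Waiting for PHY clock good...\n");
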
diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c
index a388f8c1bcb3..57dee0c7cd2b 100644
--- a/arch/arm/mach-omap2/serial.c
+++ b/arch/arm/mach-omap2/serial.c
@@ -263,9 +263,6 @@ void __init omap_serial_init_port(struct omap_board_data *bdata,
omap_up.dma_rx_timeout = info->dma_rx_timeout;
omap_up.dma_rx_poll_rate = info->dma_rx_poll_rate;
omap_up.autosuspend_timeout = info->autosuspend_timeout;
- omap_up.DTR_gpio = info->DTR_gpio;
- omap_up.DTR_inverted = info->DTR_inverted;
- omap_up.DTR_present = info->DTR_present;
pdata = &omap_up;
pdata_size = sizeof(struct omap_uart_port_info);
diff --git a/arch/arm/mach-pxa/include/mach/addr-map.h b/arch/arm/mach-pxa/include/mach/addr-map.h
index bbf9df37ad4b..d28fe291233a 100644
--- a/arch/arm/mach-pxa/include/mach/addr-map.h
+++ b/arch/arm/mach-pxa/include/mach/addr-map.h
@@ -39,6 +39,11 @@
#define DMEMC_SIZE 0x00100000
/*
+ * Reserved space for low level debug virtual addresses within
+ * 0xf6200000..0xf6201000
+ */
+
+/*
* Internal Memory Controller (PXA27x and later)
*/
#define IMEMC_PHYS 0x58000000
diff --git a/arch/arm/mach-shmobile/clock-r8a7740.c b/arch/arm/mach-shmobile/clock-r8a7740.c
index 969e85dad09b..9cac8247c72b 100644
--- a/arch/arm/mach-shmobile/clock-r8a7740.c
+++ b/arch/arm/mach-shmobile/clock-r8a7740.c
@@ -451,7 +451,7 @@ enum {
MSTP128, MSTP127, MSTP125,
MSTP116, MSTP111, MSTP100, MSTP117,
- MSTP230,
+ MSTP230, MSTP229,
MSTP222,
MSTP218, MSTP217, MSTP216, MSTP214,
MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
@@ -470,11 +470,12 @@ static struct clk mstp_clks[MSTP_NR] = {
[MSTP127] = SH_CLK_MSTP32(&div4_clks[DIV4_S], SMSTPCR1, 27, 0), /* CEU20 */
[MSTP125] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR1, 25, 0), /* TMU0 */
[MSTP117] = SH_CLK_MSTP32(&div4_clks[DIV4_B], SMSTPCR1, 17, 0), /* LCDC1 */
- [MSTP116] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR1, 16, 0), /* IIC0 */
+ [MSTP116] = SH_CLK_MSTP32(&div4_clks[DIV4_HPP], SMSTPCR1, 16, 0), /* IIC0 */
[MSTP111] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR1, 11, 0), /* TMU1 */
[MSTP100] = SH_CLK_MSTP32(&div4_clks[DIV4_B], SMSTPCR1, 0, 0), /* LCDC0 */
[MSTP230] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR2, 30, 0), /* SCIFA6 */
+ [MSTP229] = SH_CLK_MSTP32(&div4_clks[DIV4_HP], SMSTPCR2, 29, 0), /* INTCA */
[MSTP222] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR2, 22, 0), /* SCIFA7 */
[MSTP218] = SH_CLK_MSTP32(&div4_clks[DIV4_HP], SMSTPCR2, 18, 0), /* DMAC1 */
[MSTP217] = SH_CLK_MSTP32(&div4_clks[DIV4_HP], SMSTPCR2, 17, 0), /* DMAC2 */
@@ -571,6 +572,10 @@ static struct clk_lookup lookups[] = {
CLKDEV_DEV_ID("sh-dma-engine.0", &mstp_clks[MSTP218]),
CLKDEV_DEV_ID("sh-sci.7", &mstp_clks[MSTP222]),
CLKDEV_DEV_ID("e6cd0000.serial", &mstp_clks[MSTP222]),
+ CLKDEV_DEV_ID("renesas_intc_irqpin.0", &mstp_clks[MSTP229]),
+ CLKDEV_DEV_ID("renesas_intc_irqpin.1", &mstp_clks[MSTP229]),
+ CLKDEV_DEV_ID("renesas_intc_irqpin.2", &mstp_clks[MSTP229]),
+ CLKDEV_DEV_ID("renesas_intc_irqpin.3", &mstp_clks[MSTP229]),
CLKDEV_DEV_ID("sh-sci.6", &mstp_clks[MSTP230]),
CLKDEV_DEV_ID("e6cc0000.serial", &mstp_clks[MSTP230]),
diff --git a/arch/arm/mach-shmobile/clock-r8a7790.c b/arch/arm/mach-shmobile/clock-r8a7790.c
index c395ff194254..f9bbc5f0a9a1 100644
--- a/arch/arm/mach-shmobile/clock-r8a7790.c
+++ b/arch/arm/mach-shmobile/clock-r8a7790.c
@@ -64,7 +64,7 @@
#define SDCKCR 0xE6150074
#define SD2CKCR 0xE6150078
-#define SD3CKCR 0xE615007C
+#define SD3CKCR 0xE615026C
#define MMC0CKCR 0xE6150240
#define MMC1CKCR 0xE6150244
#define SSPCKCR 0xE6150248
diff --git a/arch/arm/mach-shmobile/setup-sh73a0.c b/arch/arm/mach-shmobile/setup-sh73a0.c
index ca346cce4f30..1ff4bd65e647 100644
--- a/arch/arm/mach-shmobile/setup-sh73a0.c
+++ b/arch/arm/mach-shmobile/setup-sh73a0.c
@@ -22,6 +22,7 @@
#include <linux/of_platform.h>
#include <linux/delay.h>
#include <linux/input.h>
+#include <linux/i2c/i2c-sh_mobile.h>
#include <linux/io.h>
#include <linux/serial_sci.h>
#include <linux/sh_dma.h>
@@ -188,11 +189,18 @@ static struct resource i2c4_resources[] = {
},
};
+static struct i2c_sh_mobile_platform_data i2c_platform_data = {
+ .clks_per_count = 2,
+};
+
static struct platform_device i2c0_device = {
.name = "i2c-sh_mobile",
.id = 0,
.resource = i2c0_resources,
.num_resources = ARRAY_SIZE(i2c0_resources),
+ .dev = {
+ .platform_data = &i2c_platform_data,
+ },
};
static struct platform_device i2c1_device = {
@@ -200,6 +208,9 @@ static struct platform_device i2c1_device = {
.id = 1,
.resource = i2c1_resources,
.num_resources = ARRAY_SIZE(i2c1_resources),
+ .dev = {
+ .platform_data = &i2c_platform_data,
+ },
};
static struct platform_device i2c2_device = {
@@ -207,6 +218,9 @@ static struct platform_device i2c2_device = {
.id = 2,
.resource = i2c2_resources,
.num_resources = ARRAY_SIZE(i2c2_resources),
+ .dev = {
+ .platform_data = &i2c_platform_data,
+ },
};
static struct platform_device i2c3_device = {
@@ -214,6 +228,9 @@ static struct platform_device i2c3_device = {
.id = 3,
.resource = i2c3_resources,
.num_resources = ARRAY_SIZE(i2c3_resources),
+ .dev = {
+ .platform_data = &i2c_platform_data,
+ },
};
static struct platform_device i2c4_device = {
@@ -221,6 +238,9 @@ static struct platform_device i2c4_device = {
.id = 4,
.resource = i2c4_resources,
.num_resources = ARRAY_SIZE(i2c4_resources),
+ .dev = {
+ .platform_data = &i2c_platform_data,
+ },
};
static const struct sh_dmae_slave_config sh73a0_dmae_slaves[] = {
diff --git a/arch/arm/mach-tegra/irq.c b/arch/arm/mach-tegra/irq.c
index da7be13aecce..ab95f5391a2b 100644
--- a/arch/arm/mach-tegra/irq.c
+++ b/arch/arm/mach-tegra/irq.c
@@ -99,42 +99,42 @@ static inline void tegra_irq_write_mask(unsigned int irq, unsigned long reg)
static void tegra_mask(struct irq_data *d)
{
- if (d->irq < FIRST_LEGACY_IRQ)
+ if (d->hwirq < FIRST_LEGACY_IRQ)
return;
- tegra_irq_write_mask(d->irq, ICTLR_CPU_IER_CLR);
+ tegra_irq_write_mask(d->hwirq, ICTLR_CPU_IER_CLR);
}
static void tegra_unmask(struct irq_data *d)
{
- if (d->irq < FIRST_LEGACY_IRQ)
+ if (d->hwirq < FIRST_LEGACY_IRQ)
return;
- tegra_irq_write_mask(d->irq, ICTLR_CPU_IER_SET);
+ tegra_irq_write_mask(d->hwirq, ICTLR_CPU_IER_SET);
}
static void tegra_ack(struct irq_data *d)
{
- if (d->irq < FIRST_LEGACY_IRQ)
+ if (d->hwirq < FIRST_LEGACY_IRQ)
return;
- tegra_irq_write_mask(d->irq, ICTLR_CPU_IEP_FIR_CLR);
+ tegra_irq_write_mask(d->hwirq, ICTLR_CPU_IEP_FIR_CLR);
}
static void tegra_eoi(struct irq_data *d)
{
- if (d->irq < FIRST_LEGACY_IRQ)
+ if (d->hwirq < FIRST_LEGACY_IRQ)
return;
- tegra_irq_write_mask(d->irq, ICTLR_CPU_IEP_FIR_CLR);
+ tegra_irq_write_mask(d->hwirq, ICTLR_CPU_IEP_FIR_CLR);
}
static int tegra_retrigger(struct irq_data *d)
{
- if (d->irq < FIRST_LEGACY_IRQ)
+ if (d->hwirq < FIRST_LEGACY_IRQ)
return 0;
- tegra_irq_write_mask(d->irq, ICTLR_CPU_IEP_FIR_SET);
+ tegra_irq_write_mask(d->hwirq, ICTLR_CPU_IEP_FIR_SET);
return 1;
}
@@ -142,7 +142,7 @@ static int tegra_retrigger(struct irq_data *d)
#ifdef CONFIG_PM_SLEEP
static int tegra_set_wake(struct irq_data *d, unsigned int enable)
{
- u32 irq = d->irq;
+ u32 irq = d->hwirq;
u32 index, mask;
if (irq < FIRST_LEGACY_IRQ ||
diff --git a/arch/arm/mach-tegra/reset-handler.S b/arch/arm/mach-tegra/reset-handler.S
index 7b2baab0f0bd..71be4af5e975 100644
--- a/arch/arm/mach-tegra/reset-handler.S
+++ b/arch/arm/mach-tegra/reset-handler.S
@@ -51,6 +51,7 @@ ENTRY(tegra_resume)
THUMB( it ne )
bne cpu_resume @ no
+ tegra_get_soc_id TEGRA_APB_MISC_BASE, r6
/* Are we on Tegra20? */
cmp r6, #TEGRA20
beq 1f @ Yes
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index ae69809a9e47..7eb94e6fc376 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -798,6 +798,7 @@ config NEED_KUSER_HELPERS
config KUSER_HELPERS
bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
+ depends on MMU
default y
help
Warning: disabling this option may break user programs.
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index b3a947863ac7..22ac2a6fbfe3 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -270,7 +270,6 @@ __v7_pj4b_setup:
/* Auxiliary Debug Modes Control 1 Register */
#define PJ4B_STATIC_BP (1 << 2) /* Enable Static BP */
#define PJ4B_INTER_PARITY (1 << 8) /* Disable Internal Parity Handling */
-#define PJ4B_BCK_OFF_STREX (1 << 5) /* Enable the back off of STREX instr */
#define PJ4B_CLEAN_LINE (1 << 16) /* Disable data transfer for clean line */
/* Auxiliary Debug Modes Control 2 Register */
@@ -293,7 +292,6 @@ __v7_pj4b_setup:
/* Auxiliary Debug Modes Control 1 Register */
mrc p15, 1, r0, c15, c1, 1
orr r0, r0, #PJ4B_CLEAN_LINE
- orr r0, r0, #PJ4B_BCK_OFF_STREX
orr r0, r0, #PJ4B_INTER_PARITY
bic r0, r0, #PJ4B_STATIC_BP
mcr p15, 1, r0, c15, c1, 1
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index 23259f104c66..afa2b3c4df4a 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -535,7 +535,7 @@ ENTRY(cpu_xscale_do_suspend)
mrc p15, 0, r5, c15, c1, 0 @ CP access reg
mrc p15, 0, r6, c13, c0, 0 @ PID
mrc p15, 0, r7, c3, c0, 0 @ domain ID
- mrc p15, 0, r8, c1, c1, 0 @ auxiliary control reg
+ mrc p15, 0, r8, c1, c0, 1 @ auxiliary control reg
mrc p15, 0, r9, c1, c0, 0 @ control reg
bic r4, r4, #2 @ clear frequency change bit
stmia r0, {r4 - r9} @ store cp regs
@@ -552,7 +552,7 @@ ENTRY(cpu_xscale_do_resume)
mcr p15, 0, r6, c13, c0, 0 @ PID
mcr p15, 0, r7, c3, c0, 0 @ domain ID
mcr p15, 0, r1, c2, c0, 0 @ translation table base addr
- mcr p15, 0, r8, c1, c1, 0 @ auxiliary control reg
+ mcr p15, 0, r8, c1, c0, 1 @ auxiliary control reg
mov r0, r9 @ control register
b cpu_resume_mmu
ENDPROC(cpu_xscale_do_resume)
diff --git a/arch/arm/plat-orion/gpio.c b/arch/arm/plat-orion/gpio.c
index b61a3bcc2fa8..e048f6198d68 100644
--- a/arch/arm/plat-orion/gpio.c
+++ b/arch/arm/plat-orion/gpio.c
@@ -497,6 +497,34 @@ static void orion_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
#define orion_gpio_dbg_show NULL
#endif
+static void orion_gpio_unmask_irq(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
+ u32 reg_val;
+ u32 mask = d->mask;
+
+ irq_gc_lock(gc);
+ reg_val = irq_reg_readl(gc->reg_base + ct->regs.mask);
+ reg_val |= mask;
+ irq_reg_writel(reg_val, gc->reg_base + ct->regs.mask);
+ irq_gc_unlock(gc);
+}
+
+static void orion_gpio_mask_irq(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
+ u32 mask = d->mask;
+ u32 reg_val;
+
+ irq_gc_lock(gc);
+ reg_val = irq_reg_readl(gc->reg_base + ct->regs.mask);
+ reg_val &= ~mask;
+ irq_reg_writel(reg_val, gc->reg_base + ct->regs.mask);
+ irq_gc_unlock(gc);
+}
+
void __init orion_gpio_init(struct device_node *np,
int gpio_base, int ngpio,
void __iomem *base, int mask_offset,
@@ -565,8 +593,8 @@ void __init orion_gpio_init(struct device_node *np,
ct = gc->chip_types;
ct->regs.mask = ochip->mask_offset + GPIO_LEVEL_MASK_OFF;
ct->type = IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW;
- ct->chip.irq_mask = irq_gc_mask_clr_bit;
- ct->chip.irq_unmask = irq_gc_mask_set_bit;
+ ct->chip.irq_mask = orion_gpio_mask_irq;
+ ct->chip.irq_unmask = orion_gpio_unmask_irq;
ct->chip.irq_set_type = gpio_irq_set_type;
ct->chip.name = ochip->chip.label;
@@ -575,8 +603,8 @@ void __init orion_gpio_init(struct device_node *np,
ct->regs.ack = GPIO_EDGE_CAUSE_OFF;
ct->type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
ct->chip.irq_ack = irq_gc_ack_clr_bit;
- ct->chip.irq_mask = irq_gc_mask_clr_bit;
- ct->chip.irq_unmask = irq_gc_mask_set_bit;
+ ct->chip.irq_mask = orion_gpio_mask_irq;
+ ct->chip.irq_unmask = orion_gpio_unmask_irq;
ct->chip.irq_set_type = gpio_irq_set_type;
ct->handler = handle_edge_irq;
ct->chip.name = ochip->chip.label;
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 9532f8d5857e..7c79c6494379 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -34,13 +34,16 @@ config ARM64
select GENERIC_TIME_VSYSCALL
select HANDLE_DOMAIN_IRQ
select HARDIRQS_SW_RESEND
+ select HAVE_ALIGNED_STRUCT_PAGE if SLUB
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_KGDB
+ select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
select HAVE_BPF_JIT
select HAVE_C_RECORDMCOUNT
select HAVE_CC_STACKPROTECTOR
+ select HAVE_CMPXCHG_DOUBLE
select HAVE_DEBUG_BUGVERBOSE
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_API_DEBUG
@@ -193,6 +196,114 @@ endmenu
menu "Kernel Features"
+menu "ARM errata workarounds via the alternatives framework"
+
+config ARM64_ERRATUM_826319
+ bool "Cortex-A53: 826319: System might deadlock if a write cannot complete until read data is accepted"
+ default y
+ help
+ This option adds an alternative code sequence to work around ARM
+ erratum 826319 on Cortex-A53 parts up to r0p2 with an AMBA 4 ACE or
+ AXI master interface and an L2 cache.
+
+ If a Cortex-A53 uses an AMBA AXI4 ACE interface to other processors
+ and is unable to accept a certain write via this interface, it will
+ not progress on read data presented on the read data channel and the
+ system can deadlock.
+
+ The workaround promotes data cache clean instructions to
+ data cache clean-and-invalidate.
+ Please note that this does not necessarily enable the workaround,
+ as it depends on the alternative framework, which will only patch
+ the kernel if an affected CPU is detected.
+
+ If unsure, say Y.
+
+config ARM64_ERRATUM_827319
+ bool "Cortex-A53: 827319: Data cache clean instructions might cause overlapping transactions to the interconnect"
+ default y
+ help
+ This option adds an alternative code sequence to work around ARM
+ erratum 827319 on Cortex-A53 parts up to r0p2 with an AMBA 5 CHI
+ master interface and an L2 cache.
+
+ Under certain conditions this erratum can cause a clean line eviction
+ to occur at the same time as another transaction to the same address
+ on the AMBA 5 CHI interface, which can cause data corruption if the
+ interconnect reorders the two transactions.
+
+ The workaround promotes data cache clean instructions to
+ data cache clean-and-invalidate.
+ Please note that this does not necessarily enable the workaround,
+ as it depends on the alternative framework, which will only patch
+ the kernel if an affected CPU is detected.
+
+ If unsure, say Y.
+
+config ARM64_ERRATUM_824069
+ bool "Cortex-A53: 824069: Cache line might not be marked as clean after a CleanShared snoop"
+ default y
+ help
+ This option adds an alternative code sequence to work around ARM
+ erratum 824069 on Cortex-A53 parts up to r0p2 when it is connected
+ to a coherent interconnect.
+
+ If a Cortex-A53 processor is executing a store or prefetch for
+ write instruction at the same time as a processor in another
+ cluster is executing a cache maintenance operation to the same
+ address, then this erratum might cause a clean cache line to be
+ incorrectly marked as dirty.
+
+ The workaround promotes data cache clean instructions to
+ data cache clean-and-invalidate.
+ Please note that this option does not necessarily enable the
+ workaround, as it depends on the alternative framework, which will
+ only patch the kernel if an affected CPU is detected.
+
+ If unsure, say Y.
+
+config ARM64_ERRATUM_819472
+ bool "Cortex-A53: 819472: Store exclusive instructions might cause data corruption"
+ default y
+ help
+ This option adds an alternative code sequence to work around ARM
+ erratum 819472 on Cortex-A53 parts up to r0p1 with an L2 cache
+ present when it is connected to a coherent interconnect.
+
+ If the processor is executing a load and store exclusive sequence at
+ the same time as a processor in another cluster is executing a cache
+ maintenance operation to the same address, then this erratum might
+ cause data corruption.
+
+ The workaround promotes data cache clean instructions to
+ data cache clean-and-invalidate.
+ Please note that this does not necessarily enable the workaround,
+ as it depends on the alternative framework, which will only patch
+ the kernel if an affected CPU is detected.
+
+ If unsure, say Y.
+
+config ARM64_ERRATUM_832075
+ bool "Cortex-A57: 832075: possible deadlock on mixing exclusive memory accesses with device loads"
+ default y
+ help
+ This option adds an alternative code sequence to work around ARM
+ erratum 832075 on Cortex-A57 parts up to r1p2.
+
+ Affected Cortex-A57 parts might deadlock when exclusive load/store
+ instructions to Write-Back memory are mixed with Device loads.
+
+ The workaround is to promote device loads to use Load-Acquire
+ semantics.
+ Please note that this does not necessarily enable the workaround,
+ as it depends on the alternative framework, which will only patch
+ the kernel if an affected CPU is detected.
+
+ If unsure, say Y.
+
+endmenu
+
+
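All four Cortex-A53 entries above share the same fix: a data cache clean is promoted to a clean-and-invalidate, and the promotion only takes effect when the alternatives framework detects an affected CPU at boot. A minimal sketch (not part of this patch) of what such a patched call site can look like, using the ALTERNATIVE() macro and the ARM64_WORKAROUND_CLEAN_CACHE capability introduced further down in this diff; the real call sites live in the cache maintenance code and are not shown in this excerpt:

static inline void example_clean_dcache_line(void *addr)
{
	/* default: clean only; patched replacement: clean+invalidate */
	asm volatile(ALTERNATIVE("dc cvac, %0",
				 "dc civac, %0",
				 ARM64_WORKAROUND_CLEAN_CACHE)
		     : : "r" (addr) : "memory");
}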
choice
prompt "Page size"
default ARM64_4K_PAGES
@@ -345,6 +456,19 @@ config ARCH_HAS_CACHE_LINE_SIZE
source "mm/Kconfig"
+config SECCOMP
+ bool "Enable seccomp to safely compute untrusted bytecode"
+ ---help---
+ This kernel feature is useful for number crunching applications
+ that may need to compute untrusted bytecode during their
+ execution. By using pipes or other transports made available to
+ the process as file descriptors supporting the read/write
+ syscalls, it's possible to isolate those applications in
+ their own address space using seccomp. Once seccomp is
+ enabled via prctl(PR_SET_SECCOMP), it cannot be disabled
+ and the task is only allowed to execute a few safe syscalls
+ defined by each seccomp mode.
+
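A minimal userspace sketch of the prctl() interface referred to in the help text above; SECCOMP_MODE_STRICT is the classic mode that restricts the task to read, write, exit and sigreturn:

#include <linux/seccomp.h>
#include <sys/prctl.h>

int enter_strict_seccomp(void)
{
	/* irreversible: afterwards only read/write/exit/sigreturn succeed */
	return prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, 0, 0, 0);
}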
config XEN_DOM0
def_bool y
depends on XEN
@@ -361,6 +485,58 @@ config FORCE_MAX_ZONEORDER
default "14" if (ARM64_64K_PAGES && TRANSPARENT_HUGEPAGE)
default "11"
+menuconfig ARMV8_DEPRECATED
+ bool "Emulate deprecated/obsolete ARMv8 instructions"
+ depends on COMPAT
+ help
+ Legacy software support may require certain instructions
+ that have been deprecated or obsoleted in the architecture.
+
+ Say Y here to enable selective emulation of these
+ features.
+
+ If unsure, say Y
+
+if ARMV8_DEPRECATED
+
+config SWP_EMULATION
+ bool "Emulate SWP/SWPB instructions"
+ help
+ ARMv8 obsoletes the use of A32 SWP/SWPB instructions such that
+ they are always undefined. Say Y here to enable software
+ emulation of these instructions for userspace using LDXR/STXR.
+
+ In some older versions of glibc [<=2.8] SWP is used during futex
+ trylock() operations with the assumption that the code will not
+ be preempted. This invalid assumption may be more likely to fail
+ with SWP emulation enabled, leading to deadlock of the user
+ application.
+
+ NOTE: when accessing uncached shared regions, LDXR/STXR rely
+ on an external transaction monitoring block called a global
+ monitor to maintain update atomicity. If your system does not
+ implement a global monitor, this option can cause programs that
+ perform SWP operations to uncached memory to deadlock.
+
+ If unsure, say Y
+
+config CP15_BARRIER_EMULATION
+ bool "Emulate CP15 Barrier instructions"
+ help
+ The CP15 barrier instructions - CP15ISB, CP15DSB, and
+ CP15DMB - are deprecated in ARMv8 (and ARMv7). It is
+ strongly recommended to use the ISB, DSB, and DMB
+ instructions instead.
+
+ Say Y here to enable software emulation of these
+ instructions for AArch32 userspace code. When this option is
+ enabled, CP15 barrier usage is traced which can help
+ identify software that needs updating.
+
+ If unsure, say Y
+
+endif
+
endmenu
menu "Boot options"
@@ -401,6 +577,17 @@ config EFI
allow the kernel to be booted as an EFI application. This
is only useful on systems that have UEFI firmware.
+config DMI
+ bool "Enable support for SMBIOS (DMI) tables"
+ depends on EFI
+ default y
+ help
+ This enables support for SMBIOS/DMI tables.
+
+ This option is only useful on systems that have UEFI firmware.
+ However, even with this option, the resultant kernel should
+ continue to boot on existing non-UEFI platforms.
+
endmenu
menu "Userspace binary formats"
diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
index 0a12933e50ed..5fdd6dce8061 100644
--- a/arch/arm64/Kconfig.debug
+++ b/arch/arm64/Kconfig.debug
@@ -6,6 +6,18 @@ config FRAME_POINTER
bool
default y
+config ARM64_PTDUMP
+ bool "Export kernel pagetable layout to userspace via debugfs"
+ depends on DEBUG_KERNEL
+ select DEBUG_FS
+ help
+ Say Y here if you want to show the kernel pagetable layout in a
+ debugfs file. This information is only useful for kernel developers
+ who are working in architecture specific areas of the kernel.
+ It is probably not a good idea to enable this feature in a production
+ kernel.
+ If in doubt, say "N"
+
config STRICT_DEVMEM
bool "Filter access to /dev/mem"
depends on MMU
diff --git a/arch/arm64/boot/dts/apm/apm-storm.dtsi b/arch/arm64/boot/dts/apm/apm-storm.dtsi
index 295c72d52a1f..f1ad9c2ab2e9 100644
--- a/arch/arm64/boot/dts/apm/apm-storm.dtsi
+++ b/arch/arm64/boot/dts/apm/apm-storm.dtsi
@@ -599,7 +599,7 @@
compatible = "apm,xgene-enet";
status = "disabled";
reg = <0x0 0x17020000 0x0 0xd100>,
- <0x0 0X17030000 0x0 0X400>,
+ <0x0 0X17030000 0x0 0Xc300>,
<0x0 0X10000000 0x0 0X200>;
reg-names = "enet_csr", "ring_csr", "ring_cmd";
interrupts = <0x0 0x3c 0x4>;
@@ -624,9 +624,9 @@
sgenet0: ethernet@1f210000 {
compatible = "apm,xgene-enet";
status = "disabled";
- reg = <0x0 0x1f210000 0x0 0x10000>,
- <0x0 0x1f200000 0x0 0X10000>,
- <0x0 0x1B000000 0x0 0X20000>;
+ reg = <0x0 0x1f210000 0x0 0xd100>,
+ <0x0 0x1f200000 0x0 0Xc300>,
+ <0x0 0x1B000000 0x0 0X200>;
reg-names = "enet_csr", "ring_csr", "ring_cmd";
interrupts = <0x0 0xA0 0x4>;
dma-coherent;
@@ -639,7 +639,7 @@
compatible = "apm,xgene-enet";
status = "disabled";
reg = <0x0 0x1f610000 0x0 0xd100>,
- <0x0 0x1f600000 0x0 0X400>,
+ <0x0 0x1f600000 0x0 0Xc300>,
<0x0 0x18000000 0x0 0X200>;
reg-names = "enet_csr", "ring_csr", "ring_cmd";
interrupts = <0x0 0x60 0x4>;
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 4ce602c2c6de..dd301be89ecc 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -35,6 +35,9 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_ARCH_THUNDER=y
CONFIG_ARCH_VEXPRESS=y
CONFIG_ARCH_XGENE=y
+CONFIG_PCI=y
+CONFIG_PCI_MSI=y
+CONFIG_PCI_XGENE=y
CONFIG_SMP=y
CONFIG_PREEMPT=y
CONFIG_KSM=y
@@ -52,6 +55,7 @@ CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
+CONFIG_BPF_JIT=y
# CONFIG_WIRELESS is not set
CONFIG_NET_9P=y
CONFIG_NET_9P_VIRTIO=y
@@ -65,16 +69,17 @@ CONFIG_VIRTIO_BLK=y
CONFIG_BLK_DEV_SD=y
# CONFIG_SCSI_LOWLEVEL is not set
CONFIG_ATA=y
+CONFIG_SATA_AHCI=y
+CONFIG_SATA_AHCI_PLATFORM=y
CONFIG_AHCI_XGENE=y
-CONFIG_PHY_XGENE=y
CONFIG_PATA_PLATFORM=y
CONFIG_PATA_OF_PLATFORM=y
CONFIG_NETDEVICES=y
CONFIG_TUN=y
CONFIG_VIRTIO_NET=y
+CONFIG_NET_XGENE=y
CONFIG_SMC91X=y
CONFIG_SMSC911X=y
-CONFIG_NET_XGENE=y
# CONFIG_WLAN is not set
CONFIG_INPUT_EVDEV=y
# CONFIG_SERIO_SERPORT is not set
@@ -87,6 +92,11 @@ CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_VIRTIO_CONSOLE=y
# CONFIG_HW_RANDOM is not set
+# CONFIG_HMC_DRV is not set
+CONFIG_SPI=y
+CONFIG_SPI_PL022=y
+CONFIG_GPIO_PL061=y
+CONFIG_GPIO_XGENE=y
# CONFIG_HWMON is not set
CONFIG_REGULATOR=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
@@ -97,13 +107,25 @@ CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
# CONFIG_LOGO_LINUX_VGA16 is not set
CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
CONFIG_USB_ISP1760_HCD=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
CONFIG_USB_STORAGE=y
+CONFIG_USB_ULPI=y
CONFIG_MMC=y
CONFIG_MMC_ARMMMCI=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SPI=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_EFI=y
+CONFIG_RTC_DRV_XGENE=y
CONFIG_VIRTIO_BALLOON=y
CONFIG_VIRTIO_MMIO=y
# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_PHY_XGENE=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig
index 5562652c5316..a38b02ce5f9a 100644
--- a/arch/arm64/crypto/Kconfig
+++ b/arch/arm64/crypto/Kconfig
@@ -27,20 +27,19 @@ config CRYPTO_AES_ARM64_CE
tristate "AES core cipher using ARMv8 Crypto Extensions"
depends on ARM64 && KERNEL_MODE_NEON
select CRYPTO_ALGAPI
- select CRYPTO_AES
config CRYPTO_AES_ARM64_CE_CCM
tristate "AES in CCM mode using ARMv8 Crypto Extensions"
depends on ARM64 && KERNEL_MODE_NEON
select CRYPTO_ALGAPI
- select CRYPTO_AES
+ select CRYPTO_AES_ARM64_CE
select CRYPTO_AEAD
config CRYPTO_AES_ARM64_CE_BLK
tristate "AES in ECB/CBC/CTR/XTS modes using ARMv8 Crypto Extensions"
depends on ARM64 && KERNEL_MODE_NEON
select CRYPTO_BLKCIPHER
- select CRYPTO_AES
+ select CRYPTO_AES_ARM64_CE
select CRYPTO_ABLK_HELPER
config CRYPTO_AES_ARM64_NEON_BLK
diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c b/arch/arm64/crypto/aes-ce-ccm-glue.c
index 9e6cdde9b43d..0ac73b838fa3 100644
--- a/arch/arm64/crypto/aes-ce-ccm-glue.c
+++ b/arch/arm64/crypto/aes-ce-ccm-glue.c
@@ -16,6 +16,8 @@
#include <linux/crypto.h>
#include <linux/module.h>
+#include "aes-ce-setkey.h"
+
static int num_rounds(struct crypto_aes_ctx *ctx)
{
/*
@@ -48,7 +50,7 @@ static int ccm_setkey(struct crypto_aead *tfm, const u8 *in_key,
struct crypto_aes_ctx *ctx = crypto_aead_ctx(tfm);
int ret;
- ret = crypto_aes_expand_key(ctx, in_key, key_len);
+ ret = ce_aes_expandkey(ctx, in_key, key_len);
if (!ret)
return 0;
diff --git a/arch/arm64/crypto/aes-ce-cipher.c b/arch/arm64/crypto/aes-ce-cipher.c
index 2075e1acae6b..ce47792a983d 100644
--- a/arch/arm64/crypto/aes-ce-cipher.c
+++ b/arch/arm64/crypto/aes-ce-cipher.c
@@ -14,6 +14,8 @@
#include <linux/crypto.h>
#include <linux/module.h>
+#include "aes-ce-setkey.h"
+
MODULE_DESCRIPTION("Synchronous AES cipher using ARMv8 Crypto Extensions");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
@@ -124,6 +126,114 @@ static void aes_cipher_decrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
kernel_neon_end();
}
+/*
+ * aes_sub() - use the aese instruction to perform the AES sbox substitution
+ * on each byte in 'input'
+ */
+static u32 aes_sub(u32 input)
+{
+ u32 ret;
+
+ __asm__("dup v1.4s, %w[in] ;"
+ "movi v0.16b, #0 ;"
+ "aese v0.16b, v1.16b ;"
+ "umov %w[out], v0.4s[0] ;"
+
+ : [out] "=r"(ret)
+ : [in] "r"(input)
+ : "v0","v1");
+
+ return ret;
+}
+
+int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
+ unsigned int key_len)
+{
+ /*
+ * The AES key schedule round constants
+ */
+ static u8 const rcon[] = {
+ 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36,
+ };
+
+ u32 kwords = key_len / sizeof(u32);
+ struct aes_block *key_enc, *key_dec;
+ int i, j;
+
+ if (key_len != AES_KEYSIZE_128 &&
+ key_len != AES_KEYSIZE_192 &&
+ key_len != AES_KEYSIZE_256)
+ return -EINVAL;
+
+ memcpy(ctx->key_enc, in_key, key_len);
+ ctx->key_length = key_len;
+
+ kernel_neon_begin_partial(2);
+ for (i = 0; i < sizeof(rcon); i++) {
+ u32 *rki = ctx->key_enc + (i * kwords);
+ u32 *rko = rki + kwords;
+
+ rko[0] = ror32(aes_sub(rki[kwords - 1]), 8) ^ rcon[i] ^ rki[0];
+ rko[1] = rko[0] ^ rki[1];
+ rko[2] = rko[1] ^ rki[2];
+ rko[3] = rko[2] ^ rki[3];
+
+ if (key_len == AES_KEYSIZE_192) {
+ if (i >= 7)
+ break;
+ rko[4] = rko[3] ^ rki[4];
+ rko[5] = rko[4] ^ rki[5];
+ } else if (key_len == AES_KEYSIZE_256) {
+ if (i >= 6)
+ break;
+ rko[4] = aes_sub(rko[3]) ^ rki[4];
+ rko[5] = rko[4] ^ rki[5];
+ rko[6] = rko[5] ^ rki[6];
+ rko[7] = rko[6] ^ rki[7];
+ }
+ }
+
+ /*
+ * Generate the decryption keys for the Equivalent Inverse Cipher.
+ * This involves reversing the order of the round keys, and applying
+ * the Inverse Mix Columns transformation on all but the first and
+ * the last one.
+ */
+ key_enc = (struct aes_block *)ctx->key_enc;
+ key_dec = (struct aes_block *)ctx->key_dec;
+ j = num_rounds(ctx);
+
+ key_dec[0] = key_enc[j];
+ for (i = 1, j--; j > 0; i++, j--)
+ __asm__("ld1 {v0.16b}, %[in] ;"
+ "aesimc v1.16b, v0.16b ;"
+ "st1 {v1.16b}, %[out] ;"
+
+ : [out] "=Q"(key_dec[i])
+ : [in] "Q"(key_enc[j])
+ : "v0","v1");
+ key_dec[i] = key_enc[0];
+
+ kernel_neon_end();
+ return 0;
+}
+EXPORT_SYMBOL(ce_aes_expandkey);
+
+int ce_aes_setkey(struct crypto_tfm *tfm, const u8 *in_key,
+ unsigned int key_len)
+{
+ struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+ int ret;
+
+ ret = ce_aes_expandkey(ctx, in_key, key_len);
+ if (!ret)
+ return 0;
+
+ tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ return -EINVAL;
+}
+EXPORT_SYMBOL(ce_aes_setkey);
+
static struct crypto_alg aes_alg = {
.cra_name = "aes",
.cra_driver_name = "aes-ce",
@@ -135,7 +245,7 @@ static struct crypto_alg aes_alg = {
.cra_cipher = {
.cia_min_keysize = AES_MIN_KEY_SIZE,
.cia_max_keysize = AES_MAX_KEY_SIZE,
- .cia_setkey = crypto_aes_set_key,
+ .cia_setkey = ce_aes_setkey,
.cia_encrypt = aes_cipher_encrypt,
.cia_decrypt = aes_cipher_decrypt
}
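The key schedule above is the standard FIPS-197 expansion, with the S-box lookups done through the AESE instruction instead of a lookup table. As a hedged sanity check (a sketch, not part of the patch): for the all-zero AES-128 key, FIPS-197 gives w[4] the byte pattern 62 63 63 63, which in the little-endian u32 layout used here reads back as 0x63636362:

static int __init ce_aes_expandkey_selftest(void)
{
	static const u8 zero_key[AES_KEYSIZE_128];	/* all zeroes */
	struct crypto_aes_ctx ctx;

	ce_aes_expandkey(&ctx, zero_key, sizeof(zero_key));

	/* rko[0] = ror32(aes_sub(0), 8) ^ rcon[0] = 0x63636363 ^ 0x01 */
	return ctx.key_enc[4] == 0x63636362 ? 0 : -EINVAL;
}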
diff --git a/arch/arm64/crypto/aes-ce-setkey.h b/arch/arm64/crypto/aes-ce-setkey.h
new file mode 100644
index 000000000000..f08a6471d034
--- /dev/null
+++ b/arch/arm64/crypto/aes-ce-setkey.h
@@ -0,0 +1,5 @@
+
+int ce_aes_setkey(struct crypto_tfm *tfm, const u8 *in_key,
+ unsigned int key_len);
+int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
+ unsigned int key_len);
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
index 79cd911ef88c..801aae32841f 100644
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -16,9 +16,13 @@
#include <linux/module.h>
#include <linux/cpufeature.h>
+#include "aes-ce-setkey.h"
+
#ifdef USE_V8_CRYPTO_EXTENSIONS
#define MODE "ce"
#define PRIO 300
+#define aes_setkey ce_aes_setkey
+#define aes_expandkey ce_aes_expandkey
#define aes_ecb_encrypt ce_aes_ecb_encrypt
#define aes_ecb_decrypt ce_aes_ecb_decrypt
#define aes_cbc_encrypt ce_aes_cbc_encrypt
@@ -30,6 +34,8 @@ MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 Crypto Extensions");
#else
#define MODE "neon"
#define PRIO 200
+#define aes_setkey crypto_aes_set_key
+#define aes_expandkey crypto_aes_expand_key
#define aes_ecb_encrypt neon_aes_ecb_encrypt
#define aes_ecb_decrypt neon_aes_ecb_decrypt
#define aes_cbc_encrypt neon_aes_cbc_encrypt
@@ -79,10 +85,10 @@ static int xts_set_key(struct crypto_tfm *tfm, const u8 *in_key,
struct crypto_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
int ret;
- ret = crypto_aes_expand_key(&ctx->key1, in_key, key_len / 2);
+ ret = aes_expandkey(&ctx->key1, in_key, key_len / 2);
if (!ret)
- ret = crypto_aes_expand_key(&ctx->key2, &in_key[key_len / 2],
- key_len / 2);
+ ret = aes_expandkey(&ctx->key2, &in_key[key_len / 2],
+ key_len / 2);
if (!ret)
return 0;
@@ -288,7 +294,7 @@ static struct crypto_alg aes_algs[] = { {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
- .setkey = crypto_aes_set_key,
+ .setkey = aes_setkey,
.encrypt = ecb_encrypt,
.decrypt = ecb_decrypt,
},
@@ -306,7 +312,7 @@ static struct crypto_alg aes_algs[] = { {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
- .setkey = crypto_aes_set_key,
+ .setkey = aes_setkey,
.encrypt = cbc_encrypt,
.decrypt = cbc_decrypt,
},
@@ -324,7 +330,7 @@ static struct crypto_alg aes_algs[] = { {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
- .setkey = crypto_aes_set_key,
+ .setkey = aes_setkey,
.encrypt = ctr_encrypt,
.decrypt = ctr_encrypt,
},
diff --git a/arch/arm64/include/asm/alternative-asm.h b/arch/arm64/include/asm/alternative-asm.h
new file mode 100644
index 000000000000..919a67855b63
--- /dev/null
+++ b/arch/arm64/include/asm/alternative-asm.h
@@ -0,0 +1,29 @@
+#ifndef __ASM_ALTERNATIVE_ASM_H
+#define __ASM_ALTERNATIVE_ASM_H
+
+#ifdef __ASSEMBLY__
+
+.macro altinstruction_entry orig_offset alt_offset feature orig_len alt_len
+ .word \orig_offset - .
+ .word \alt_offset - .
+ .hword \feature
+ .byte \orig_len
+ .byte \alt_len
+.endm
+
+.macro alternative_insn insn1 insn2 cap
+661: \insn1
+662: .pushsection .altinstructions, "a"
+ altinstruction_entry 661b, 663f, \cap, 662b-661b, 664f-663f
+ .popsection
+ .pushsection .altinstr_replacement, "ax"
+663: \insn2
+664: .popsection
+ .if ((664b-663b) != (662b-661b))
+ .error "Alternatives instruction length mismatch"
+ .endif
+.endm
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_ALTERNATIVE_ASM_H */
diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
new file mode 100644
index 000000000000..d261f01e2bae
--- /dev/null
+++ b/arch/arm64/include/asm/alternative.h
@@ -0,0 +1,44 @@
+#ifndef __ASM_ALTERNATIVE_H
+#define __ASM_ALTERNATIVE_H
+
+#include <linux/types.h>
+#include <linux/stddef.h>
+#include <linux/stringify.h>
+
+struct alt_instr {
+ s32 orig_offset; /* offset to original instruction */
+ s32 alt_offset; /* offset to replacement instruction */
+ u16 cpufeature; /* cpufeature bit set for replacement */
+ u8 orig_len; /* size of original instruction(s) */
+ u8 alt_len; /* size of new instruction(s), <= orig_len */
+};
+
+void apply_alternatives_all(void);
+void apply_alternatives(void *start, size_t length);
+void free_alternatives_memory(void);
+
+#define ALTINSTR_ENTRY(feature) \
+ " .word 661b - .\n" /* label */ \
+ " .word 663f - .\n" /* new instruction */ \
+ " .hword " __stringify(feature) "\n" /* feature bit */ \
+ " .byte 662b-661b\n" /* source len */ \
+ " .byte 664f-663f\n" /* replacement len */
+
+/* alternative assembly primitive: */
+#define ALTERNATIVE(oldinstr, newinstr, feature) \
+ "661:\n\t" \
+ oldinstr "\n" \
+ "662:\n" \
+ ".pushsection .altinstructions,\"a\"\n" \
+ ALTINSTR_ENTRY(feature) \
+ ".popsection\n" \
+ ".pushsection .altinstr_replacement, \"a\"\n" \
+ "663:\n\t" \
+ newinstr "\n" \
+ "664:\n\t" \
+ ".popsection\n\t" \
+ ".if ((664b-663b) != (662b-661b))\n\t" \
+ " .error \"Alternatives instruction length mismatch\"\n\t"\
+ ".endif\n"
+
+#endif /* __ASM_ALTERNATIVE_H */
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index 88cc05b5f3ac..bde449936e2f 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -32,6 +32,8 @@
#ifndef __ASSEMBLY__
+#define __read_mostly __attribute__((__section__(".data..read_mostly")))
+
static inline int cache_line_size(void)
{
u32 cwg = cache_type_cwg();
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 689b6379188c..7ae31a2cc6c0 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -73,7 +73,7 @@ extern void flush_cache_all(void);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_icache_range(unsigned long start, unsigned long end);
extern void __flush_dcache_area(void *addr, size_t len);
-extern void __flush_cache_user_range(unsigned long start, unsigned long end);
+extern long __flush_cache_user_range(unsigned long start, unsigned long end);
static inline void flush_cache_mm(struct mm_struct *mm)
{
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index ddb9d7830558..cb9593079f29 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -19,6 +19,7 @@
#define __ASM_CMPXCHG_H
#include <linux/bug.h>
+#include <linux/mmdebug.h>
#include <asm/barrier.h>
@@ -152,6 +153,51 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
return oldval;
}
+#define system_has_cmpxchg_double() 1
+
+static inline int __cmpxchg_double(volatile void *ptr1, volatile void *ptr2,
+ unsigned long old1, unsigned long old2,
+ unsigned long new1, unsigned long new2, int size)
+{
+ unsigned long loop, lost;
+
+ switch (size) {
+ case 8:
+ VM_BUG_ON((unsigned long *)ptr2 - (unsigned long *)ptr1 != 1);
+ do {
+ asm volatile("// __cmpxchg_double8\n"
+ " ldxp %0, %1, %2\n"
+ " eor %0, %0, %3\n"
+ " eor %1, %1, %4\n"
+ " orr %1, %0, %1\n"
+ " mov %w0, #0\n"
+ " cbnz %1, 1f\n"
+ " stxp %w0, %5, %6, %2\n"
+ "1:\n"
+ : "=&r"(loop), "=&r"(lost), "+Q" (*(u64 *)ptr1)
+ : "r" (old1), "r"(old2), "r"(new1), "r"(new2));
+ } while (loop);
+ break;
+ default:
+ BUILD_BUG();
+ }
+
+ return !lost;
+}
+
+static inline int __cmpxchg_double_mb(volatile void *ptr1, volatile void *ptr2,
+ unsigned long old1, unsigned long old2,
+ unsigned long new1, unsigned long new2, int size)
+{
+ int ret;
+
+ smp_mb();
+ ret = __cmpxchg_double(ptr1, ptr2, old1, old2, new1, new2, size);
+ smp_mb();
+
+ return ret;
+}
+
static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
unsigned long new, int size)
{
@@ -182,6 +228,33 @@ static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
__ret; \
})
+#define cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2) \
+({\
+ int __ret;\
+ __ret = __cmpxchg_double_mb((ptr1), (ptr2), (unsigned long)(o1), \
+ (unsigned long)(o2), (unsigned long)(n1), \
+ (unsigned long)(n2), sizeof(*(ptr1)));\
+ __ret; \
+})
+
+#define cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2) \
+({\
+ int __ret;\
+ __ret = __cmpxchg_double((ptr1), (ptr2), (unsigned long)(o1), \
+ (unsigned long)(o2), (unsigned long)(n1), \
+ (unsigned long)(n2), sizeof(*(ptr1)));\
+ __ret; \
+})
+
+#define this_cpu_cmpxchg_1(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
+#define this_cpu_cmpxchg_2(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
+#define this_cpu_cmpxchg_4(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
+#define this_cpu_cmpxchg_8(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
+
+#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2) \
+ cmpxchg_double_local(raw_cpu_ptr(&(ptr1)), raw_cpu_ptr(&(ptr2)), \
+ o1, o2, n1, n2)
+
#define cmpxchg64(ptr,o,n) cmpxchg((ptr),(o),(n))
#define cmpxchg64_local(ptr,o,n) cmpxchg_local((ptr),(o),(n))
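A hedged usage sketch (not part of the patch) for the new double-word primitive: cmpxchg_double() operates on two adjacent, naturally aligned 64-bit words (the VM_BUG_ON above enforces adjacency) and returns non-zero on success, which is the contract generic users such as the SLUB allocator rely on for their freelist/counters pair:

struct freelist_pair {
	unsigned long first;
	unsigned long second;	/* must directly follow 'first' */
};

static inline int update_pair(struct freelist_pair *p,
			      unsigned long o1, unsigned long o2,
			      unsigned long n1, unsigned long n2)
{
	return cmpxchg_double(&p->first, &p->second, o1, o2, n1, n2);
}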
diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
index 56de5aadede2..3fb053fa6e98 100644
--- a/arch/arm64/include/asm/compat.h
+++ b/arch/arm64/include/asm/compat.h
@@ -205,6 +205,13 @@ typedef struct compat_siginfo {
compat_long_t _band; /* POLL_IN, POLL_OUT, POLL_MSG */
int _fd;
} _sigpoll;
+
+ /* SIGSYS */
+ struct {
+ compat_uptr_t _call_addr; /* calling user insn */
+ int _syscall; /* triggering system call number */
+ compat_uint_t _arch; /* AUDIT_ARCH_* of syscall */
+ } _sigsys;
} _sifields;
} compat_siginfo_t;
diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h
index 056443086019..ace70682499b 100644
--- a/arch/arm64/include/asm/cpu.h
+++ b/arch/arm64/include/asm/cpu.h
@@ -30,6 +30,8 @@ struct cpuinfo_arm64 {
u32 reg_dczid;
u32 reg_midr;
+ u64 reg_id_aa64dfr0;
+ u64 reg_id_aa64dfr1;
u64 reg_id_aa64isar0;
u64 reg_id_aa64isar1;
u64 reg_id_aa64mmfr0;
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index cd4ac0516488..07547ccc1f2b 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -21,9 +21,38 @@
#define MAX_CPU_FEATURES (8 * sizeof(elf_hwcap))
#define cpu_feature(x) ilog2(HWCAP_ ## x)
+#define ARM64_WORKAROUND_CLEAN_CACHE 0
+#define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE 1
+
+#define ARM64_NCAPS 2
+
+#ifndef __ASSEMBLY__
+
+extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
+
static inline bool cpu_have_feature(unsigned int num)
{
return elf_hwcap & (1UL << num);
}
+static inline bool cpus_have_cap(unsigned int num)
+{
+ if (num >= ARM64_NCAPS)
+ return false;
+ return test_bit(num, cpu_hwcaps);
+}
+
+static inline void cpus_set_cap(unsigned int num)
+{
+ if (num >= ARM64_NCAPS)
+ pr_warn("Attempt to set an illegal CPU capability (%d >= %d)\n",
+ num, ARM64_NCAPS);
+ else
+ __set_bit(num, cpu_hwcaps);
+}
+
+void check_local_cpu_errata(void);
+
+#endif /* __ASSEMBLY__ */
+
#endif
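The cpu_errata.c file that consumes these helpers is built by the Makefile change below but is not included in this excerpt; a simplified, hedged sketch of its job (the real code also checks variant/revision ranges rather than just the part number):

void check_local_cpu_errata(void)
{
	u32 midr = read_cpuid_id();

	/* illustrative only: any Cortex-A57 (part 0xd07) gets the 832075 fix */
	if (MIDR_IMPLEMENTOR(midr) == ARM_CPU_IMP_ARM &&
	    MIDR_PARTNUM(midr) == 0xd07)
		cpus_set_cap(ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE);
}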
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 379d0b874328..8adb986a3086 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -57,6 +57,11 @@
#define MIDR_IMPLEMENTOR(midr) \
(((midr) & MIDR_IMPLEMENTOR_MASK) >> MIDR_IMPLEMENTOR_SHIFT)
+#define MIDR_CPU_PART(imp, partnum) \
+ (((imp) << MIDR_IMPLEMENTOR_SHIFT) | \
+ (0xf << MIDR_ARCHITECTURE_SHIFT) | \
+ ((partnum) << MIDR_PARTNUM_SHIFT))
+
#define ARM_CPU_IMP_ARM 0x41
#define ARM_CPU_IMP_APM 0x50
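A hedged example of how the new MIDR_CPU_PART() helper composes full MIDR match values for the errata tables; the Cortex-A53/A57 part numbers below are shown for illustration:

#define MIDR_CORTEX_A53		MIDR_CPU_PART(ARM_CPU_IMP_ARM, 0xd03)
#define MIDR_CORTEX_A57		MIDR_CPU_PART(ARM_CPU_IMP_ARM, 0xd07)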
diff --git a/arch/arm64/include/asm/dmi.h b/arch/arm64/include/asm/dmi.h
new file mode 100644
index 000000000000..69d37d87b159
--- /dev/null
+++ b/arch/arm64/include/asm/dmi.h
@@ -0,0 +1,31 @@
+/*
+ * arch/arm64/include/asm/dmi.h
+ *
+ * Copyright (C) 2013 Linaro Limited.
+ * Written by: Yi Li (yi.li@linaro.org)
+ *
+ * based on arch/ia64/include/asm/dmi.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef __ASM_DMI_H
+#define __ASM_DMI_H
+
+#include <linux/io.h>
+#include <linux/slab.h>
+
+/*
+ * According to section 2.3.6 of the UEFI spec, the firmware should not
+ * request a virtual mapping for configuration tables such as SMBIOS.
+ * This means we have to map them before use.
+ */
+#define dmi_early_remap(x, l) ioremap_cache(x, l)
+#define dmi_early_unmap(x, l) iounmap(x)
+#define dmi_remap(x, l) ioremap_cache(x, l)
+#define dmi_unmap(x) iounmap(x)
+#define dmi_alloc(l) kzalloc(l, GFP_KERNEL)
+
+#endif
diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h
index 5f7bfe6df723..9ef6eca905ca 100644
--- a/arch/arm64/include/asm/fixmap.h
+++ b/arch/arm64/include/asm/fixmap.h
@@ -31,6 +31,7 @@
*
*/
enum fixed_addresses {
+ FIX_HOLE,
FIX_EARLYCON_MEM_BASE,
__end_of_permanent_fixed_addresses,
@@ -56,10 +57,11 @@ enum fixed_addresses {
#define FIXMAP_PAGE_IO __pgprot(PROT_DEVICE_nGnRE)
-extern void __early_set_fixmap(enum fixed_addresses idx,
- phys_addr_t phys, pgprot_t flags);
+void __init early_fixmap_init(void);
-#define __set_fixmap __early_set_fixmap
+#define __early_set_fixmap __set_fixmap
+
+extern void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot);
#include <asm-generic/fixmap.h>
diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h
index 024c46183c3c..0ad735166d9f 100644
--- a/arch/arm64/include/asm/hwcap.h
+++ b/arch/arm64/include/asm/hwcap.h
@@ -30,6 +30,7 @@
#define COMPAT_HWCAP_IDIVA (1 << 17)
#define COMPAT_HWCAP_IDIVT (1 << 18)
#define COMPAT_HWCAP_IDIV (COMPAT_HWCAP_IDIVA|COMPAT_HWCAP_IDIVT)
+#define COMPAT_HWCAP_LPAE (1 << 20)
#define COMPAT_HWCAP_EVTSTRM (1 << 21)
#define COMPAT_HWCAP2_AES (1 << 0)
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index 56a9e63b6c33..e2ff32a93b5c 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -354,6 +354,16 @@ bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn);
int aarch64_insn_patch_text_nosync(void *addr, u32 insn);
int aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt);
int aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt);
+
+bool aarch32_insn_is_wide(u32 insn);
+
+#define A32_RN_OFFSET 16
+#define A32_RT_OFFSET 12
+#define A32_RT2_OFFSET 0
+
+u32 aarch32_insn_extract_reg_num(u32 insn, int offset);
+u32 aarch32_insn_mcr_extract_opc2(u32 insn);
+u32 aarch32_insn_mcr_extract_crm(u32 insn);
#endif /* __ASSEMBLY__ */
#endif /* __ASM_INSN_H */
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 79f1d519221f..75825b63464d 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -28,6 +28,8 @@
#include <asm/barrier.h>
#include <asm/pgtable.h>
#include <asm/early_ioremap.h>
+#include <asm/alternative.h>
+#include <asm/cpufeature.h>
#include <xen/xen.h>
@@ -57,28 +59,41 @@ static inline void __raw_writeq(u64 val, volatile void __iomem *addr)
static inline u8 __raw_readb(const volatile void __iomem *addr)
{
u8 val;
- asm volatile("ldrb %w0, [%1]" : "=r" (val) : "r" (addr));
+ asm volatile(ALTERNATIVE("ldrb %w0, [%1]",
+ "ldarb %w0, [%1]",
+ ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE)
+ : "=r" (val) : "r" (addr));
return val;
}
static inline u16 __raw_readw(const volatile void __iomem *addr)
{
u16 val;
- asm volatile("ldrh %w0, [%1]" : "=r" (val) : "r" (addr));
+
+ asm volatile(ALTERNATIVE("ldrh %w0, [%1]",
+ "ldarh %w0, [%1]",
+ ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE)
+ : "=r" (val) : "r" (addr));
return val;
}
static inline u32 __raw_readl(const volatile void __iomem *addr)
{
u32 val;
- asm volatile("ldr %w0, [%1]" : "=r" (val) : "r" (addr));
+ asm volatile(ALTERNATIVE("ldr %w0, [%1]",
+ "ldar %w0, [%1]",
+ ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE)
+ : "=r" (val) : "r" (addr));
return val;
}
static inline u64 __raw_readq(const volatile void __iomem *addr)
{
u64 val;
- asm volatile("ldr %0, [%1]" : "=r" (val) : "r" (addr));
+ asm volatile(ALTERNATIVE("ldr %0, [%1]",
+ "ldar %0, [%1]",
+ ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE)
+ : "=r" (val) : "r" (addr));
return val;
}
diff --git a/arch/arm64/include/asm/irq.h b/arch/arm64/include/asm/irq.h
index e1f7ecdde11f..94c53674a31d 100644
--- a/arch/arm64/include/asm/irq.h
+++ b/arch/arm64/include/asm/irq.h
@@ -3,7 +3,8 @@
#include <asm-generic/irq.h>
-extern void (*handle_arch_irq)(struct pt_regs *);
+struct pt_regs;
+
extern void migrate_irqs(void);
extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 7fd3e27e3ccc..8afb863f5a9e 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -18,6 +18,7 @@
#ifndef __ARM64_KVM_ARM_H__
#define __ARM64_KVM_ARM_H__
+#include <asm/memory.h>
#include <asm/types.h>
/* Hyp Configuration Register (HCR) bits */
@@ -160,9 +161,9 @@
#endif
#define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
-#define VTTBR_BADDR_MASK (((1LLU << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
-#define VTTBR_VMID_SHIFT (48LLU)
-#define VTTBR_VMID_MASK (0xffLLU << VTTBR_VMID_SHIFT)
+#define VTTBR_BADDR_MASK (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
+#define VTTBR_VMID_SHIFT (UL(48))
+#define VTTBR_VMID_MASK (UL(0xFF) << VTTBR_VMID_SHIFT)
/* Hyp System Trap Register */
#define HSTR_EL2_TTEE (1 << 16)
@@ -185,13 +186,13 @@
/* Exception Syndrome Register (ESR) bits */
#define ESR_EL2_EC_SHIFT (26)
-#define ESR_EL2_EC (0x3fU << ESR_EL2_EC_SHIFT)
-#define ESR_EL2_IL (1U << 25)
+#define ESR_EL2_EC (UL(0x3f) << ESR_EL2_EC_SHIFT)
+#define ESR_EL2_IL (UL(1) << 25)
#define ESR_EL2_ISS (ESR_EL2_IL - 1)
#define ESR_EL2_ISV_SHIFT (24)
-#define ESR_EL2_ISV (1U << ESR_EL2_ISV_SHIFT)
+#define ESR_EL2_ISV (UL(1) << ESR_EL2_ISV_SHIFT)
#define ESR_EL2_SAS_SHIFT (22)
-#define ESR_EL2_SAS (3U << ESR_EL2_SAS_SHIFT)
+#define ESR_EL2_SAS (UL(3) << ESR_EL2_SAS_SHIFT)
#define ESR_EL2_SSE (1 << 21)
#define ESR_EL2_SRT_SHIFT (16)
#define ESR_EL2_SRT_MASK (0x1f << ESR_EL2_SRT_SHIFT)
@@ -205,16 +206,16 @@
#define ESR_EL2_FSC_TYPE (0x3c)
#define ESR_EL2_CV_SHIFT (24)
-#define ESR_EL2_CV (1U << ESR_EL2_CV_SHIFT)
+#define ESR_EL2_CV (UL(1) << ESR_EL2_CV_SHIFT)
#define ESR_EL2_COND_SHIFT (20)
-#define ESR_EL2_COND (0xfU << ESR_EL2_COND_SHIFT)
+#define ESR_EL2_COND (UL(0xf) << ESR_EL2_COND_SHIFT)
#define FSC_FAULT (0x04)
#define FSC_PERM (0x0c)
/* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
-#define HPFAR_MASK (~0xFUL)
+#define HPFAR_MASK (~UL(0xf))
#define ESR_EL2_EC_UNKNOWN (0x00)
#define ESR_EL2_EC_WFI (0x01)
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index ccc7087d3c4e..a62cd077457b 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -142,7 +142,7 @@ static inline void *phys_to_virt(phys_addr_t x)
* virt_to_page(k) convert a _valid_ virtual address to struct page *
* virt_addr_valid(k) indicates whether a virtual address is valid
*/
-#define ARCH_PFN_OFFSET PHYS_PFN_OFFSET
+#define ARCH_PFN_OFFSET ((unsigned long)PHYS_PFN_OFFSET)
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
diff --git a/arch/arm64/include/asm/opcodes.h b/arch/arm64/include/asm/opcodes.h
new file mode 100644
index 000000000000..4e603ea36ad3
--- /dev/null
+++ b/arch/arm64/include/asm/opcodes.h
@@ -0,0 +1 @@
+#include <../../arm/include/asm/opcodes.h>
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
index 5279e5733386..09da25bc596f 100644
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -44,6 +44,221 @@ static inline unsigned long __my_cpu_offset(void)
#endif /* CONFIG_SMP */
+#define PERCPU_OP(op, asm_op) \
+static inline unsigned long __percpu_##op(void *ptr, \
+ unsigned long val, int size) \
+{ \
+ unsigned long loop, ret; \
+ \
+ switch (size) { \
+ case 1: \
+ do { \
+ asm ("//__per_cpu_" #op "_1\n" \
+ "ldxrb %w[ret], %[ptr]\n" \
+ #asm_op " %w[ret], %w[ret], %w[val]\n" \
+ "stxrb %w[loop], %w[ret], %[ptr]\n" \
+ : [loop] "=&r" (loop), [ret] "=&r" (ret), \
+ [ptr] "+Q"(*(u8 *)ptr) \
+ : [val] "Ir" (val)); \
+ } while (loop); \
+ break; \
+ case 2: \
+ do { \
+ asm ("//__per_cpu_" #op "_2\n" \
+ "ldxrh %w[ret], %[ptr]\n" \
+ #asm_op " %w[ret], %w[ret], %w[val]\n" \
+ "stxrh %w[loop], %w[ret], %[ptr]\n" \
+ : [loop] "=&r" (loop), [ret] "=&r" (ret), \
+ [ptr] "+Q"(*(u16 *)ptr) \
+ : [val] "Ir" (val)); \
+ } while (loop); \
+ break; \
+ case 4: \
+ do { \
+ asm ("//__per_cpu_" #op "_4\n" \
+ "ldxr %w[ret], %[ptr]\n" \
+ #asm_op " %w[ret], %w[ret], %w[val]\n" \
+ "stxr %w[loop], %w[ret], %[ptr]\n" \
+ : [loop] "=&r" (loop), [ret] "=&r" (ret), \
+ [ptr] "+Q"(*(u32 *)ptr) \
+ : [val] "Ir" (val)); \
+ } while (loop); \
+ break; \
+ case 8: \
+ do { \
+ asm ("//__per_cpu_" #op "_8\n" \
+ "ldxr %[ret], %[ptr]\n" \
+ #asm_op " %[ret], %[ret], %[val]\n" \
+ "stxr %w[loop], %[ret], %[ptr]\n" \
+ : [loop] "=&r" (loop), [ret] "=&r" (ret), \
+ [ptr] "+Q"(*(u64 *)ptr) \
+ : [val] "Ir" (val)); \
+ } while (loop); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ } \
+ \
+ return ret; \
+}
+
+PERCPU_OP(add, add)
+PERCPU_OP(and, and)
+PERCPU_OP(or, orr)
+#undef PERCPU_OP
+
+static inline unsigned long __percpu_read(void *ptr, int size)
+{
+ unsigned long ret;
+
+ switch (size) {
+ case 1:
+ ret = ACCESS_ONCE(*(u8 *)ptr);
+ break;
+ case 2:
+ ret = ACCESS_ONCE(*(u16 *)ptr);
+ break;
+ case 4:
+ ret = ACCESS_ONCE(*(u32 *)ptr);
+ break;
+ case 8:
+ ret = ACCESS_ONCE(*(u64 *)ptr);
+ break;
+ default:
+ BUILD_BUG();
+ }
+
+ return ret;
+}
+
+static inline void __percpu_write(void *ptr, unsigned long val, int size)
+{
+ switch (size) {
+ case 1:
+ ACCESS_ONCE(*(u8 *)ptr) = (u8)val;
+ break;
+ case 2:
+ ACCESS_ONCE(*(u16 *)ptr) = (u16)val;
+ break;
+ case 4:
+ ACCESS_ONCE(*(u32 *)ptr) = (u32)val;
+ break;
+ case 8:
+ ACCESS_ONCE(*(u64 *)ptr) = (u64)val;
+ break;
+ default:
+ BUILD_BUG();
+ }
+}
+
+static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
+ int size)
+{
+ unsigned long ret, loop;
+
+ switch (size) {
+ case 1:
+ do {
+ asm ("//__percpu_xchg_1\n"
+ "ldxrb %w[ret], %[ptr]\n"
+ "stxrb %w[loop], %w[val], %[ptr]\n"
+ : [loop] "=&r"(loop), [ret] "=&r"(ret),
+ [ptr] "+Q"(*(u8 *)ptr)
+ : [val] "r" (val));
+ } while (loop);
+ break;
+ case 2:
+ do {
+ asm ("//__percpu_xchg_2\n"
+ "ldxrh %w[ret], %[ptr]\n"
+ "stxrh %w[loop], %w[val], %[ptr]\n"
+ : [loop] "=&r"(loop), [ret] "=&r"(ret),
+ [ptr] "+Q"(*(u16 *)ptr)
+ : [val] "r" (val));
+ } while (loop);
+ break;
+ case 4:
+ do {
+ asm ("//__percpu_xchg_4\n"
+ "ldxr %w[ret], %[ptr]\n"
+ "stxr %w[loop], %w[val], %[ptr]\n"
+ : [loop] "=&r"(loop), [ret] "=&r"(ret),
+ [ptr] "+Q"(*(u32 *)ptr)
+ : [val] "r" (val));
+ } while (loop);
+ break;
+ case 8:
+ do {
+ asm ("//__percpu_xchg_8\n"
+ "ldxr %[ret], %[ptr]\n"
+ "stxr %w[loop], %[val], %[ptr]\n"
+ : [loop] "=&r"(loop), [ret] "=&r"(ret),
+ [ptr] "+Q"(*(u64 *)ptr)
+ : [val] "r" (val));
+ } while (loop);
+ break;
+ default:
+ BUILD_BUG();
+ }
+
+ return ret;
+}
+
+#define _percpu_add(pcp, val) \
+ __percpu_add(raw_cpu_ptr(&(pcp)), val, sizeof(pcp))
+
+#define _percpu_add_return(pcp, val) (typeof(pcp)) (_percpu_add(pcp, val))
+
+#define _percpu_and(pcp, val) \
+ __percpu_and(raw_cpu_ptr(&(pcp)), val, sizeof(pcp))
+
+#define _percpu_or(pcp, val) \
+ __percpu_or(raw_cpu_ptr(&(pcp)), val, sizeof(pcp))
+
+#define _percpu_read(pcp) (typeof(pcp)) \
+ (__percpu_read(raw_cpu_ptr(&(pcp)), sizeof(pcp)))
+
+#define _percpu_write(pcp, val) \
+ __percpu_write(raw_cpu_ptr(&(pcp)), (unsigned long)(val), sizeof(pcp))
+
+#define _percpu_xchg(pcp, val) (typeof(pcp)) \
+ (__percpu_xchg(raw_cpu_ptr(&(pcp)), (unsigned long)(val), sizeof(pcp)))
+
+#define this_cpu_add_1(pcp, val) _percpu_add(pcp, val)
+#define this_cpu_add_2(pcp, val) _percpu_add(pcp, val)
+#define this_cpu_add_4(pcp, val) _percpu_add(pcp, val)
+#define this_cpu_add_8(pcp, val) _percpu_add(pcp, val)
+
+#define this_cpu_add_return_1(pcp, val) _percpu_add_return(pcp, val)
+#define this_cpu_add_return_2(pcp, val) _percpu_add_return(pcp, val)
+#define this_cpu_add_return_4(pcp, val) _percpu_add_return(pcp, val)
+#define this_cpu_add_return_8(pcp, val) _percpu_add_return(pcp, val)
+
+#define this_cpu_and_1(pcp, val) _percpu_and(pcp, val)
+#define this_cpu_and_2(pcp, val) _percpu_and(pcp, val)
+#define this_cpu_and_4(pcp, val) _percpu_and(pcp, val)
+#define this_cpu_and_8(pcp, val) _percpu_and(pcp, val)
+
+#define this_cpu_or_1(pcp, val) _percpu_or(pcp, val)
+#define this_cpu_or_2(pcp, val) _percpu_or(pcp, val)
+#define this_cpu_or_4(pcp, val) _percpu_or(pcp, val)
+#define this_cpu_or_8(pcp, val) _percpu_or(pcp, val)
+
+#define this_cpu_read_1(pcp) _percpu_read(pcp)
+#define this_cpu_read_2(pcp) _percpu_read(pcp)
+#define this_cpu_read_4(pcp) _percpu_read(pcp)
+#define this_cpu_read_8(pcp) _percpu_read(pcp)
+
+#define this_cpu_write_1(pcp, val) _percpu_write(pcp, val)
+#define this_cpu_write_2(pcp, val) _percpu_write(pcp, val)
+#define this_cpu_write_4(pcp, val) _percpu_write(pcp, val)
+#define this_cpu_write_8(pcp, val) _percpu_write(pcp, val)
+
+#define this_cpu_xchg_1(pcp, val) _percpu_xchg(pcp, val)
+#define this_cpu_xchg_2(pcp, val) _percpu_xchg(pcp, val)
+#define this_cpu_xchg_4(pcp, val) _percpu_xchg(pcp, val)
+#define this_cpu_xchg_8(pcp, val) _percpu_xchg(pcp, val)
+
#include <asm-generic/percpu.h>
#endif /* __ASM_PERCPU_H */
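These hooks sit behind the generic this_cpu_*() accessors, so an ordinary per-cpu update now compiles down to a single ldxr/stxr loop instead of the preempt-disable fallback. A minimal, hedged usage sketch:

#include <linux/percpu.h>

static DEFINE_PER_CPU(u64, example_hits);

static void count_hit(void)
{
	/* routed to __percpu_add(..., 1, 8) via this_cpu_add_8() above */
	this_cpu_add(example_hits, 1);
}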
diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
index d5bed02073d6..e20df38a8ff3 100644
--- a/arch/arm64/include/asm/pgalloc.h
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -26,11 +26,13 @@
#define check_pgt_cache() do { } while (0)
+#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)
+
#if CONFIG_ARM64_PGTABLE_LEVELS > 2
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- return (pmd_t *)get_zeroed_page(GFP_KERNEL | __GFP_REPEAT);
+ return (pmd_t *)__get_free_page(PGALLOC_GFP);
}
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
@@ -50,7 +52,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- return (pud_t *)get_zeroed_page(GFP_KERNEL | __GFP_REPEAT);
+ return (pud_t *)__get_free_page(PGALLOC_GFP);
}
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
@@ -69,8 +71,6 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
extern pgd_t *pgd_alloc(struct mm_struct *mm);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
-#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)
-
static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
{
diff --git a/arch/arm64/include/asm/seccomp.h b/arch/arm64/include/asm/seccomp.h
new file mode 100644
index 000000000000..c76fac979629
--- /dev/null
+++ b/arch/arm64/include/asm/seccomp.h
@@ -0,0 +1,25 @@
+/*
+ * arch/arm64/include/asm/seccomp.h
+ *
+ * Copyright (C) 2014 Linaro Limited
+ * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASM_SECCOMP_H
+#define _ASM_SECCOMP_H
+
+#include <asm/unistd.h>
+
+#ifdef CONFIG_COMPAT
+#define __NR_seccomp_read_32 __NR_compat_read
+#define __NR_seccomp_write_32 __NR_compat_write
+#define __NR_seccomp_exit_32 __NR_compat_exit
+#define __NR_seccomp_sigreturn_32 __NR_compat_rt_sigreturn
+#endif /* CONFIG_COMPAT */
+
+#include <asm-generic/seccomp.h>
+
+#endif /* _ASM_SECCOMP_H */
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index a82c0c5c8b52..c028fe37456f 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -19,10 +19,6 @@
#ifndef __ASM_TLB_H
#define __ASM_TLB_H
-#define __tlb_remove_pmd_tlb_entry __tlb_remove_pmd_tlb_entry
-
-#include <asm-generic/tlb.h>
-
#include <linux/pagemap.h>
#include <linux/swap.h>
@@ -37,71 +33,22 @@ static inline void __tlb_remove_table(void *_table)
#define tlb_remove_entry(tlb, entry) tlb_remove_page(tlb, entry)
#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
-/*
- * There's three ways the TLB shootdown code is used:
- * 1. Unmapping a range of vmas. See zap_page_range(), unmap_region().
- * tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called.
- * 2. Unmapping all vmas. See exit_mmap().
- * tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called.
- * Page tables will be freed.
- * 3. Unmapping argument pages. See shift_arg_pages().
- * tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called.
- */
+#include <asm-generic/tlb.h>
+
static inline void tlb_flush(struct mmu_gather *tlb)
{
if (tlb->fullmm) {
flush_tlb_mm(tlb->mm);
- } else if (tlb->end > 0) {
+ } else {
struct vm_area_struct vma = { .vm_mm = tlb->mm, };
flush_tlb_range(&vma, tlb->start, tlb->end);
- tlb->start = TASK_SIZE;
- tlb->end = 0;
- }
-}
-
-static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
-{
- if (!tlb->fullmm) {
- tlb->start = min(tlb->start, addr);
- tlb->end = max(tlb->end, addr + PAGE_SIZE);
- }
-}
-
-/*
- * Memorize the range for the TLB flush.
- */
-static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
- unsigned long addr)
-{
- tlb_add_flush(tlb, addr);
-}
-
-/*
- * In the case of tlb vma handling, we can optimise these away in the
- * case where we're doing a full MM flush. When we're doing a munmap,
- * the vmas are adjusted to only cover the region to be torn down.
- */
-static inline void tlb_start_vma(struct mmu_gather *tlb,
- struct vm_area_struct *vma)
-{
- if (!tlb->fullmm) {
- tlb->start = TASK_SIZE;
- tlb->end = 0;
}
}
-static inline void tlb_end_vma(struct mmu_gather *tlb,
- struct vm_area_struct *vma)
-{
- if (!tlb->fullmm)
- tlb_flush(tlb);
-}
-
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
unsigned long addr)
{
pgtable_page_dtor(pte);
- tlb_add_flush(tlb, addr);
tlb_remove_entry(tlb, pte);
}
@@ -109,7 +56,6 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
unsigned long addr)
{
- tlb_add_flush(tlb, addr);
tlb_remove_entry(tlb, virt_to_page(pmdp));
}
#endif
@@ -118,15 +64,8 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp,
unsigned long addr)
{
- tlb_add_flush(tlb, addr);
tlb_remove_entry(tlb, virt_to_page(pudp));
}
#endif
-static inline void __tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp,
- unsigned long address)
-{
- tlb_add_flush(tlb, address);
-}
-
#endif
diff --git a/arch/arm64/include/asm/traps.h b/arch/arm64/include/asm/traps.h
index 10ca8ff93cc2..232e4ba5d314 100644
--- a/arch/arm64/include/asm/traps.h
+++ b/arch/arm64/include/asm/traps.h
@@ -18,6 +18,22 @@
#ifndef __ASM_TRAP_H
#define __ASM_TRAP_H
+#include <linux/list.h>
+
+struct pt_regs;
+
+struct undef_hook {
+ struct list_head node;
+ u32 instr_mask;
+ u32 instr_val;
+ u64 pstate_mask;
+ u64 pstate_val;
+ int (*fn)(struct pt_regs *regs, u32 instr);
+};
+
+void register_undef_hook(struct undef_hook *hook);
+void unregister_undef_hook(struct undef_hook *hook);
+
static inline int in_exception_text(unsigned long ptr)
{
extern char __exception_text_start[];
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index 6d2bf419431d..49c9aefd24a5 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -31,6 +31,9 @@
* Compat syscall numbers used by the AArch64 kernel.
*/
#define __NR_compat_restart_syscall 0
+#define __NR_compat_exit 1
+#define __NR_compat_read 3
+#define __NR_compat_write 4
#define __NR_compat_sigreturn 119
#define __NR_compat_rt_sigreturn 173
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index da1f06b535e3..8893cebcea5b 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -787,8 +787,11 @@ __SYSCALL(__NR_sched_setattr, sys_sched_setattr)
__SYSCALL(__NR_sched_getattr, sys_sched_getattr)
#define __NR_renameat2 382
__SYSCALL(__NR_renameat2, sys_renameat2)
- /* 383 for seccomp */
+#define __NR_seccomp 383
+__SYSCALL(__NR_seccomp, sys_seccomp)
#define __NR_getrandom 384
__SYSCALL(__NR_getrandom, sys_getrandom)
#define __NR_memfd_create 385
__SYSCALL(__NR_memfd_create, sys_memfd_create)
+#define __NR_bpf 386
+__SYSCALL(__NR_bpf, sys_bpf)
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 5bd029b43644..eaa77ed7766a 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -5,6 +5,7 @@
CPPFLAGS_vmlinux.lds := -DTEXT_OFFSET=$(TEXT_OFFSET)
AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
CFLAGS_efi-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
+CFLAGS_armv8_deprecated.o := -I$(src)
CFLAGS_REMOVE_ftrace.o = -pg
CFLAGS_REMOVE_insn.o = -pg
@@ -15,10 +16,11 @@ arm64-obj-y := cputable.o debug-monitors.o entry.o irq.o fpsimd.o \
entry-fpsimd.o process.o ptrace.o setup.o signal.o \
sys.o stacktrace.o time.o traps.o io.o vdso.o \
hyp-stub.o psci.o cpu_ops.o insn.o return_address.o \
- cpuinfo.o
+ cpuinfo.o cpu_errata.o alternative.o
arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \
- sys_compat.o
+ sys_compat.o \
+ ../../arm/kernel/opcodes.o
arm64-obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o
arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o
arm64-obj-$(CONFIG_SMP) += smp.o smp_spin_table.o topology.o
@@ -31,6 +33,7 @@ arm64-obj-$(CONFIG_JUMP_LABEL) += jump_label.o
arm64-obj-$(CONFIG_KGDB) += kgdb.o
arm64-obj-$(CONFIG_EFI) += efi.o efi-stub.o efi-entry.o
arm64-obj-$(CONFIG_PCI) += pci.o
+arm64-obj-$(CONFIG_ARMV8_DEPRECATED) += armv8_deprecated.o
obj-y += $(arm64-obj-y) vdso/
obj-m += $(arm64-obj-m)
diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
new file mode 100644
index 000000000000..ad7821d64a1d
--- /dev/null
+++ b/arch/arm64/kernel/alternative.c
@@ -0,0 +1,85 @@
+/*
+ * alternative runtime patching
+ * inspired by the x86 version
+ *
+ * Copyright (C) 2014 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) "alternatives: " fmt
+
+#include <linux/init.h>
+#include <linux/cpu.h>
+#include <asm/cacheflush.h>
+#include <asm/alternative.h>
+#include <asm/cpufeature.h>
+#include <linux/stop_machine.h>
+
+extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
+
+struct alt_region {
+ struct alt_instr *begin;
+ struct alt_instr *end;
+};
+
+static int __apply_alternatives(void *alt_region)
+{
+ struct alt_instr *alt;
+ struct alt_region *region = alt_region;
+ u8 *origptr, *replptr;
+
+ for (alt = region->begin; alt < region->end; alt++) {
+ if (!cpus_have_cap(alt->cpufeature))
+ continue;
+
+ BUG_ON(alt->alt_len > alt->orig_len);
+
+ pr_info_once("patching kernel code\n");
+
+ origptr = (u8 *)&alt->orig_offset + alt->orig_offset;
+ replptr = (u8 *)&alt->alt_offset + alt->alt_offset;
+ memcpy(origptr, replptr, alt->alt_len);
+ flush_icache_range((uintptr_t)origptr,
+ (uintptr_t)(origptr + alt->alt_len));
+ }
+
+ return 0;
+}
+
+void apply_alternatives_all(void)
+{
+ struct alt_region region = {
+ .begin = __alt_instructions,
+ .end = __alt_instructions_end,
+ };
+
+ /* better not try code patching on a live SMP system */
+ stop_machine(__apply_alternatives, &region, NULL);
+}
+
+void apply_alternatives(void *start, size_t length)
+{
+ struct alt_region region = {
+ .begin = start,
+ .end = start + length,
+ };
+
+ __apply_alternatives(&region);
+}
+
+void free_alternatives_memory(void)
+{
+ free_reserved_area(__alt_instructions, __alt_instructions_end,
+ 0, "alternatives");
+}
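apply_alternatives_all() is meant to run exactly once, after every CPU has booted and recorded its errata bits via check_local_cpu_errata(); a hedged sketch of the expected call site, assuming the rest of the series wires it into SMP bring-up (not shown in this excerpt):

void __init smp_cpus_done(unsigned int max_cpus)
{
	/* all CPUs are up and cpu_hwcaps is stable: patch the kernel once */
	apply_alternatives_all();
}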
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
new file mode 100644
index 000000000000..c363671d7509
--- /dev/null
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -0,0 +1,553 @@
+/*
+ * Copyright (C) 2014 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/cpu.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/perf_event.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/sysctl.h>
+
+#include <asm/insn.h>
+#include <asm/opcodes.h>
+#include <asm/system_misc.h>
+#include <asm/traps.h>
+#include <asm/uaccess.h>
+
+#define CREATE_TRACE_POINTS
+#include "trace-events-emulation.h"
+
+/*
+ * The runtime support for a deprecated instruction can be in one of the
+ * following three states -
+ *
+ * 0 = undef
+ * 1 = emulate (software emulation)
+ * 2 = hw (supported in hardware)
+ */
+enum insn_emulation_mode {
+ INSN_UNDEF,
+ INSN_EMULATE,
+ INSN_HW,
+};
+
+enum legacy_insn_status {
+ INSN_DEPRECATED,
+ INSN_OBSOLETE,
+};
+
+struct insn_emulation_ops {
+ const char *name;
+ enum legacy_insn_status status;
+ struct undef_hook *hooks;
+ int (*set_hw_mode)(bool enable);
+};
+
+struct insn_emulation {
+ struct list_head node;
+ struct insn_emulation_ops *ops;
+ int current_mode;
+ int min;
+ int max;
+};
+
+static LIST_HEAD(insn_emulation);
+static int nr_insn_emulated;
+static DEFINE_RAW_SPINLOCK(insn_emulation_lock);
+
+static void register_emulation_hooks(struct insn_emulation_ops *ops)
+{
+ struct undef_hook *hook;
+
+ BUG_ON(!ops->hooks);
+
+ for (hook = ops->hooks; hook->instr_mask; hook++)
+ register_undef_hook(hook);
+
+ pr_notice("Registered %s emulation handler\n", ops->name);
+}
+
+static void remove_emulation_hooks(struct insn_emulation_ops *ops)
+{
+ struct undef_hook *hook;
+
+ BUG_ON(!ops->hooks);
+
+ for (hook = ops->hooks; hook->instr_mask; hook++)
+ unregister_undef_hook(hook);
+
+ pr_notice("Removed %s emulation handler\n", ops->name);
+}
+
+static int update_insn_emulation_mode(struct insn_emulation *insn,
+ enum insn_emulation_mode prev)
+{
+ int ret = 0;
+
+ switch (prev) {
+ case INSN_UNDEF: /* Nothing to be done */
+ break;
+ case INSN_EMULATE:
+ remove_emulation_hooks(insn->ops);
+ break;
+ case INSN_HW:
+ if (insn->ops->set_hw_mode) {
+ insn->ops->set_hw_mode(false);
+ pr_notice("Disabled %s support\n", insn->ops->name);
+ }
+ break;
+ }
+
+ switch (insn->current_mode) {
+ case INSN_UNDEF:
+ break;
+ case INSN_EMULATE:
+ register_emulation_hooks(insn->ops);
+ break;
+ case INSN_HW:
+ if (insn->ops->set_hw_mode && insn->ops->set_hw_mode(true))
+ pr_notice("Enabled %s support\n", insn->ops->name);
+ else
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static void register_insn_emulation(struct insn_emulation_ops *ops)
+{
+ unsigned long flags;
+ struct insn_emulation *insn;
+
+ insn = kzalloc(sizeof(*insn), GFP_KERNEL);
+ insn->ops = ops;
+ insn->min = INSN_UNDEF;
+
+ switch (ops->status) {
+ case INSN_DEPRECATED:
+ insn->current_mode = INSN_EMULATE;
+ insn->max = INSN_HW;
+ break;
+ case INSN_OBSOLETE:
+ insn->current_mode = INSN_UNDEF;
+ insn->max = INSN_EMULATE;
+ break;
+ }
+
+ raw_spin_lock_irqsave(&insn_emulation_lock, flags);
+ list_add(&insn->node, &insn_emulation);
+ nr_insn_emulated++;
+ raw_spin_unlock_irqrestore(&insn_emulation_lock, flags);
+
+ /* Register any handlers if required */
+ update_insn_emulation_mode(insn, INSN_UNDEF);
+}
+
+static int emulation_proc_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos)
+{
+ int ret = 0;
+ struct insn_emulation *insn = (struct insn_emulation *) table->data;
+ enum insn_emulation_mode prev_mode = insn->current_mode;
+
+ table->data = &insn->current_mode;
+ ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+
+ if (ret || !write || prev_mode == insn->current_mode)
+ goto ret;
+
+ ret = update_insn_emulation_mode(insn, prev_mode);
+ if (ret) {
+ /* Mode change failed, revert to previous mode. */
+ insn->current_mode = prev_mode;
+ update_insn_emulation_mode(insn, INSN_UNDEF);
+ }
+ret:
+ table->data = insn;
+ return ret;
+}
+
+static struct ctl_table ctl_abi[] = {
+ {
+ .procname = "abi",
+ .mode = 0555,
+ },
+ { }
+};
+
+static void register_insn_emulation_sysctl(struct ctl_table *table)
+{
+ unsigned long flags;
+ int i = 0;
+ struct insn_emulation *insn;
+ struct ctl_table *insns_sysctl, *sysctl;
+
+ insns_sysctl = kzalloc(sizeof(*sysctl) * (nr_insn_emulated + 1),
+ GFP_KERNEL);
+
+ raw_spin_lock_irqsave(&insn_emulation_lock, flags);
+ list_for_each_entry(insn, &insn_emulation, node) {
+ sysctl = &insns_sysctl[i];
+
+ sysctl->mode = 0644;
+ sysctl->maxlen = sizeof(int);
+
+ sysctl->procname = insn->ops->name;
+ sysctl->data = insn;
+ sysctl->extra1 = &insn->min;
+ sysctl->extra2 = &insn->max;
+ sysctl->proc_handler = emulation_proc_handler;
+ i++;
+ }
+ raw_spin_unlock_irqrestore(&insn_emulation_lock, flags);
+
+ table->child = insns_sysctl;
+ register_sysctl_table(table);
+}
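Each registered emulation therefore appears as an integer sysctl under /proc/sys/abi/<name>, taking the modes listed at the top of this file (0 = undef, 1 = emulate, 2 = hw). A hedged userspace sketch for the "swp" handler defined below:

#include <fcntl.h>
#include <unistd.h>

static void enable_swp_emulation(void)
{
	int fd = open("/proc/sys/abi/swp", O_WRONLY);

	if (fd >= 0) {
		write(fd, "1", 1);	/* 1 = software emulation */
		close(fd);
	}
}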
+
+/*
+ * Implement emulation of the SWP/SWPB instructions using load-exclusive and
+ * store-exclusive.
+ *
+ * Syntax of SWP{B} instruction: SWP{B}<c> <Rt>, <Rt2>, [<Rn>]
+ * Where: Rt = destination
+ * Rt2 = source
+ * Rn = address
+ */
+
+/*
+ * Error-checking SWP macros implemented using ldxr{b}/stxr{b}
+ */
+#define __user_swpX_asm(data, addr, res, temp, B) \
+ __asm__ __volatile__( \
+ " mov %w2, %w1\n" \
+ "0: ldxr"B" %w1, [%3]\n" \
+ "1: stxr"B" %w0, %w2, [%3]\n" \
+ " cbz %w0, 2f\n" \
+ " mov %w0, %w4\n" \
+ "2:\n" \
+ " .pushsection .fixup,\"ax\"\n" \
+ " .align 2\n" \
+ "3: mov %w0, %w5\n" \
+ " b 2b\n" \
+ " .popsection" \
+ " .pushsection __ex_table,\"a\"\n" \
+ " .align 3\n" \
+ " .quad 0b, 3b\n" \
+ " .quad 1b, 3b\n" \
+ " .popsection" \
+ : "=&r" (res), "+r" (data), "=&r" (temp) \
+ : "r" (addr), "i" (-EAGAIN), "i" (-EFAULT) \
+ : "memory")
+
+#define __user_swp_asm(data, addr, res, temp) \
+ __user_swpX_asm(data, addr, res, temp, "")
+#define __user_swpb_asm(data, addr, res, temp) \
+ __user_swpX_asm(data, addr, res, temp, "b")
+
+/*
+ * Bit 22 of the instruction encoding distinguishes between
+ * the SWP and SWPB variants (bit set means SWPB).
+ */
+#define TYPE_SWPB (1 << 22)
+
+/*
+ * Set up process info to signal segmentation fault - called on access error.
+ */
+static void set_segfault(struct pt_regs *regs, unsigned long addr)
+{
+ siginfo_t info;
+
+ down_read(&current->mm->mmap_sem);
+ if (find_vma(current->mm, addr) == NULL)
+ info.si_code = SEGV_MAPERR;
+ else
+ info.si_code = SEGV_ACCERR;
+ up_read(&current->mm->mmap_sem);
+
+ info.si_signo = SIGSEGV;
+ info.si_errno = 0;
+ info.si_addr = (void *) instruction_pointer(regs);
+
+ pr_debug("SWP{B} emulation: access caused memory abort!\n");
+ arm64_notify_die("Illegal memory access", regs, &info, 0);
+}
+
+static int emulate_swpX(unsigned int address, unsigned int *data,
+ unsigned int type)
+{
+ unsigned int res = 0;
+
+ if ((type != TYPE_SWPB) && (address & 0x3)) {
+ /* SWP to unaligned address not permitted */
+ pr_debug("SWP instruction on unaligned pointer!\n");
+ return -EFAULT;
+ }
+
+ while (1) {
+ unsigned long temp;
+
+ if (type == TYPE_SWPB)
+ __user_swpb_asm(*data, address, res, temp);
+ else
+ __user_swp_asm(*data, address, res, temp);
+
+ if (likely(res != -EAGAIN) || signal_pending(current))
+ break;
+
+ cond_resched();
+ }
+
+ return res;
+}
+
+/*
+ * swp_handler logs the id of the calling process, dissects the instruction,
+ * sanity checks the memory location, calls emulate_swpX for the actual
+ * operation and deals with fixup/error handling before returning
+ */
+static int swp_handler(struct pt_regs *regs, u32 instr)
+{
+ u32 destreg, data, type, address = 0;
+ int rn, rt2, res = 0;
+
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->pc);
+
+ type = instr & TYPE_SWPB;
+
+ switch (arm_check_condition(instr, regs->pstate)) {
+ case ARM_OPCODE_CONDTEST_PASS:
+ break;
+ case ARM_OPCODE_CONDTEST_FAIL:
+ /* Condition failed - return to next instruction */
+ goto ret;
+ case ARM_OPCODE_CONDTEST_UNCOND:
+ /* If unconditional encoding - not a SWP, undef */
+ return -EFAULT;
+ default:
+ return -EINVAL;
+ }
+
+ rn = aarch32_insn_extract_reg_num(instr, A32_RN_OFFSET);
+ rt2 = aarch32_insn_extract_reg_num(instr, A32_RT2_OFFSET);
+
+ address = (u32)regs->user_regs.regs[rn];
+ data = (u32)regs->user_regs.regs[rt2];
+ destreg = aarch32_insn_extract_reg_num(instr, A32_RT_OFFSET);
+
+ pr_debug("addr in r%d->0x%08x, dest is r%d, source in r%d->0x%08x)\n",
+ rn, address, destreg,
+ aarch32_insn_extract_reg_num(instr, A32_RT2_OFFSET), data);
+
+ /* Check access in reasonable access range for both SWP and SWPB */
+ if (!access_ok(VERIFY_WRITE, (address & ~3), 4)) {
+ pr_debug("SWP{B} emulation: access to 0x%08x not allowed!\n",
+ address);
+ goto fault;
+ }
+
+ res = emulate_swpX(address, &data, type);
+ if (res == -EFAULT)
+ goto fault;
+ else if (res == 0)
+ regs->user_regs.regs[destreg] = data;
+
+ret:
+ if (type == TYPE_SWPB)
+ trace_instruction_emulation("swpb", regs->pc);
+ else
+ trace_instruction_emulation("swp", regs->pc);
+
+ pr_warn_ratelimited("\"%s\" (%ld) uses obsolete SWP{B} instruction at 0x%llx\n",
+ current->comm, (unsigned long)current->pid, regs->pc);
+
+ regs->pc += 4;
+ return 0;
+
+fault:
+ set_segfault(regs, address);
+
+ return 0;
+}
+
+/*
+ * Only emulate SWP/SWPB executed in ARM state/User mode.
+ * The kernel must be SWP-free and SWP{B} does not exist in Thumb.
+ */
+static struct undef_hook swp_hooks[] = {
+ {
+ .instr_mask = 0x0fb00ff0,
+ .instr_val = 0x01000090,
+ .pstate_mask = COMPAT_PSR_MODE_MASK,
+ .pstate_val = COMPAT_PSR_MODE_USR,
+ .fn = swp_handler
+ },
+ { }
+};
+
+static struct insn_emulation_ops swp_ops = {
+ .name = "swp",
+ .status = INSN_OBSOLETE,
+ .hooks = swp_hooks,
+ .set_hw_mode = NULL,
+};
+
+static int cp15barrier_handler(struct pt_regs *regs, u32 instr)
+{
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->pc);
+
+ switch (arm_check_condition(instr, regs->pstate)) {
+ case ARM_OPCODE_CONDTEST_PASS:
+ break;
+ case ARM_OPCODE_CONDTEST_FAIL:
+ /* Condition failed - return to next instruction */
+ goto ret;
+ case ARM_OPCODE_CONDTEST_UNCOND:
+ /* If unconditional encoding - not a barrier instruction */
+ return -EFAULT;
+ default:
+ return -EINVAL;
+ }
+
+ switch (aarch32_insn_mcr_extract_crm(instr)) {
+ case 10:
+ /*
+ * dmb - mcr p15, 0, Rt, c7, c10, 5
+ * dsb - mcr p15, 0, Rt, c7, c10, 4
+ */
+ if (aarch32_insn_mcr_extract_opc2(instr) == 5) {
+ dmb(sy);
+ trace_instruction_emulation(
+ "mcr p15, 0, Rt, c7, c10, 5 ; dmb", regs->pc);
+ } else {
+ dsb(sy);
+ trace_instruction_emulation(
+ "mcr p15, 0, Rt, c7, c10, 4 ; dsb", regs->pc);
+ }
+ break;
+ case 5:
+ /*
+ * isb - mcr p15, 0, Rt, c7, c5, 4
+ *
+ * Taking an exception or returning from one acts as an
+ * instruction barrier. So no explicit barrier needed here.
+ */
+ trace_instruction_emulation(
+ "mcr p15, 0, Rt, c7, c5, 4 ; isb", regs->pc);
+ break;
+ }
+
+ret:
+ pr_warn_ratelimited("\"%s\" (%ld) uses deprecated CP15 Barrier instruction at 0x%llx\n",
+ current->comm, (unsigned long)current->pid, regs->pc);
+
+ regs->pc += 4;
+ return 0;
+}
+
+#define SCTLR_EL1_CP15BEN (1 << 5)
+
+static inline void config_sctlr_el1(u32 clear, u32 set)
+{
+ u32 val;
+
+ asm volatile("mrs %0, sctlr_el1" : "=r" (val));
+ val &= ~clear;
+ val |= set;
+ asm volatile("msr sctlr_el1, %0" : : "r" (val));
+}
+
+static void enable_cp15_ben(void *info)
+{
+ config_sctlr_el1(0, SCTLR_EL1_CP15BEN);
+}
+
+static void disable_cp15_ben(void *info)
+{
+ config_sctlr_el1(SCTLR_EL1_CP15BEN, 0);
+}
+
+static int cpu_hotplug_notify(struct notifier_block *b,
+ unsigned long action, void *hcpu)
+{
+ switch (action) {
+ case CPU_STARTING:
+ case CPU_STARTING_FROZEN:
+ enable_cp15_ben(NULL);
+ return NOTIFY_DONE;
+ case CPU_DYING:
+ case CPU_DYING_FROZEN:
+ disable_cp15_ben(NULL);
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block cpu_hotplug_notifier = {
+ .notifier_call = cpu_hotplug_notify,
+};
+
+static int cp15_barrier_set_hw_mode(bool enable)
+{
+ if (enable) {
+ register_cpu_notifier(&cpu_hotplug_notifier);
+ on_each_cpu(enable_cp15_ben, NULL, true);
+ } else {
+ unregister_cpu_notifier(&cpu_hotplug_notifier);
+ on_each_cpu(disable_cp15_ben, NULL, true);
+ }
+
+ return true;
+}
+
+static struct undef_hook cp15_barrier_hooks[] = {
+ {
+ .instr_mask = 0x0fff0fdf,
+ .instr_val = 0x0e070f9a,
+ .pstate_mask = COMPAT_PSR_MODE_MASK,
+ .pstate_val = COMPAT_PSR_MODE_USR,
+ .fn = cp15barrier_handler,
+ },
+ {
+ .instr_mask = 0x0fff0fff,
+ .instr_val = 0x0e070f95,
+ .pstate_mask = COMPAT_PSR_MODE_MASK,
+ .pstate_val = COMPAT_PSR_MODE_USR,
+ .fn = cp15barrier_handler,
+ },
+ { }
+};
+
+static struct insn_emulation_ops cp15_barrier_ops = {
+ .name = "cp15_barrier",
+ .status = INSN_DEPRECATED,
+ .hooks = cp15_barrier_hooks,
+ .set_hw_mode = cp15_barrier_set_hw_mode,
+};
+
+/*
+ * Invoked as a late_initcall, since it is not needed before init is spawned.
+ */
+static int __init armv8_deprecated_init(void)
+{
+ if (IS_ENABLED(CONFIG_SWP_EMULATION))
+ register_insn_emulation(&swp_ops);
+
+ if (IS_ENABLED(CONFIG_CP15_BARRIER_EMULATION))
+ register_insn_emulation(&cp15_barrier_ops);
+
+ register_insn_emulation_sysctl(ctl_abi);
+
+ return 0;
+}
+
+late_initcall(armv8_deprecated_init);
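
The emulation above implements SWP{B}'s atomic-exchange semantics with an
ldxr/stxr retry loop. As a point of reference only, the same semantics can be
expressed in portable C with the GCC/Clang __atomic builtins; this is a
user-space illustration of what the deprecated instruction provided, not the
kernel's implementation:

    #include <stdint.h>

    /* Atomically exchange *addr with newval and return the old value - the
     * operation SWP (word) and SWPB (byte) performed on ARMv7. On ARMv8 the
     * compiler lowers this to a load-exclusive/store-exclusive loop (or an
     * LSE swap where available). */
    static inline uint32_t swap_word(uint32_t *addr, uint32_t newval)
    {
            return __atomic_exchange_n(addr, newval, __ATOMIC_SEQ_CST);
    }

    static inline uint8_t swap_byte(uint8_t *addr, uint8_t newval)
    {
            return __atomic_exchange_n(addr, newval, __ATOMIC_SEQ_CST);
    }
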
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
new file mode 100644
index 000000000000..fa62637e63a8
--- /dev/null
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -0,0 +1,111 @@
+/*
+ * Contains CPU specific errata definitions
+ *
+ * Copyright (C) 2014 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) "alternatives: " fmt
+
+#include <linux/types.h>
+#include <asm/cpu.h>
+#include <asm/cputype.h>
+#include <asm/cpufeature.h>
+
+#define MIDR_CORTEX_A53 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
+#define MIDR_CORTEX_A57 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
+
+/*
+ * Add a struct or another datatype to the union below if you need
+ * different means to detect an affected CPU.
+ */
+struct arm64_cpu_capabilities {
+ const char *desc;
+ u16 capability;
+ bool (*is_affected)(struct arm64_cpu_capabilities *);
+ union {
+ struct {
+ u32 midr_model;
+ u32 midr_range_min, midr_range_max;
+ };
+ };
+};
+
+#define CPU_MODEL_MASK (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | \
+ MIDR_ARCHITECTURE_MASK)
+
+static bool __maybe_unused
+is_affected_midr_range(struct arm64_cpu_capabilities *entry)
+{
+ u32 midr = read_cpuid_id();
+
+ if ((midr & CPU_MODEL_MASK) != entry->midr_model)
+ return false;
+
+ midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
+
+ return (midr >= entry->midr_range_min && midr <= entry->midr_range_max);
+}
+
+#define MIDR_RANGE(model, min, max) \
+ .is_affected = is_affected_midr_range, \
+ .midr_model = model, \
+ .midr_range_min = min, \
+ .midr_range_max = max
+
+struct arm64_cpu_capabilities arm64_errata[] = {
+#if defined(CONFIG_ARM64_ERRATUM_826319) || \
+ defined(CONFIG_ARM64_ERRATUM_827319) || \
+ defined(CONFIG_ARM64_ERRATUM_824069)
+ {
+ /* Cortex-A53 r0p[012] */
+ .desc = "ARM errata 826319, 827319, 824069",
+ .capability = ARM64_WORKAROUND_CLEAN_CACHE,
+ MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x02),
+ },
+#endif
+#ifdef CONFIG_ARM64_ERRATUM_819472
+ {
+ /* Cortex-A53 r0p[01] */
+ .desc = "ARM errata 819472",
+ .capability = ARM64_WORKAROUND_CLEAN_CACHE,
+ MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x01),
+ },
+#endif
+#ifdef CONFIG_ARM64_ERRATUM_832075
+ {
+ /* Cortex-A57 r0p0 - r1p2 */
+ .desc = "ARM erratum 832075",
+ .capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
+ MIDR_RANGE(MIDR_CORTEX_A57, 0x00, 0x12),
+ },
+#endif
+ {
+ }
+};
+
+void check_local_cpu_errata(void)
+{
+ struct arm64_cpu_capabilities *cpus = arm64_errata;
+ int i;
+
+ for (i = 0; cpus[i].desc; i++) {
+ if (!cpus[i].is_affected(&cpus[i]))
+ continue;
+
+ if (!cpus_have_cap(cpus[i].capability))
+ pr_info("enabling workaround for %s\n", cpus[i].desc);
+ cpus_set_cap(cpus[i].capability);
+ }
+}
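
is_affected_midr_range() matches MIDR_EL1 against a CPU model and a
variant/revision window. A minimal sketch of the field layout it relies on,
using the architectural MIDR_EL1 encoding (implementer [31:24], variant
[23:20], architecture [19:16], part number [15:4], revision [3:0]); the helper
names below are illustrative, not the kernel's:

    #include <stdint.h>

    static inline unsigned int midr_implementer(uint32_t midr) { return (midr >> 24) & 0xff;  }
    static inline unsigned int midr_variant(uint32_t midr)     { return (midr >> 20) & 0xf;   }
    static inline unsigned int midr_partnum(uint32_t midr)     { return (midr >>  4) & 0xfff; }
    static inline unsigned int midr_revision(uint32_t midr)    { return midr & 0xf;           }

    /* Example: a Cortex-A53 r0p2 reports implementer 0x41 ('A'), part 0xd03,
     * variant 0 and revision 2, so the masked variant/revision value is 0x02
     * and falls inside MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x02) above. */
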
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 504fdaa8367e..57b641747534 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -18,6 +18,7 @@
#include <asm/cachetype.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
+#include <asm/cpufeature.h>
#include <linux/bitops.h>
#include <linux/bug.h>
@@ -111,6 +112,15 @@ static void cpuinfo_sanity_check(struct cpuinfo_arm64 *cur)
diff |= CHECK(cntfrq, boot, cur, cpu);
/*
+ * The kernel uses self-hosted debug features and expects CPUs to
+ * support identical debug features. We presently need CTX_CMPs, WRPs,
+ * and BRPs to be identical.
+ * ID_AA64DFR1 is currently RES0.
+ */
+ diff |= CHECK(id_aa64dfr0, boot, cur, cpu);
+ diff |= CHECK(id_aa64dfr1, boot, cur, cpu);
+
+ /*
* Even in big.LITTLE, processors should be identical instruction-set
* wise.
*/
@@ -143,7 +153,12 @@ static void cpuinfo_sanity_check(struct cpuinfo_arm64 *cur)
diff |= CHECK(id_isar3, boot, cur, cpu);
diff |= CHECK(id_isar4, boot, cur, cpu);
diff |= CHECK(id_isar5, boot, cur, cpu);
- diff |= CHECK(id_mmfr0, boot, cur, cpu);
+ /*
+ * Regardless of the value of the AuxReg field, the AIFSR, ADFSR, and
+ * ACTLR formats could differ across CPUs and therefore would have to
+ * be trapped for virtualization anyway.
+ */
+ diff |= CHECK_MASK(id_mmfr0, 0xff0fffff, boot, cur, cpu);
diff |= CHECK(id_mmfr1, boot, cur, cpu);
diff |= CHECK(id_mmfr2, boot, cur, cpu);
diff |= CHECK(id_mmfr3, boot, cur, cpu);
@@ -155,7 +170,7 @@ static void cpuinfo_sanity_check(struct cpuinfo_arm64 *cur)
* pretend to support them.
*/
WARN_TAINT_ONCE(diff, TAINT_CPU_OUT_OF_SPEC,
- "Unsupported CPU feature variation.");
+ "Unsupported CPU feature variation.\n");
}
static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
@@ -165,6 +180,8 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
info->reg_dczid = read_cpuid(DCZID_EL0);
info->reg_midr = read_cpuid_id();
+ info->reg_id_aa64dfr0 = read_cpuid(ID_AA64DFR0_EL1);
+ info->reg_id_aa64dfr1 = read_cpuid(ID_AA64DFR1_EL1);
info->reg_id_aa64isar0 = read_cpuid(ID_AA64ISAR0_EL1);
info->reg_id_aa64isar1 = read_cpuid(ID_AA64ISAR1_EL1);
info->reg_id_aa64mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
@@ -186,6 +203,8 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1);
cpuinfo_detect_icache_policy(info);
+
+ check_local_cpu_errata();
}
void cpuinfo_store_cpu(void)
diff --git a/arch/arm64/kernel/efi-entry.S b/arch/arm64/kernel/efi-entry.S
index 619b1dd7bcde..8ce9b0577442 100644
--- a/arch/arm64/kernel/efi-entry.S
+++ b/arch/arm64/kernel/efi-entry.S
@@ -54,18 +54,18 @@ ENTRY(efi_stub_entry)
b.eq efi_load_fail
/*
- * efi_entry() will have relocated the kernel image if necessary
- * and we return here with device tree address in x0 and the kernel
- * entry point stored at *image_addr. Save those values in registers
- * which are callee preserved.
+ * efi_entry() will have copied the kernel image if necessary and we
+ * return here with device tree address in x0 and the kernel entry
+ * point stored at *image_addr. Save those values in registers which
+ * are callee preserved.
*/
mov x20, x0 // DTB address
ldr x0, [sp, #16] // relocated _text address
- mov x21, x0
+ ldr x21, =stext_offset
+ add x21, x0, x21
/*
- * Flush dcache covering current runtime addresses
- * of kernel text/data. Then flush all of icache.
+ * Calculate size of the kernel Image (same for original and copy).
*/
adrp x1, _text
add x1, x1, #:lo12:_text
@@ -73,9 +73,24 @@ ENTRY(efi_stub_entry)
add x2, x2, #:lo12:_edata
sub x1, x2, x1
+ /*
+ * Flush the copied Image to the PoC, and ensure it is not shadowed by
+ * stale icache entries from before relocation.
+ */
bl __flush_dcache_area
ic ialluis
+ /*
+ * Ensure that the rest of this function (in the original Image) is
+ * visible when the caches are disabled. The I-cache can't have stale
+ * entries for the VA range of the current image, so no maintenance is
+ * necessary.
+ */
+ adr x0, efi_stub_entry
+ adr x1, efi_stub_entry_end
+ sub x1, x1, x0
+ bl __flush_dcache_area
+
/* Turn off Dcache and MMU */
mrs x0, CurrentEL
cmp x0, #CurrentEL_EL2
@@ -105,4 +120,5 @@ efi_load_fail:
ldp x29, x30, [sp], #32
ret
+efi_stub_entry_end:
ENDPROC(efi_stub_entry)
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index 95c49ebc660d..6fac253bc783 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -11,6 +11,7 @@
*
*/
+#include <linux/dmi.h>
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/memblock.h>
@@ -112,8 +113,6 @@ static int __init uefi_init(void)
efi.systab->hdr.revision & 0xffff, vendor);
retval = efi_config_init(NULL);
- if (retval == 0)
- set_bit(EFI_CONFIG_TABLES, &efi.flags);
out:
early_memunmap(efi.systab, sizeof(efi_system_table_t));
@@ -125,17 +124,17 @@ out:
*/
static __init int is_reserve_region(efi_memory_desc_t *md)
{
- if (!is_normal_ram(md))
+ switch (md->type) {
+ case EFI_LOADER_CODE:
+ case EFI_LOADER_DATA:
+ case EFI_BOOT_SERVICES_CODE:
+ case EFI_BOOT_SERVICES_DATA:
+ case EFI_CONVENTIONAL_MEMORY:
return 0;
-
- if (md->attribute & EFI_MEMORY_RUNTIME)
- return 1;
-
- if (md->type == EFI_ACPI_RECLAIM_MEMORY ||
- md->type == EFI_RESERVED_TYPE)
- return 1;
-
- return 0;
+ default:
+ break;
+ }
+ return is_normal_ram(md);
}
static __init void reserve_regions(void)
@@ -471,3 +470,17 @@ err_unmap:
return -1;
}
early_initcall(arm64_enter_virtual_mode);
+
+static int __init arm64_dmi_init(void)
+{
+ /*
+ * On arm64, DMI depends on UEFI, and dmi_scan_machine() needs to
+ * be called early because dmi_id_init(), which is an arch_initcall
+ * itself, depends on dmi_scan_machine() having been called already.
+ */
+ dmi_scan_machine();
+ if (dmi_available)
+ dmi_set_dump_stack_arch_desc();
+ return 0;
+}
+core_initcall(arm64_dmi_init);
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
index 38e704e597f7..08cafc518b9a 100644
--- a/arch/arm64/kernel/entry-ftrace.S
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -98,8 +98,8 @@
ENTRY(_mcount)
mcount_enter
- ldr x0, =ftrace_trace_function
- ldr x2, [x0]
+ adrp x0, ftrace_trace_function
+ ldr x2, [x0, #:lo12:ftrace_trace_function]
adr x0, ftrace_stub
cmp x0, x2 // if (ftrace_trace_function
b.eq skip_ftrace_call // != ftrace_stub) {
@@ -115,14 +115,15 @@ skip_ftrace_call: // return;
mcount_exit // return;
// }
skip_ftrace_call:
- ldr x1, =ftrace_graph_return
- ldr x2, [x1] // if ((ftrace_graph_return
- cmp x0, x2 // != ftrace_stub)
- b.ne ftrace_graph_caller
-
- ldr x1, =ftrace_graph_entry // || (ftrace_graph_entry
- ldr x2, [x1] // != ftrace_graph_entry_stub))
- ldr x0, =ftrace_graph_entry_stub
+ adrp x1, ftrace_graph_return
+ ldr x2, [x1, #:lo12:ftrace_graph_return]
+ cmp x0, x2 // if ((ftrace_graph_return
+ b.ne ftrace_graph_caller // != ftrace_stub)
+
+ adrp x1, ftrace_graph_entry // || (ftrace_graph_entry
+ adrp x0, ftrace_graph_entry_stub // != ftrace_graph_entry_stub))
+ ldr x2, [x1, #:lo12:ftrace_graph_entry]
+ add x0, x0, #:lo12:ftrace_graph_entry_stub
cmp x0, x2
b.ne ftrace_graph_caller // ftrace_graph_caller();
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 726b910fe6ec..fd4fa374e5d2 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -64,25 +64,26 @@
#define BAD_ERROR 3
.macro kernel_entry, el, regsize = 64
- sub sp, sp, #S_FRAME_SIZE - S_LR // room for LR, SP, SPSR, ELR
+ sub sp, sp, #S_FRAME_SIZE
.if \regsize == 32
mov w0, w0 // zero upper 32 bits of x0
.endif
- push x28, x29
- push x26, x27
- push x24, x25
- push x22, x23
- push x20, x21
- push x18, x19
- push x16, x17
- push x14, x15
- push x12, x13
- push x10, x11
- push x8, x9
- push x6, x7
- push x4, x5
- push x2, x3
- push x0, x1
+ stp x0, x1, [sp, #16 * 0]
+ stp x2, x3, [sp, #16 * 1]
+ stp x4, x5, [sp, #16 * 2]
+ stp x6, x7, [sp, #16 * 3]
+ stp x8, x9, [sp, #16 * 4]
+ stp x10, x11, [sp, #16 * 5]
+ stp x12, x13, [sp, #16 * 6]
+ stp x14, x15, [sp, #16 * 7]
+ stp x16, x17, [sp, #16 * 8]
+ stp x18, x19, [sp, #16 * 9]
+ stp x20, x21, [sp, #16 * 10]
+ stp x22, x23, [sp, #16 * 11]
+ stp x24, x25, [sp, #16 * 12]
+ stp x26, x27, [sp, #16 * 13]
+ stp x28, x29, [sp, #16 * 14]
+
.if \el == 0
mrs x21, sp_el0
get_thread_info tsk // Ensure MDSCR_EL1.SS is clear,
@@ -118,33 +119,31 @@
.if \el == 0
ct_user_enter
ldr x23, [sp, #S_SP] // load return stack pointer
+ msr sp_el0, x23
.endif
+ msr elr_el1, x21 // set up the return data
+ msr spsr_el1, x22
.if \ret
ldr x1, [sp, #S_X1] // preserve x0 (syscall return)
- add sp, sp, S_X2
.else
- pop x0, x1
+ ldp x0, x1, [sp, #16 * 0]
.endif
- pop x2, x3 // load the rest of the registers
- pop x4, x5
- pop x6, x7
- pop x8, x9
- msr elr_el1, x21 // set up the return data
- msr spsr_el1, x22
- .if \el == 0
- msr sp_el0, x23
- .endif
- pop x10, x11
- pop x12, x13
- pop x14, x15
- pop x16, x17
- pop x18, x19
- pop x20, x21
- pop x22, x23
- pop x24, x25
- pop x26, x27
- pop x28, x29
- ldr lr, [sp], #S_FRAME_SIZE - S_LR // load LR and restore SP
+ ldp x2, x3, [sp, #16 * 1]
+ ldp x4, x5, [sp, #16 * 2]
+ ldp x6, x7, [sp, #16 * 3]
+ ldp x8, x9, [sp, #16 * 4]
+ ldp x10, x11, [sp, #16 * 5]
+ ldp x12, x13, [sp, #16 * 6]
+ ldp x14, x15, [sp, #16 * 7]
+ ldp x16, x17, [sp, #16 * 8]
+ ldp x18, x19, [sp, #16 * 9]
+ ldp x20, x21, [sp, #16 * 10]
+ ldp x22, x23, [sp, #16 * 11]
+ ldp x24, x25, [sp, #16 * 12]
+ ldp x26, x27, [sp, #16 * 13]
+ ldp x28, x29, [sp, #16 * 14]
+ ldr lr, [sp, #S_LR]
+ add sp, sp, #S_FRAME_SIZE // restore sp
eret // return to kernel
.endm
@@ -168,7 +167,8 @@ tsk .req x28 // current thread_info
* Interrupt handling.
*/
.macro irq_handler
- ldr x1, handle_arch_irq
+ adrp x1, handle_arch_irq
+ ldr x1, [x1, #:lo12:handle_arch_irq]
mov x0, sp
blr x1
.endm
@@ -455,8 +455,8 @@ el0_da:
bic x0, x26, #(0xff << 56)
mov x1, x25
mov x2, sp
- adr lr, ret_to_user
- b do_mem_abort
+ bl do_mem_abort
+ b ret_to_user
el0_ia:
/*
* Instruction abort handling
@@ -468,8 +468,8 @@ el0_ia:
mov x0, x26
orr x1, x25, #1 << 24 // use reserved ISS bit for instruction aborts
mov x2, sp
- adr lr, ret_to_user
- b do_mem_abort
+ bl do_mem_abort
+ b ret_to_user
el0_fpsimd_acc:
/*
* Floating Point or Advanced SIMD access
@@ -478,8 +478,8 @@ el0_fpsimd_acc:
ct_user_exit
mov x0, x25
mov x1, sp
- adr lr, ret_to_user
- b do_fpsimd_acc
+ bl do_fpsimd_acc
+ b ret_to_user
el0_fpsimd_exc:
/*
* Floating Point or Advanced SIMD exception
@@ -488,8 +488,8 @@ el0_fpsimd_exc:
ct_user_exit
mov x0, x25
mov x1, sp
- adr lr, ret_to_user
- b do_fpsimd_exc
+ bl do_fpsimd_exc
+ b ret_to_user
el0_sp_pc:
/*
* Stack or PC alignment exception handling
@@ -500,8 +500,8 @@ el0_sp_pc:
mov x0, x26
mov x1, x25
mov x2, sp
- adr lr, ret_to_user
- b do_sp_pc_abort
+ bl do_sp_pc_abort
+ b ret_to_user
el0_undef:
/*
* Undefined instruction
@@ -510,8 +510,8 @@ el0_undef:
enable_dbg_and_irq
ct_user_exit
mov x0, sp
- adr lr, ret_to_user
- b do_undefinstr
+ bl do_undefinstr
+ b ret_to_user
el0_dbg:
/*
* Debug exception handling
@@ -530,8 +530,8 @@ el0_inv:
mov x0, sp
mov x1, #BAD_SYNC
mrs x2, esr_el1
- adr lr, ret_to_user
- b bad_mode
+ bl bad_mode
+ b ret_to_user
ENDPROC(el0_sync)
.align 6
@@ -653,14 +653,15 @@ el0_svc_naked: // compat entry point
ldr x16, [tsk, #TI_FLAGS] // check for syscall hooks
tst x16, #_TIF_SYSCALL_WORK
b.ne __sys_trace
- adr lr, ret_fast_syscall // return address
cmp scno, sc_nr // check upper syscall limit
b.hs ni_sys
ldr x16, [stbl, scno, lsl #3] // address in the syscall table
- br x16 // call sys_* routine
+ blr x16 // call sys_* routine
+ b ret_fast_syscall
ni_sys:
mov x0, sp
- b do_ni_syscall
+ bl do_ni_syscall
+ b ret_fast_syscall
ENDPROC(el0_svc)
/*
@@ -668,26 +669,38 @@ ENDPROC(el0_svc)
* switches, and waiting for our parent to respond.
*/
__sys_trace:
- mov x0, sp
+ mov w0, #-1 // set default errno for
+ cmp scno, x0 // user-issued syscall(-1)
+ b.ne 1f
+ mov x0, #-ENOSYS
+ str x0, [sp, #S_X0]
+1: mov x0, sp
bl syscall_trace_enter
- adr lr, __sys_trace_return // return address
+ cmp w0, #-1 // skip the syscall?
+ b.eq __sys_trace_return_skipped
uxtw scno, w0 // syscall number (possibly new)
mov x1, sp // pointer to regs
cmp scno, sc_nr // check upper syscall limit
- b.hs ni_sys
+ b.hs __ni_sys_trace
ldp x0, x1, [sp] // restore the syscall args
ldp x2, x3, [sp, #S_X2]
ldp x4, x5, [sp, #S_X4]
ldp x6, x7, [sp, #S_X6]
ldr x16, [stbl, scno, lsl #3] // address in the syscall table
- br x16 // call sys_* routine
+ blr x16 // call sys_* routine
__sys_trace_return:
- str x0, [sp] // save returned x0
+ str x0, [sp, #S_X0] // save returned x0
+__sys_trace_return_skipped:
mov x0, sp
bl syscall_trace_exit
b ret_to_user
+__ni_sys_trace:
+ mov x0, sp
+ bl do_ni_syscall
+ b __sys_trace_return
+
/*
* Special system call wrappers.
*/
@@ -695,6 +708,3 @@ ENTRY(sys_rt_sigreturn_wrapper)
mov x0, sp
b sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)
-
-ENTRY(handle_arch_irq)
- .quad 0
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 0a6e4f924df8..8ce88e08c030 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -132,6 +132,8 @@ efi_head:
#endif
#ifdef CONFIG_EFI
+ .globl stext_offset
+ .set stext_offset, stext - efi_head
.align 3
pe_header:
.ascii "PE"
@@ -155,12 +157,12 @@ optional_header:
.long 0 // SizeOfInitializedData
.long 0 // SizeOfUninitializedData
.long efi_stub_entry - efi_head // AddressOfEntryPoint
- .long stext - efi_head // BaseOfCode
+ .long stext_offset // BaseOfCode
extra_header_fields:
.quad 0 // ImageBase
- .long 0x20 // SectionAlignment
- .long 0x8 // FileAlignment
+ .long 0x1000 // SectionAlignment
+ .long PECOFF_FILE_ALIGNMENT // FileAlignment
.short 0 // MajorOperatingSystemVersion
.short 0 // MinorOperatingSystemVersion
.short 0 // MajorImageVersion
@@ -172,7 +174,7 @@ extra_header_fields:
.long _end - efi_head // SizeOfImage
// Everything before the kernel image is considered part of the header
- .long stext - efi_head // SizeOfHeaders
+ .long stext_offset // SizeOfHeaders
.long 0 // CheckSum
.short 0xa // Subsystem (EFI application)
.short 0 // DllCharacteristics
@@ -217,16 +219,24 @@ section_table:
.byte 0
.byte 0 // end of 0 padding of section name
.long _end - stext // VirtualSize
- .long stext - efi_head // VirtualAddress
+ .long stext_offset // VirtualAddress
.long _edata - stext // SizeOfRawData
- .long stext - efi_head // PointerToRawData
+ .long stext_offset // PointerToRawData
.long 0 // PointerToRelocations (0 for executables)
.long 0 // PointerToLineNumbers (0 for executables)
.short 0 // NumberOfRelocations (0 for executables)
.short 0 // NumberOfLineNumbers (0 for executables)
.long 0xe0500020 // Characteristics (section flags)
- .align 5
+
+ /*
+ * EFI will load stext onwards at the 4k section alignment
+ * described in the PE/COFF header. To ensure that instruction
+ * sequences using an adrp and a :lo12: immediate will function
+ * correctly at this alignment, we must ensure that stext is
+ * placed at a 4k boundary in the Image to begin with.
+ */
+ .align 12
#endif
ENTRY(stext)
@@ -238,7 +248,13 @@ ENTRY(stext)
mov x0, x22
bl lookup_processor_type
mov x23, x0 // x23=current cpu_table
- cbz x23, __error_p // invalid processor (x23=0)?
+ /*
+ * __error_p may end up out of range for cbz if text areas are
+ * aligned up to section sizes.
+ */
+ cbnz x23, 1f // invalid processor (x23=0)?
+ b __error_p
+1:
bl __vet_fdt
bl __create_page_tables // x25=TTBR0, x26=TTBR1
/*
@@ -250,13 +266,214 @@ ENTRY(stext)
*/
ldr x27, __switch_data // address to jump to after
// MMU has been enabled
- adr lr, __enable_mmu // return (PIC) address
+ adrp lr, __enable_mmu // return (PIC) address
+ add lr, lr, #:lo12:__enable_mmu
ldr x12, [x23, #CPU_INFO_SETUP]
add x12, x12, x28 // __virt_to_phys
br x12 // initialise processor
ENDPROC(stext)
/*
+ * Determine validity of the x21 FDT pointer.
+ * The dtb must be 8-byte aligned and live in the first 512M of memory.
+ */
+__vet_fdt:
+ tst x21, #0x7
+ b.ne 1f
+ cmp x21, x24
+ b.lt 1f
+ mov x0, #(1 << 29)
+ add x0, x0, x24
+ cmp x21, x0
+ b.ge 1f
+ ret
+1:
+ mov x21, #0
+ ret
+ENDPROC(__vet_fdt)
+/*
+ * Macro to create a table entry to the next page.
+ *
+ * tbl: page table address
+ * virt: virtual address
+ * shift: #imm page table shift
+ * ptrs: #imm pointers per table page
+ *
+ * Preserves: virt
+ * Corrupts: tmp1, tmp2
+ * Returns: tbl -> next level table page address
+ */
+ .macro create_table_entry, tbl, virt, shift, ptrs, tmp1, tmp2
+ lsr \tmp1, \virt, #\shift
+ and \tmp1, \tmp1, #\ptrs - 1 // table index
+ add \tmp2, \tbl, #PAGE_SIZE
+ orr \tmp2, \tmp2, #PMD_TYPE_TABLE // address of next table and entry type
+ str \tmp2, [\tbl, \tmp1, lsl #3]
+ add \tbl, \tbl, #PAGE_SIZE // next level table page
+ .endm
+
+/*
+ * Macro to populate the PGD (and possibly PUD) for the corresponding
+ * block entry in the next level (tbl) for the given virtual address.
+ *
+ * Preserves: tbl, next, virt
+ * Corrupts: tmp1, tmp2
+ */
+ .macro create_pgd_entry, tbl, virt, tmp1, tmp2
+ create_table_entry \tbl, \virt, PGDIR_SHIFT, PTRS_PER_PGD, \tmp1, \tmp2
+#if SWAPPER_PGTABLE_LEVELS == 3
+ create_table_entry \tbl, \virt, TABLE_SHIFT, PTRS_PER_PTE, \tmp1, \tmp2
+#endif
+ .endm
+
+/*
+ * Macro to populate block entries in the page table for the start..end
+ * virtual range (inclusive).
+ *
+ * Preserves: tbl, flags
+ * Corrupts: phys, start, end, pstate
+ */
+ .macro create_block_map, tbl, flags, phys, start, end
+ lsr \phys, \phys, #BLOCK_SHIFT
+ lsr \start, \start, #BLOCK_SHIFT
+ and \start, \start, #PTRS_PER_PTE - 1 // table index
+ orr \phys, \flags, \phys, lsl #BLOCK_SHIFT // table entry
+ lsr \end, \end, #BLOCK_SHIFT
+ and \end, \end, #PTRS_PER_PTE - 1 // table end index
+9999: str \phys, [\tbl, \start, lsl #3] // store the entry
+ add \start, \start, #1 // next entry
+ add \phys, \phys, #BLOCK_SIZE // next block
+ cmp \start, \end
+ b.ls 9999b
+ .endm
+
+/*
+ * Set up the initial page tables. We only set up the barest amount which is
+ * required to get the kernel running. The following sections are required:
+ * - identity mapping to enable the MMU (low address, TTBR0)
+ * - first few MB of the kernel linear mapping to jump to once the MMU has
+ * been enabled, including the FDT blob (TTBR1)
+ * - pgd entry for fixed mappings (TTBR1)
+ */
+__create_page_tables:
+ pgtbl x25, x26, x28 // idmap_pg_dir and swapper_pg_dir addresses
+ mov x27, lr
+
+ /*
+ * Invalidate the idmap and swapper page tables to avoid potential
+ * dirty cache lines being evicted.
+ */
+ mov x0, x25
+ add x1, x26, #SWAPPER_DIR_SIZE
+ bl __inval_cache_range
+
+ /*
+ * Clear the idmap and swapper page tables.
+ */
+ mov x0, x25
+ add x6, x26, #SWAPPER_DIR_SIZE
+1: stp xzr, xzr, [x0], #16
+ stp xzr, xzr, [x0], #16
+ stp xzr, xzr, [x0], #16
+ stp xzr, xzr, [x0], #16
+ cmp x0, x6
+ b.lo 1b
+
+ ldr x7, =MM_MMUFLAGS
+
+ /*
+ * Create the identity mapping.
+ */
+ mov x0, x25 // idmap_pg_dir
+ ldr x3, =KERNEL_START
+ add x3, x3, x28 // __pa(KERNEL_START)
+ create_pgd_entry x0, x3, x5, x6
+ ldr x6, =KERNEL_END
+ mov x5, x3 // __pa(KERNEL_START)
+ add x6, x6, x28 // __pa(KERNEL_END)
+ create_block_map x0, x7, x3, x5, x6
+
+ /*
+ * Map the kernel image (starting with PHYS_OFFSET).
+ */
+ mov x0, x26 // swapper_pg_dir
+ mov x5, #PAGE_OFFSET
+ create_pgd_entry x0, x5, x3, x6
+ ldr x6, =KERNEL_END
+ mov x3, x24 // phys offset
+ create_block_map x0, x7, x3, x5, x6
+
+ /*
+ * Map the FDT blob (maximum 2MB; must be within 512MB of
+ * PHYS_OFFSET).
+ */
+ mov x3, x21 // FDT phys address
+ and x3, x3, #~((1 << 21) - 1) // 2MB aligned
+ mov x6, #PAGE_OFFSET
+ sub x5, x3, x24 // subtract PHYS_OFFSET
+ tst x5, #~((1 << 29) - 1) // within 512MB?
+ csel x21, xzr, x21, ne // zero the FDT pointer
+ b.ne 1f
+ add x5, x5, x6 // __va(FDT blob)
+ add x6, x5, #1 << 21 // 2MB for the FDT blob
+ sub x6, x6, #1 // inclusive range
+ create_block_map x0, x7, x3, x5, x6
+1:
+ /*
+ * Since the page tables have been populated with non-cacheable
+ * accesses (MMU disabled), invalidate the idmap and swapper page
+ * tables again to remove any speculatively loaded cache lines.
+ */
+ mov x0, x25
+ add x1, x26, #SWAPPER_DIR_SIZE
+ bl __inval_cache_range
+
+ mov lr, x27
+ ret
+ENDPROC(__create_page_tables)
+ .ltorg
+
+ .align 3
+ .type __switch_data, %object
+__switch_data:
+ .quad __mmap_switched
+ .quad __bss_start // x6
+ .quad __bss_stop // x7
+ .quad processor_id // x4
+ .quad __fdt_pointer // x5
+ .quad memstart_addr // x6
+ .quad init_thread_union + THREAD_START_SP // sp
+
+/*
+ * The following fragment of code is executed with the MMU enabled, and
+ * uses absolute addresses; this is not position independent.
+ */
+__mmap_switched:
+ adr x3, __switch_data + 8
+
+ ldp x6, x7, [x3], #16
+1: cmp x6, x7
+ b.hs 2f
+ str xzr, [x6], #8 // Clear BSS
+ b 1b
+2:
+ ldp x4, x5, [x3], #16
+ ldr x6, [x3], #8
+ ldr x16, [x3]
+ mov sp, x16
+ str x22, [x4] // Save processor ID
+ str x21, [x5] // Save FDT pointer
+ str x24, [x6] // Save PHYS_OFFSET
+ mov x29, #0
+ b start_kernel
+ENDPROC(__mmap_switched)
+
+/*
+ * end early head section, begin head code that is also used for
+ * hotplug and needs to have the same protections as the text region
+ */
+ .section ".text","ax"
+/*
* If we're fortunate enough to boot at EL2, ensure that the world is
* sane before dropping to EL1.
*
@@ -331,7 +548,8 @@ CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems
msr vttbr_el2, xzr
/* Hypervisor stub */
- adr x0, __hyp_stub_vectors
+ adrp x0, __hyp_stub_vectors
+ add x0, x0, #:lo12:__hyp_stub_vectors
msr vbar_el2, x0
/* spsr */
@@ -492,183 +710,6 @@ ENDPROC(__calc_phys_offset)
.quad PAGE_OFFSET
/*
- * Macro to create a table entry to the next page.
- *
- * tbl: page table address
- * virt: virtual address
- * shift: #imm page table shift
- * ptrs: #imm pointers per table page
- *
- * Preserves: virt
- * Corrupts: tmp1, tmp2
- * Returns: tbl -> next level table page address
- */
- .macro create_table_entry, tbl, virt, shift, ptrs, tmp1, tmp2
- lsr \tmp1, \virt, #\shift
- and \tmp1, \tmp1, #\ptrs - 1 // table index
- add \tmp2, \tbl, #PAGE_SIZE
- orr \tmp2, \tmp2, #PMD_TYPE_TABLE // address of next table and entry type
- str \tmp2, [\tbl, \tmp1, lsl #3]
- add \tbl, \tbl, #PAGE_SIZE // next level table page
- .endm
-
-/*
- * Macro to populate the PGD (and possibily PUD) for the corresponding
- * block entry in the next level (tbl) for the given virtual address.
- *
- * Preserves: tbl, next, virt
- * Corrupts: tmp1, tmp2
- */
- .macro create_pgd_entry, tbl, virt, tmp1, tmp2
- create_table_entry \tbl, \virt, PGDIR_SHIFT, PTRS_PER_PGD, \tmp1, \tmp2
-#if SWAPPER_PGTABLE_LEVELS == 3
- create_table_entry \tbl, \virt, TABLE_SHIFT, PTRS_PER_PTE, \tmp1, \tmp2
-#endif
- .endm
-
-/*
- * Macro to populate block entries in the page table for the start..end
- * virtual range (inclusive).
- *
- * Preserves: tbl, flags
- * Corrupts: phys, start, end, pstate
- */
- .macro create_block_map, tbl, flags, phys, start, end
- lsr \phys, \phys, #BLOCK_SHIFT
- lsr \start, \start, #BLOCK_SHIFT
- and \start, \start, #PTRS_PER_PTE - 1 // table index
- orr \phys, \flags, \phys, lsl #BLOCK_SHIFT // table entry
- lsr \end, \end, #BLOCK_SHIFT
- and \end, \end, #PTRS_PER_PTE - 1 // table end index
-9999: str \phys, [\tbl, \start, lsl #3] // store the entry
- add \start, \start, #1 // next entry
- add \phys, \phys, #BLOCK_SIZE // next block
- cmp \start, \end
- b.ls 9999b
- .endm
-
-/*
- * Setup the initial page tables. We only setup the barest amount which is
- * required to get the kernel running. The following sections are required:
- * - identity mapping to enable the MMU (low address, TTBR0)
- * - first few MB of the kernel linear mapping to jump to once the MMU has
- * been enabled, including the FDT blob (TTBR1)
- * - pgd entry for fixed mappings (TTBR1)
- */
-__create_page_tables:
- pgtbl x25, x26, x28 // idmap_pg_dir and swapper_pg_dir addresses
- mov x27, lr
-
- /*
- * Invalidate the idmap and swapper page tables to avoid potential
- * dirty cache lines being evicted.
- */
- mov x0, x25
- add x1, x26, #SWAPPER_DIR_SIZE
- bl __inval_cache_range
-
- /*
- * Clear the idmap and swapper page tables.
- */
- mov x0, x25
- add x6, x26, #SWAPPER_DIR_SIZE
-1: stp xzr, xzr, [x0], #16
- stp xzr, xzr, [x0], #16
- stp xzr, xzr, [x0], #16
- stp xzr, xzr, [x0], #16
- cmp x0, x6
- b.lo 1b
-
- ldr x7, =MM_MMUFLAGS
-
- /*
- * Create the identity mapping.
- */
- mov x0, x25 // idmap_pg_dir
- ldr x3, =KERNEL_START
- add x3, x3, x28 // __pa(KERNEL_START)
- create_pgd_entry x0, x3, x5, x6
- ldr x6, =KERNEL_END
- mov x5, x3 // __pa(KERNEL_START)
- add x6, x6, x28 // __pa(KERNEL_END)
- create_block_map x0, x7, x3, x5, x6
-
- /*
- * Map the kernel image (starting with PHYS_OFFSET).
- */
- mov x0, x26 // swapper_pg_dir
- mov x5, #PAGE_OFFSET
- create_pgd_entry x0, x5, x3, x6
- ldr x6, =KERNEL_END
- mov x3, x24 // phys offset
- create_block_map x0, x7, x3, x5, x6
-
- /*
- * Map the FDT blob (maximum 2MB; must be within 512MB of
- * PHYS_OFFSET).
- */
- mov x3, x21 // FDT phys address
- and x3, x3, #~((1 << 21) - 1) // 2MB aligned
- mov x6, #PAGE_OFFSET
- sub x5, x3, x24 // subtract PHYS_OFFSET
- tst x5, #~((1 << 29) - 1) // within 512MB?
- csel x21, xzr, x21, ne // zero the FDT pointer
- b.ne 1f
- add x5, x5, x6 // __va(FDT blob)
- add x6, x5, #1 << 21 // 2MB for the FDT blob
- sub x6, x6, #1 // inclusive range
- create_block_map x0, x7, x3, x5, x6
-1:
- /*
- * Since the page tables have been populated with non-cacheable
- * accesses (MMU disabled), invalidate the idmap and swapper page
- * tables again to remove any speculatively loaded cache lines.
- */
- mov x0, x25
- add x1, x26, #SWAPPER_DIR_SIZE
- bl __inval_cache_range
-
- mov lr, x27
- ret
-ENDPROC(__create_page_tables)
- .ltorg
-
- .align 3
- .type __switch_data, %object
-__switch_data:
- .quad __mmap_switched
- .quad __bss_start // x6
- .quad __bss_stop // x7
- .quad processor_id // x4
- .quad __fdt_pointer // x5
- .quad memstart_addr // x6
- .quad init_thread_union + THREAD_START_SP // sp
-
-/*
- * The following fragment of code is executed with the MMU on in MMU mode, and
- * uses absolute addresses; this is not position independent.
- */
-__mmap_switched:
- adr x3, __switch_data + 8
-
- ldp x6, x7, [x3], #16
-1: cmp x6, x7
- b.hs 2f
- str xzr, [x6], #8 // Clear BSS
- b 1b
-2:
- ldp x4, x5, [x3], #16
- ldr x6, [x3], #8
- ldr x16, [x3]
- mov sp, x16
- str x22, [x4] // Save processor ID
- str x21, [x5] // Save FDT pointer
- str x24, [x6] // Save PHYS_OFFSET
- mov x29, #0
- b start_kernel
-ENDPROC(__mmap_switched)
-
-/*
* Exception handling. Something went wrong and we can't proceed. We ought to
* tell the user, but since we don't have any guarantee that we're even
* running on the right architecture, we do virtually nothing.
@@ -715,22 +756,3 @@ __lookup_processor_type_data:
.quad .
.quad cpu_table
.size __lookup_processor_type_data, . - __lookup_processor_type_data
-
-/*
- * Determine validity of the x21 FDT pointer.
- * The dtb must be 8-byte aligned and live in the first 512M of memory.
- */
-__vet_fdt:
- tst x21, #0x7
- b.ne 1f
- cmp x21, x24
- b.lt 1f
- mov x0, #(1 << 29)
- add x0, x0, x24
- cmp x21, x0
- b.ge 1f
- ret
-1:
- mov x21, #0
- ret
-ENDPROC(__vet_fdt)
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index e007714ded04..7e9327a0986d 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -163,9 +163,10 @@ static int __kprobes aarch64_insn_patch_text_cb(void *arg)
* which ends with "dsb; isb" pair guaranteeing global
* visibility.
*/
- atomic_set(&pp->cpu_count, -1);
+ /* Notify other processors with an additional increment. */
+ atomic_inc(&pp->cpu_count);
} else {
- while (atomic_read(&pp->cpu_count) != -1)
+ while (atomic_read(&pp->cpu_count) <= num_online_cpus())
cpu_relax();
isb();
}
@@ -959,3 +960,29 @@ u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}
+
+bool aarch32_insn_is_wide(u32 insn)
+{
+ return insn >= 0xe800;
+}
+
+/*
+ * Macros/defines for extracting register numbers from an instruction.
+ */
+u32 aarch32_insn_extract_reg_num(u32 insn, int offset)
+{
+ return (insn & (0xf << offset)) >> offset;
+}
+
+#define OPC2_MASK 0x7
+#define OPC2_OFFSET 5
+u32 aarch32_insn_mcr_extract_opc2(u32 insn)
+{
+ return (insn & (OPC2_MASK << OPC2_OFFSET)) >> OPC2_OFFSET;
+}
+
+#define CRM_MASK 0xf
+u32 aarch32_insn_mcr_extract_crm(u32 insn)
+{
+ return insn & CRM_MASK;
+}
diff --git a/arch/arm64/kernel/io.c b/arch/arm64/kernel/io.c
index 7d37ead4d199..354be2a872ae 100644
--- a/arch/arm64/kernel/io.c
+++ b/arch/arm64/kernel/io.c
@@ -25,12 +25,26 @@
*/
void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t count)
{
- unsigned char *t = to;
- while (count) {
+ while (count && (!IS_ALIGNED((unsigned long)from, 8) ||
+ !IS_ALIGNED((unsigned long)to, 8))) {
+ *(u8 *)to = __raw_readb(from);
+ from++;
+ to++;
count--;
- *t = readb(from);
- t++;
+ }
+
+ while (count >= 8) {
+ *(u64 *)to = __raw_readq(from);
+ from += 8;
+ to += 8;
+ count -= 8;
+ }
+
+ while (count) {
+ *(u8 *)to = __raw_readb(from);
from++;
+ to++;
+ count--;
}
}
EXPORT_SYMBOL(__memcpy_fromio);
@@ -40,12 +54,26 @@ EXPORT_SYMBOL(__memcpy_fromio);
*/
void __memcpy_toio(volatile void __iomem *to, const void *from, size_t count)
{
- const unsigned char *f = from;
- while (count) {
+ while (count && (!IS_ALIGNED((unsigned long)to, 8) ||
+ !IS_ALIGNED((unsigned long)from, 8))) {
+ __raw_writeb(*(volatile u8 *)from, to);
+ from++;
+ to++;
count--;
- writeb(*f, to);
- f++;
+ }
+
+ while (count >= 8) {
+ __raw_writeq(*(volatile u64 *)from, to);
+ from += 8;
+ to += 8;
+ count -= 8;
+ }
+
+ while (count) {
+ __raw_writeb(*(volatile u8 *)from, to);
+ from++;
to++;
+ count--;
}
}
EXPORT_SYMBOL(__memcpy_toio);
@@ -55,10 +83,28 @@ EXPORT_SYMBOL(__memcpy_toio);
*/
void __memset_io(volatile void __iomem *dst, int c, size_t count)
{
- while (count) {
+ u64 qc = (u8)c;
+
+ qc |= qc << 8;
+ qc |= qc << 16;
+ qc |= qc << 32;
+
+ while (count && !IS_ALIGNED((unsigned long)dst, 8)) {
+ __raw_writeb(c, dst);
+ dst++;
count--;
- writeb(c, dst);
+ }
+
+ while (count >= 8) {
+ __raw_writeq(qc, dst);
+ dst += 8;
+ count -= 8;
+ }
+
+ while (count) {
+ __raw_writeb(c, dst);
dst++;
+ count--;
}
}
EXPORT_SYMBOL(__memset_io);
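
__memset_io above builds its 64-bit fill pattern by doubling the byte three
times before switching to quad-word stores. The same trick in isolation, as a
small self-contained sketch:

    #include <stdint.h>

    /* Replicate the low byte of c into all eight bytes of a 64-bit word,
     * e.g. 0xAB -> 0xABABABABABABABAB, so a single 64-bit store fills
     * eight bytes at once. */
    static inline uint64_t replicate_byte(int c)
    {
            uint64_t qc = (uint8_t)c;

            qc |= qc << 8;          /* 2 identical bytes */
            qc |= qc << 16;         /* 4 identical bytes */
            qc |= qc << 32;         /* 8 identical bytes */
            return qc;
    }
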
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index 071a6ec13bd8..240b75c0e94f 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -40,6 +40,8 @@ int arch_show_interrupts(struct seq_file *p, int prec)
return 0;
}
+void (*handle_arch_irq)(struct pt_regs *) = NULL;
+
void __init set_handle_irq(void (*handle_irq)(struct pt_regs *))
{
if (handle_arch_irq)
diff --git a/arch/arm64/kernel/jump_label.c b/arch/arm64/kernel/jump_label.c
index 263a166291fb..4f1fec7a46db 100644
--- a/arch/arm64/kernel/jump_label.c
+++ b/arch/arm64/kernel/jump_label.c
@@ -22,9 +22,8 @@
#ifdef HAVE_JUMP_LABEL
-static void __arch_jump_label_transform(struct jump_entry *entry,
- enum jump_label_type type,
- bool is_static)
+void arch_jump_label_transform(struct jump_entry *entry,
+ enum jump_label_type type)
{
void *addr = (void *)entry->code;
u32 insn;
@@ -37,22 +36,18 @@ static void __arch_jump_label_transform(struct jump_entry *entry,
insn = aarch64_insn_gen_nop();
}
- if (is_static)
- aarch64_insn_patch_text_nosync(addr, insn);
- else
- aarch64_insn_patch_text(&addr, &insn, 1);
-}
-
-void arch_jump_label_transform(struct jump_entry *entry,
- enum jump_label_type type)
-{
- __arch_jump_label_transform(entry, type, false);
+ aarch64_insn_patch_text(&addr, &insn, 1);
}
void arch_jump_label_transform_static(struct jump_entry *entry,
enum jump_label_type type)
{
- __arch_jump_label_transform(entry, type, true);
+ /*
+ * We use the architected A64 NOP in arch_static_branch, so there's no
+ * need to patch an identical A64 NOP over the top of it here. The core
+ * will call arch_jump_label_transform from a module notifier if the
+ * NOP needs to be replaced by a branch.
+ */
}
#endif /* HAVE_JUMP_LABEL */
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index 1eb1cc955139..fd027b101de5 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -26,6 +26,7 @@
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
#include <asm/insn.h>
+#include <asm/sections.h>
#define AARCH64_INSN_IMM_MOVNZ AARCH64_INSN_IMM_MAX
#define AARCH64_INSN_IMM_MOVK AARCH64_INSN_IMM_16
@@ -394,3 +395,20 @@ overflow:
me->name, (int)ELF64_R_TYPE(rel[i].r_info), val);
return -ENOEXEC;
}
+
+int module_finalize(const Elf_Ehdr *hdr,
+ const Elf_Shdr *sechdrs,
+ struct module *me)
+{
+ const Elf_Shdr *s, *se;
+ const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+
+ for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
+ if (strcmp(".altinstructions", secstrs + s->sh_name) == 0) {
+ apply_alternatives((void *)s->sh_addr, s->sh_size);
+ return 0;
+ }
+ }
+
+ return 0;
+}
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index aa29ecb4f800..25a5308744b1 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -169,8 +169,14 @@ armpmu_event_set_period(struct perf_event *event,
ret = 1;
}
- if (left > (s64)armpmu->max_period)
- left = armpmu->max_period;
+ /*
+ * Limit the maximum period to prevent the counter value
+ * from overtaking the one we are about to program. In
+ * effect we are reducing max_period to account for
+ * interrupt latency (and we are being very conservative).
+ */
+ if (left > (armpmu->max_period >> 1))
+ left = armpmu->max_period >> 1;
local64_set(&hwc->prev_count, (u64)-left);
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index 866c1c821860..663da771580a 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -528,7 +528,7 @@ static int __maybe_unused cpu_psci_cpu_suspend(unsigned long index)
if (WARN_ON_ONCE(!index))
return -EINVAL;
- if (state->type == PSCI_POWER_STATE_TYPE_STANDBY)
+ if (state[index - 1].type == PSCI_POWER_STATE_TYPE_STANDBY)
ret = psci_ops.cpu_suspend(state[index - 1], 0);
else
ret = __cpu_suspend(index, psci_suspend_finisher);
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 8a4ae8e73213..d882b833dbdb 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -27,6 +27,7 @@
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/user.h>
+#include <linux/seccomp.h>
#include <linux/security.h>
#include <linux/init.h>
#include <linux/signal.h>
@@ -551,6 +552,32 @@ static int tls_set(struct task_struct *target, const struct user_regset *regset,
return ret;
}
+static int system_call_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ int syscallno = task_pt_regs(target)->syscallno;
+
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &syscallno, 0, -1);
+}
+
+static int system_call_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int syscallno, ret;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1);
+ if (ret)
+ return ret;
+
+ task_pt_regs(target)->syscallno = syscallno;
+ return ret;
+}
+
enum aarch64_regset {
REGSET_GPR,
REGSET_FPR,
@@ -559,6 +586,7 @@ enum aarch64_regset {
REGSET_HW_BREAK,
REGSET_HW_WATCH,
#endif
+ REGSET_SYSTEM_CALL,
};
static const struct user_regset aarch64_regsets[] = {
@@ -608,6 +636,14 @@ static const struct user_regset aarch64_regsets[] = {
.set = hw_break_set,
},
#endif
+ [REGSET_SYSTEM_CALL] = {
+ .core_note_type = NT_ARM_SYSTEM_CALL,
+ .n = 1,
+ .size = sizeof(int),
+ .align = sizeof(int),
+ .get = system_call_get,
+ .set = system_call_set,
+ },
};
static const struct user_regset_view user_aarch64_view = {
@@ -1114,6 +1150,10 @@ static void tracehook_report_syscall(struct pt_regs *regs,
asmlinkage int syscall_trace_enter(struct pt_regs *regs)
{
+ /* Do the secure computing check first; failures should be fast. */
+ if (secure_computing() == -1)
+ return -1;
+
if (test_thread_flag(TIF_SYSCALL_TRACE))
tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
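
The new REGSET_SYSTEM_CALL regset lets a tracer read or rewrite the pending
syscall number with PTRACE_GETREGSET/PTRACE_SETREGSET and the
NT_ARM_SYSTEM_CALL note type introduced by this series. A hedged user-space
sketch (error handling omitted, and it assumes the tracee is stopped at a
syscall-entry stop):

    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/uio.h>
    #include <linux/elf.h>          /* NT_ARM_SYSTEM_CALL */

    static int get_syscall_nr(pid_t pid)
    {
            int nr = -1;
            struct iovec iov = { .iov_base = &nr, .iov_len = sizeof(nr) };

            ptrace(PTRACE_GETREGSET, pid, (void *)NT_ARM_SYSTEM_CALL, &iov);
            return nr;
    }

    static long skip_syscall(pid_t pid)
    {
            int nr = -1;            /* -1 asks the kernel to skip the syscall */
            struct iovec iov = { .iov_base = &nr, .iov_len = sizeof(nr) };

            return ptrace(PTRACE_SETREGSET, pid, (void *)NT_ARM_SYSTEM_CALL, &iov);
    }
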
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 2437196cc5d4..b80991166754 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -43,12 +43,14 @@
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/efi.h>
+#include <linux/personality.h>
#include <asm/fixmap.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/cputable.h>
+#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
#include <asm/sections.h>
#include <asm/setup.h>
@@ -72,13 +74,15 @@ EXPORT_SYMBOL_GPL(elf_hwcap);
COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
- COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV)
+ COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
+ COMPAT_HWCAP_LPAE)
unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
unsigned int compat_elf_hwcap2 __read_mostly;
#endif
+DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
+
static const char *cpu_name;
-static const char *machine_name;
phys_addr_t __fdt_pointer __initdata;
/*
@@ -116,12 +120,16 @@ void __init early_print(const char *str, ...)
void __init smp_setup_processor_id(void)
{
+ u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
+ cpu_logical_map(0) = mpidr;
+
/*
* clear __my_cpu_offset on boot CPU to avoid hang caused by
* using percpu variable early, for example, lockdep will
* access percpu variable inside lock_release
*/
set_my_cpu_offset(0);
+ pr_info("Booting Linux on physical CPU 0x%lx\n", (unsigned long)mpidr);
}
bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
@@ -311,7 +319,7 @@ static void __init setup_machine_fdt(phys_addr_t dt_phys)
cpu_relax();
}
- machine_name = of_flat_dt_get_machine_name();
+ dump_stack_set_arch_desc("%s (DT)", of_flat_dt_get_machine_name());
}
/*
@@ -376,6 +384,7 @@ void __init setup_arch(char **cmdline_p)
*cmdline_p = boot_command_line;
+ early_fixmap_init();
early_ioremap_init();
parse_early_param();
@@ -398,7 +407,6 @@ void __init setup_arch(char **cmdline_p)
psci_init();
- cpu_logical_map(0) = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
cpu_read_bootcpu_ops();
#ifdef CONFIG_SMP
smp_init_cpus();
@@ -447,14 +455,50 @@ static const char *hwcap_str[] = {
NULL
};
+#ifdef CONFIG_COMPAT
+static const char *compat_hwcap_str[] = {
+ "swp",
+ "half",
+ "thumb",
+ "26bit",
+ "fastmult",
+ "fpa",
+ "vfp",
+ "edsp",
+ "java",
+ "iwmmxt",
+ "crunch",
+ "thumbee",
+ "neon",
+ "vfpv3",
+ "vfpv3d16",
+ "tls",
+ "vfpv4",
+ "idiva",
+ "idivt",
+ "vfpd32",
+ "lpae",
+ "evtstrm"
+};
+
+static const char *compat_hwcap2_str[] = {
+ "aes",
+ "pmull",
+ "sha1",
+ "sha2",
+ "crc32",
+ NULL
+};
+#endif /* CONFIG_COMPAT */
+
static int c_show(struct seq_file *m, void *v)
{
- int i;
-
- seq_printf(m, "Processor\t: %s rev %d (%s)\n",
- cpu_name, read_cpuid_id() & 15, ELF_PLATFORM);
+ int i, j;
for_each_online_cpu(i) {
+ struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
+ u32 midr = cpuinfo->reg_midr;
+
/*
* glibc reads /proc/cpuinfo to determine the number of
* online processors, looking for lines beginning with
@@ -463,24 +507,38 @@ static int c_show(struct seq_file *m, void *v)
#ifdef CONFIG_SMP
seq_printf(m, "processor\t: %d\n", i);
#endif
- }
-
- /* dump out the processor features */
- seq_puts(m, "Features\t: ");
-
- for (i = 0; hwcap_str[i]; i++)
- if (elf_hwcap & (1 << i))
- seq_printf(m, "%s ", hwcap_str[i]);
-
- seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
- seq_printf(m, "CPU architecture: AArch64\n");
- seq_printf(m, "CPU variant\t: 0x%x\n", (read_cpuid_id() >> 20) & 15);
- seq_printf(m, "CPU part\t: 0x%03x\n", (read_cpuid_id() >> 4) & 0xfff);
- seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);
- seq_puts(m, "\n");
-
- seq_printf(m, "Hardware\t: %s\n", machine_name);
+ /*
+ * Dump out the common processor features in a single line.
+ * Userspace should read the hwcaps with getauxval(AT_HWCAP)
+ * rather than attempting to parse this, but there's a body of
+ * software which does already (at least for 32-bit).
+ */
+ seq_puts(m, "Features\t:");
+ if (personality(current->personality) == PER_LINUX32) {
+#ifdef CONFIG_COMPAT
+ for (j = 0; compat_hwcap_str[j]; j++)
+ if (compat_elf_hwcap & (1 << j))
+ seq_printf(m, " %s", compat_hwcap_str[j]);
+
+ for (j = 0; compat_hwcap2_str[j]; j++)
+ if (compat_elf_hwcap2 & (1 << j))
+ seq_printf(m, " %s", compat_hwcap2_str[j]);
+#endif /* CONFIG_COMPAT */
+ } else {
+ for (j = 0; hwcap_str[j]; j++)
+ if (elf_hwcap & (1 << j))
+ seq_printf(m, " %s", hwcap_str[j]);
+ }
+ seq_puts(m, "\n");
+
+ seq_printf(m, "CPU implementer\t: 0x%02x\n",
+ MIDR_IMPLEMENTOR(midr));
+ seq_printf(m, "CPU architecture: 8\n");
+ seq_printf(m, "CPU variant\t: 0x%x\n", MIDR_VARIANT(midr));
+ seq_printf(m, "CPU part\t: 0x%03x\n", MIDR_PARTNUM(midr));
+ seq_printf(m, "CPU revision\t: %d\n\n", MIDR_REVISION(midr));
+ }
return 0;
}
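
As the comment in c_show() notes, userspace should query the hwcaps with
getauxval(AT_HWCAP) rather than parsing the Features line. A minimal sketch,
assuming an arm64 build where the uapi <asm/hwcap.h> bit definitions are
available:

    #include <stdio.h>
    #include <sys/auxv.h>
    #include <asm/hwcap.h>          /* HWCAP_* bits (arm64 uapi) */

    int main(void)
    {
            unsigned long hwcap = getauxval(AT_HWCAP);

            if (hwcap & HWCAP_AES)
                    printf("AES instructions supported\n");
            if (hwcap & HWCAP_CRC32)
                    printf("CRC32 instructions supported\n");
            return 0;
    }
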
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index 1b9ad02837cf..5a1ba6e80d4e 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -186,6 +186,12 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
err |= __put_user(from->si_uid, &to->si_uid);
err |= __put_user((compat_uptr_t)(unsigned long)from->si_ptr, &to->si_ptr);
break;
+ case __SI_SYS:
+ err |= __put_user((compat_uptr_t)(unsigned long)
+ from->si_call_addr, &to->si_call_addr);
+ err |= __put_user(from->si_syscall, &to->si_syscall);
+ err |= __put_user(from->si_arch, &to->si_arch);
+ break;
default: /* this is just in case for now ... */
err |= __put_user(from->si_pid, &to->si_pid);
err |= __put_user(from->si_uid, &to->si_uid);
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index a564b440416a..ede186cdd452 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -147,14 +147,12 @@ cpu_resume_after_mmu:
ret
ENDPROC(cpu_resume_after_mmu)
- .data
ENTRY(cpu_resume)
bl el2_setup // if in EL2 drop to EL1 cleanly
#ifdef CONFIG_SMP
mrs x1, mpidr_el1
- adr x4, mpidr_hash_ptr
- ldr x5, [x4]
- add x8, x4, x5 // x8 = struct mpidr_hash phys address
+ adrp x8, mpidr_hash
+ add x8, x8, #:lo12:mpidr_hash // x8 = struct mpidr_hash phys address
/* retrieve mpidr_hash members to compute the hash */
ldr x2, [x8, #MPIDR_HASH_MASK]
ldp w3, w4, [x8, #MPIDR_HASH_SHIFTS]
@@ -164,14 +162,15 @@ ENTRY(cpu_resume)
#else
mov x7, xzr
#endif
- adr x0, sleep_save_sp
+ adrp x0, sleep_save_sp
+ add x0, x0, #:lo12:sleep_save_sp
ldr x0, [x0, #SLEEP_SAVE_SP_PHYS]
ldr x0, [x0, x7, lsl #3]
/* load sp from context */
ldr x2, [x0, #CPU_CTX_SP]
- adr x1, sleep_idmap_phys
+ adrp x1, sleep_idmap_phys
/* load physical address of identity map page table in x1 */
- ldr x1, [x1]
+ ldr x1, [x1, #:lo12:sleep_idmap_phys]
mov sp, x2
/*
* cpu_do_resume expects x0 to contain context physical address
@@ -180,26 +179,3 @@ ENTRY(cpu_resume)
bl cpu_do_resume // PC relative jump, MMU off
b cpu_resume_mmu // Resume MMU, never returns
ENDPROC(cpu_resume)
-
- .align 3
-mpidr_hash_ptr:
- /*
- * offset of mpidr_hash symbol from current location
- * used to obtain run-time mpidr_hash address with MMU off
- */
- .quad mpidr_hash - .
-/*
- * physical address of identity mapped page tables
- */
- .type sleep_idmap_phys, #object
-ENTRY(sleep_idmap_phys)
- .quad 0
-/*
- * struct sleep_save_sp {
- * phys_addr_t *save_ptr_stash;
- * phys_addr_t save_ptr_stash_phys;
- * };
- */
- .type sleep_save_sp, #object
-ENTRY(sleep_save_sp)
- .space SLEEP_SAVE_SP_SZ // struct sleep_save_sp
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index b06d1d90ee8c..7ae6ee085261 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -37,6 +37,7 @@
#include <linux/of.h>
#include <linux/irq_work.h>
+#include <asm/alternative.h>
#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
@@ -309,6 +310,7 @@ void cpu_die(void)
void __init smp_cpus_done(unsigned int max_cpus)
{
pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
+ apply_alternatives_all();
}
void __init smp_prepare_boot_cpu(void)
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index 13ad4dbb1615..3771b72b6569 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -126,8 +126,8 @@ int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
return ret;
}
-extern struct sleep_save_sp sleep_save_sp;
-extern phys_addr_t sleep_idmap_phys;
+struct sleep_save_sp sleep_save_sp;
+phys_addr_t sleep_idmap_phys;
static int __init cpu_suspend_init(void)
{
diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c
index dc47e53e9e28..28c511b06edf 100644
--- a/arch/arm64/kernel/sys_compat.c
+++ b/arch/arm64/kernel/sys_compat.c
@@ -28,29 +28,39 @@
#include <asm/cacheflush.h>
#include <asm/unistd.h>
-static inline void
-do_compat_cache_op(unsigned long start, unsigned long end, int flags)
+static long
+__do_compat_cache_op(unsigned long start, unsigned long end)
{
- struct mm_struct *mm = current->active_mm;
- struct vm_area_struct *vma;
+ long ret;
- if (end < start || flags)
- return;
+ do {
+ unsigned long chunk = min(PAGE_SIZE, end - start);
- down_read(&mm->mmap_sem);
- vma = find_vma(mm, start);
- if (vma && vma->vm_start < end) {
- if (start < vma->vm_start)
- start = vma->vm_start;
- if (end > vma->vm_end)
- end = vma->vm_end;
- up_read(&mm->mmap_sem);
- __flush_cache_user_range(start & PAGE_MASK, PAGE_ALIGN(end));
- return;
- }
- up_read(&mm->mmap_sem);
+ if (fatal_signal_pending(current))
+ return 0;
+
+ ret = __flush_cache_user_range(start, start + chunk);
+ if (ret)
+ return ret;
+
+ cond_resched();
+ start += chunk;
+ } while (start < end);
+
+ return 0;
}
+static inline long
+do_compat_cache_op(unsigned long start, unsigned long end, int flags)
+{
+ if (end < start || flags)
+ return -EINVAL;
+
+ if (!access_ok(VERIFY_READ, start, end - start))
+ return -EFAULT;
+
+ return __do_compat_cache_op(start, end);
+}
/*
* Handle all unrecognised system calls.
*/
@@ -74,8 +84,7 @@ long compat_arm_syscall(struct pt_regs *regs)
* the specified region).
*/
case __ARM_NR_compat_cacheflush:
- do_compat_cache_op(regs->regs[0], regs->regs[1], regs->regs[2]);
- return 0;
+ return do_compat_cache_op(regs->regs[0], regs->regs[1], regs->regs[2]);
case __ARM_NR_compat_set_tls:
current->thread.tp_value = regs->regs[0];
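
do_compat_cache_op() now validates and chunks the user-supplied range before
flushing it. From userspace, the portable way to request the same I/D-cache
coherency after writing instructions is the compiler builtin, which the
toolchain maps to the appropriate cache-maintenance sequence or syscall; a
sketch, with the function name chosen here purely for illustration:

    #include <string.h>

    /* After copying code into buf, make the instruction side coherent with
     * the data side before jumping to it. */
    static void install_code(void *buf, const void *src, size_t len)
    {
            memcpy(buf, src, len);
            __builtin___clear_cache((char *)buf, (char *)buf + len);
    }
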
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index b6ee26b0939a..fcb8f7b42271 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -255,12 +255,15 @@ void store_cpu_topology(unsigned int cpuid)
/* Multiprocessor system : Multi-threads per core */
cpuid_topo->thread_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
- cpuid_topo->cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
+ cpuid_topo->cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 2) |
+ MPIDR_AFFINITY_LEVEL(mpidr, 3) << 8;
} else {
/* Multiprocessor system : Single-thread per core */
cpuid_topo->thread_id = -1;
cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
- cpuid_topo->cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+ cpuid_topo->cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 1) |
+ MPIDR_AFFINITY_LEVEL(mpidr, 2) << 8 |
+ MPIDR_AFFINITY_LEVEL(mpidr, 3) << 16;
}
pr_debug("CPU%u: cluster %d core %d thread %d mpidr %#016llx\n",
diff --git a/arch/arm64/kernel/trace-events-emulation.h b/arch/arm64/kernel/trace-events-emulation.h
new file mode 100644
index 000000000000..ae1dd598ea65
--- /dev/null
+++ b/arch/arm64/kernel/trace-events-emulation.h
@@ -0,0 +1,35 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM emulation
+
+#if !defined(_TRACE_EMULATION_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_EMULATION_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(instruction_emulation,
+
+ TP_PROTO(const char *instr, u64 addr),
+ TP_ARGS(instr, addr),
+
+ TP_STRUCT__entry(
+ __string(instr, instr)
+ __field(u64, addr)
+ ),
+
+ TP_fast_assign(
+ __assign_str(instr, instr);
+ __entry->addr = addr;
+ ),
+
+ TP_printk("instr=\"%s\" addr=0x%llx", __get_str(instr), __entry->addr)
+);
+
+#endif /* _TRACE_EMULATION_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+
+#define TRACE_INCLUDE_FILE trace-events-emulation
+#include <trace/define_trace.h>
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index de1b085e7963..0a801e3743d5 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -259,6 +259,69 @@ void arm64_notify_die(const char *str, struct pt_regs *regs,
}
}
+static LIST_HEAD(undef_hook);
+static DEFINE_RAW_SPINLOCK(undef_lock);
+
+void register_undef_hook(struct undef_hook *hook)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&undef_lock, flags);
+ list_add(&hook->node, &undef_hook);
+ raw_spin_unlock_irqrestore(&undef_lock, flags);
+}
+
+void unregister_undef_hook(struct undef_hook *hook)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&undef_lock, flags);
+ list_del(&hook->node);
+ raw_spin_unlock_irqrestore(&undef_lock, flags);
+}
+
+static int call_undef_hook(struct pt_regs *regs)
+{
+ struct undef_hook *hook;
+ unsigned long flags;
+ u32 instr;
+ int (*fn)(struct pt_regs *regs, u32 instr) = NULL;
+ void __user *pc = (void __user *)instruction_pointer(regs);
+
+ if (!user_mode(regs))
+ return 1;
+
+ if (compat_thumb_mode(regs)) {
+ /* 16-bit Thumb instruction */
+ if (get_user(instr, (u16 __user *)pc))
+ goto exit;
+ instr = le16_to_cpu(instr);
+ if (aarch32_insn_is_wide(instr)) {
+ u32 instr2;
+
+ if (get_user(instr2, (u16 __user *)(pc + 2)))
+ goto exit;
+ instr2 = le16_to_cpu(instr2);
+ instr = (instr << 16) | instr2;
+ }
+ } else {
+ /* 32-bit ARM instruction */
+ if (get_user(instr, (u32 __user *)pc))
+ goto exit;
+ instr = le32_to_cpu(instr);
+ }
+
+ raw_spin_lock_irqsave(&undef_lock, flags);
+ list_for_each_entry(hook, &undef_hook, node)
+ if ((instr & hook->instr_mask) == hook->instr_val &&
+ (regs->pstate & hook->pstate_mask) == hook->pstate_val)
+ fn = hook->fn;
+
+ raw_spin_unlock_irqrestore(&undef_lock, flags);
+exit:
+ return fn ? fn(regs, instr) : 1;
+}
+
asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
{
siginfo_t info;
@@ -268,6 +331,9 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
if (!aarch32_break_handler(regs))
return;
+ if (call_undef_hook(regs) == 0)
+ return;
+
if (show_unhandled_signals && unhandled_signal(current, SIGILL) &&
printk_ratelimit()) {
pr_info("%s[%d]: undefined instruction: pc=%p\n",
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index edf8715ba39b..9965ec87cbec 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -11,8 +11,9 @@
#include "image.h"
-#define ARM_EXIT_KEEP(x)
-#define ARM_EXIT_DISCARD(x) x
+/* .exit.text needed in case of alternative patching */
+#define ARM_EXIT_KEEP(x) x
+#define ARM_EXIT_DISCARD(x)
OUTPUT_ARCH(aarch64)
ENTRY(_text)
@@ -32,6 +33,22 @@ jiffies = jiffies_64;
*(.hyp.text) \
VMLINUX_SYMBOL(__hyp_text_end) = .;
+/*
+ * The size of the PE/COFF section that covers the kernel image, which
+ * runs from stext to _edata, must be a round multiple of the PE/COFF
+ * FileAlignment, which we set to its minimum value of 0x200. 'stext'
+ * itself is 4 KB aligned, so padding out _edata to a 0x200 aligned
+ * boundary should be sufficient.
+ */
+PECOFF_FILE_ALIGNMENT = 0x200;
+
+#ifdef CONFIG_EFI
+#define PECOFF_EDATA_PADDING \
+ .pecoff_edata_padding : { BYTE(0); . = ALIGN(PECOFF_FILE_ALIGNMENT); }
+#else
+#define PECOFF_EDATA_PADDING
+#endif
+
SECTIONS
{
/*
@@ -100,9 +117,21 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
__init_end = .;
+ . = ALIGN(4);
+ .altinstructions : {
+ __alt_instructions = .;
+ *(.altinstructions)
+ __alt_instructions_end = .;
+ }
+ .altinstr_replacement : {
+ *(.altinstr_replacement)
+ }
+
+ . = ALIGN(PAGE_SIZE);
_data = .;
_sdata = .;
RW_DATA_SECTION(64, PAGE_SIZE, THREAD_SIZE)
+ PECOFF_EDATA_PADDING
_edata = .;
BSS_SECTION(0, 0, 0)
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index b72aa9f9215c..fbe909fb0a1a 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -761,10 +761,10 @@
.macro activate_traps
ldr x2, [x0, #VCPU_HCR_EL2]
msr hcr_el2, x2
- ldr x2, =(CPTR_EL2_TTA)
+ mov x2, #CPTR_EL2_TTA
msr cptr_el2, x2
- ldr x2, =(1 << 15) // Trap CP15 Cr=15
+ mov x2, #(1 << 15) // Trap CP15 Cr=15
msr hstr_el2, x2
mrs x2, mdcr_el2
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 4cc3b719208e..3d7c2df89946 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -424,6 +424,11 @@ static const struct sys_reg_desc sys_reg_descs[] = {
/* VBAR_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b0000), Op2(0b000),
NULL, reset_val, VBAR_EL1, 0 },
+
+ /* ICC_SRE_EL1 */
+ { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b101),
+ trap_raz_wi },
+
/* CONTEXTIDR_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001),
access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
@@ -690,6 +695,10 @@ static const struct sys_reg_desc cp15_regs[] = {
{ Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
{ Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 },
{ Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },
+
+ /* ICC_SRE */
+ { Op1( 0), CRn(12), CRm(12), Op2( 5), trap_raz_wi },
+
{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
};
diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
index 6e0ed93d51fe..c17967fdf5f6 100644
--- a/arch/arm64/lib/clear_user.S
+++ b/arch/arm64/lib/clear_user.S
@@ -46,7 +46,7 @@ USER(9f, strh wzr, [x0], #2 )
sub x1, x1, #2
4: adds x1, x1, #1
b.mi 5f
- strb wzr, [x0]
+USER(9f, strb wzr, [x0] )
5: mov x0, #0
ret
ENDPROC(__clear_user)
diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile
index c56179ed2c09..773d37a14039 100644
--- a/arch/arm64/mm/Makefile
+++ b/arch/arm64/mm/Makefile
@@ -3,3 +3,4 @@ obj-y := dma-mapping.o extable.o fault.o init.o \
ioremap.o mmap.o pgd.o mmu.o \
context.o proc.o pageattr.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
+obj-$(CONFIG_ARM64_PTDUMP) += dump.o
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 23663837acff..2560e1e1562e 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -17,9 +17,12 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/errno.h>
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
+#include <asm/cpufeature.h>
+#include <asm/alternative-asm.h>
#include "proc-macros.S"
@@ -138,9 +141,12 @@ USER(9f, ic ivau, x4 ) // invalidate I line PoU
add x4, x4, x2
cmp x4, x1
b.lo 1b
-9: // ignore any faulting cache operation
dsb ish
isb
+ mov x0, #0
+ ret
+9:
+ mov x0, #-EFAULT
ret
ENDPROC(flush_icache_range)
ENDPROC(__flush_cache_user_range)
@@ -210,7 +216,7 @@ __dma_clean_range:
dcache_line_size x2, x3
sub x3, x2, #1
bic x0, x0, x3
-1: dc cvac, x0 // clean D / U line
+1: alternative_insn "dc cvac, x0", "dc civac, x0", ARM64_WORKAROUND_CLEAN_CACHE
add x0, x0, x2
cmp x0, x1
b.lo 1b
diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c
new file mode 100644
index 000000000000..bf69601be546
--- /dev/null
+++ b/arch/arm64/mm/dump.c
@@ -0,0 +1,332 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Debug helper to dump the current kernel pagetables of the system
+ * so that we can see what the various memory ranges are set to.
+ *
+ * Derived from x86 and arm implementation:
+ * (C) Copyright 2008 Intel Corporation
+ *
+ * Author: Arjan van de Ven <arjan@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+
+#include <asm/fixmap.h>
+#include <asm/pgtable.h>
+
+#define LOWEST_ADDR (UL(0xffffffffffffffff) << VA_BITS)
+
+struct addr_marker {
+ unsigned long start_address;
+ const char *name;
+};
+
+enum address_markers_idx {
+ VMALLOC_START_NR = 0,
+ VMALLOC_END_NR,
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+ VMEMMAP_START_NR,
+ VMEMMAP_END_NR,
+#endif
+ PCI_START_NR,
+ PCI_END_NR,
+ FIXADDR_START_NR,
+ FIXADDR_END_NR,
+ MODULES_START_NR,
+ MODULES_END_NR,
+ KERNEL_SPACE_NR,
+};
+
+static struct addr_marker address_markers[] = {
+ { VMALLOC_START, "vmalloc() Area" },
+ { VMALLOC_END, "vmalloc() End" },
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+ { 0, "vmemmap start" },
+ { 0, "vmemmap end" },
+#endif
+ { (unsigned long) PCI_IOBASE, "PCI I/O start" },
+ { (unsigned long) PCI_IOBASE + SZ_16M, "PCI I/O end" },
+ { FIXADDR_START, "Fixmap start" },
+ { FIXADDR_TOP, "Fixmap end" },
+ { MODULES_VADDR, "Modules start" },
+ { MODULES_END, "Modules end" },
+ { PAGE_OFFSET, "Kernel Mapping" },
+ { -1, NULL },
+};
+
+struct pg_state {
+ struct seq_file *seq;
+ const struct addr_marker *marker;
+ unsigned long start_address;
+ unsigned level;
+ u64 current_prot;
+};
+
+struct prot_bits {
+ u64 mask;
+ u64 val;
+ const char *set;
+ const char *clear;
+};
+
+static const struct prot_bits pte_bits[] = {
+ {
+ .mask = PTE_USER,
+ .val = PTE_USER,
+ .set = "USR",
+ .clear = " ",
+ }, {
+ .mask = PTE_RDONLY,
+ .val = PTE_RDONLY,
+ .set = "ro",
+ .clear = "RW",
+ }, {
+ .mask = PTE_PXN,
+ .val = PTE_PXN,
+ .set = "NX",
+ .clear = "x ",
+ }, {
+ .mask = PTE_SHARED,
+ .val = PTE_SHARED,
+ .set = "SHD",
+ .clear = " ",
+ }, {
+ .mask = PTE_AF,
+ .val = PTE_AF,
+ .set = "AF",
+ .clear = " ",
+ }, {
+ .mask = PTE_NG,
+ .val = PTE_NG,
+ .set = "NG",
+ .clear = " ",
+ }, {
+ .mask = PTE_UXN,
+ .val = PTE_UXN,
+ .set = "UXN",
+ }, {
+ .mask = PTE_ATTRINDX_MASK,
+ .val = PTE_ATTRINDX(MT_DEVICE_nGnRnE),
+ .set = "DEVICE/nGnRnE",
+ }, {
+ .mask = PTE_ATTRINDX_MASK,
+ .val = PTE_ATTRINDX(MT_DEVICE_nGnRE),
+ .set = "DEVICE/nGnRE",
+ }, {
+ .mask = PTE_ATTRINDX_MASK,
+ .val = PTE_ATTRINDX(MT_DEVICE_GRE),
+ .set = "DEVICE/GRE",
+ }, {
+ .mask = PTE_ATTRINDX_MASK,
+ .val = PTE_ATTRINDX(MT_NORMAL_NC),
+ .set = "MEM/NORMAL-NC",
+ }, {
+ .mask = PTE_ATTRINDX_MASK,
+ .val = PTE_ATTRINDX(MT_NORMAL),
+ .set = "MEM/NORMAL",
+ }
+};
+
+struct pg_level {
+ const struct prot_bits *bits;
+ size_t num;
+ u64 mask;
+};
+
+static struct pg_level pg_level[] = {
+ {
+ }, { /* pgd */
+ .bits = pte_bits,
+ .num = ARRAY_SIZE(pte_bits),
+ }, { /* pud */
+ .bits = pte_bits,
+ .num = ARRAY_SIZE(pte_bits),
+ }, { /* pmd */
+ .bits = pte_bits,
+ .num = ARRAY_SIZE(pte_bits),
+ }, { /* pte */
+ .bits = pte_bits,
+ .num = ARRAY_SIZE(pte_bits),
+ },
+};
+
+static void dump_prot(struct pg_state *st, const struct prot_bits *bits,
+ size_t num)
+{
+ unsigned i;
+
+ for (i = 0; i < num; i++, bits++) {
+ const char *s;
+
+ if ((st->current_prot & bits->mask) == bits->val)
+ s = bits->set;
+ else
+ s = bits->clear;
+
+ if (s)
+ seq_printf(st->seq, " %s", s);
+ }
+}
+
+static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
+ u64 val)
+{
+ static const char units[] = "KMGTPE";
+ u64 prot = val & pg_level[level].mask;
+
+ if (addr < LOWEST_ADDR)
+ return;
+
+ if (!st->level) {
+ st->level = level;
+ st->current_prot = prot;
+ st->start_address = addr;
+ seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
+ } else if (prot != st->current_prot || level != st->level ||
+ addr >= st->marker[1].start_address) {
+ const char *unit = units;
+ unsigned long delta;
+
+ if (st->current_prot) {
+ seq_printf(st->seq, "0x%16lx-0x%16lx ",
+ st->start_address, addr);
+
+ delta = (addr - st->start_address) >> 10;
+ while (!(delta & 1023) && unit[1]) {
+ delta >>= 10;
+ unit++;
+ }
+ seq_printf(st->seq, "%9lu%c", delta, *unit);
+ if (pg_level[st->level].bits)
+ dump_prot(st, pg_level[st->level].bits,
+ pg_level[st->level].num);
+ seq_puts(st->seq, "\n");
+ }
+
+ if (addr >= st->marker[1].start_address) {
+ st->marker++;
+ seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
+ }
+
+ st->start_address = addr;
+ st->current_prot = prot;
+ st->level = level;
+ }
+
+ if (addr >= st->marker[1].start_address) {
+ st->marker++;
+ seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
+ }
+
+}
+
+static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start)
+{
+ pte_t *pte = pte_offset_kernel(pmd, 0);
+ unsigned long addr;
+ unsigned i;
+
+ for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
+ addr = start + i * PAGE_SIZE;
+ note_page(st, addr, 4, pte_val(*pte));
+ }
+}
+
+static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
+{
+ pmd_t *pmd = pmd_offset(pud, 0);
+ unsigned long addr;
+ unsigned i;
+
+ for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
+ addr = start + i * PMD_SIZE;
+ if (pmd_none(*pmd) || pmd_sect(*pmd) || pmd_bad(*pmd))
+ note_page(st, addr, 3, pmd_val(*pmd));
+ else
+ walk_pte(st, pmd, addr);
+ }
+}
+
+static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
+{
+ pud_t *pud = pud_offset(pgd, 0);
+ unsigned long addr;
+ unsigned i;
+
+ for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
+ addr = start + i * PUD_SIZE;
+ if (pud_none(*pud) || pud_sect(*pud) || pud_bad(*pud))
+ note_page(st, addr, 2, pud_val(*pud));
+ else
+ walk_pmd(st, pud, addr);
+ }
+}
+
+static void walk_pgd(struct pg_state *st, struct mm_struct *mm, unsigned long start)
+{
+ pgd_t *pgd = pgd_offset(mm, 0);
+ unsigned i;
+ unsigned long addr;
+
+ for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
+ addr = start + i * PGDIR_SIZE;
+ if (pgd_none(*pgd) || pgd_bad(*pgd))
+ note_page(st, addr, 1, pgd_val(*pgd));
+ else
+ walk_pud(st, pgd, addr);
+ }
+}
+
+static int ptdump_show(struct seq_file *m, void *v)
+{
+ struct pg_state st = {
+ .seq = m,
+ .marker = address_markers,
+ };
+
+ walk_pgd(&st, &init_mm, LOWEST_ADDR);
+
+ note_page(&st, 0, 0, 0);
+ return 0;
+}
+
+static int ptdump_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ptdump_show, NULL);
+}
+
+static const struct file_operations ptdump_fops = {
+ .open = ptdump_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int ptdump_init(void)
+{
+ struct dentry *pe;
+ unsigned i, j;
+
+ for (i = 0; i < ARRAY_SIZE(pg_level); i++)
+ if (pg_level[i].bits)
+ for (j = 0; j < pg_level[i].num; j++)
+ pg_level[i].mask |= pg_level[i].bits[j].mask;
+
+ address_markers[VMEMMAP_START_NR].start_address =
+ (unsigned long)virt_to_page(PAGE_OFFSET);
+ address_markers[VMEMMAP_END_NR].start_address =
+ (unsigned long)virt_to_page(high_memory);
+
+ pe = debugfs_create_file("kernel_page_tables", 0400, NULL, NULL,
+ &ptdump_fops);
+ return pe ? 0 : -ENOMEM;
+}
+device_initcall(ptdump_init);
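The new dump.c walker collapses runs of identically-mapped entries and prints each run's size with a K/M/G/T suffix; the loop in note_page() keeps shifting the delta down by 10 bits while it remains an exact multiple of 1024. A minimal stand-alone version of that scaling step:

    #include <stdio.h>

    static void print_human_size(unsigned long bytes)
    {
    	static const char units[] = "KMGTPE";
    	const char *unit = units;
    	unsigned long delta = bytes >> 10;	/* start in KiB, as note_page() does */

    	while (!(delta & 1023) && unit[1]) {
    		delta >>= 10;
    		unit++;
    	}
    	printf("%9lu%c\n", delta, *unit);
    }

    int main(void)
    {
    	print_human_size(4096UL);	/* prints "4K" */
    	print_human_size(2UL << 20);	/* prints "2M" */
    	print_human_size(1UL << 30);	/* prints "1G" */
    	return 0;
    }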
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 41cb6d3d6075..c11cd27ca8f5 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -380,7 +380,7 @@ static struct fault_info {
{ do_bad, SIGBUS, 0, "level 1 address size fault" },
{ do_bad, SIGBUS, 0, "level 2 address size fault" },
{ do_bad, SIGBUS, 0, "level 3 address size fault" },
- { do_translation_fault, SIGSEGV, SEGV_MAPERR, "input address range fault" },
+ { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 0 translation fault" },
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 1 translation fault" },
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 2 translation fault" },
{ do_page_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" },
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 494297c698ca..bac492c12fcc 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -39,6 +39,7 @@
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
+#include <asm/alternative.h>
#include "mm.h"
@@ -325,6 +326,7 @@ void __init mem_init(void)
void free_initmem(void)
{
free_initmem_default(0);
+ free_alternatives_memory();
}
#ifdef CONFIG_BLK_DEV_INITRD
diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c
index 4a07630a6616..cbb99c8f1e04 100644
--- a/arch/arm64/mm/ioremap.c
+++ b/arch/arm64/mm/ioremap.c
@@ -103,97 +103,10 @@ void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
}
EXPORT_SYMBOL(ioremap_cache);
-static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
-#if CONFIG_ARM64_PGTABLE_LEVELS > 2
-static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
-#endif
-#if CONFIG_ARM64_PGTABLE_LEVELS > 3
-static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss;
-#endif
-
-static inline pud_t * __init early_ioremap_pud(unsigned long addr)
-{
- pgd_t *pgd;
-
- pgd = pgd_offset_k(addr);
- BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));
-
- return pud_offset(pgd, addr);
-}
-
-static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
-{
- pud_t *pud = early_ioremap_pud(addr);
-
- BUG_ON(pud_none(*pud) || pud_bad(*pud));
-
- return pmd_offset(pud, addr);
-}
-
-static inline pte_t * __init early_ioremap_pte(unsigned long addr)
-{
- pmd_t *pmd = early_ioremap_pmd(addr);
-
- BUG_ON(pmd_none(*pmd) || pmd_bad(*pmd));
-
- return pte_offset_kernel(pmd, addr);
-}
-
+/*
+ * Must be called after early_fixmap_init
+ */
void __init early_ioremap_init(void)
{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- unsigned long addr = fix_to_virt(FIX_BTMAP_BEGIN);
-
- pgd = pgd_offset_k(addr);
- pgd_populate(&init_mm, pgd, bm_pud);
- pud = pud_offset(pgd, addr);
- pud_populate(&init_mm, pud, bm_pmd);
- pmd = pmd_offset(pud, addr);
- pmd_populate_kernel(&init_mm, pmd, bm_pte);
-
- /*
- * The boot-ioremap range spans multiple pmds, for which
- * we are not prepared:
- */
- BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
- != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
-
- if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
- WARN_ON(1);
- pr_warn("pmd %p != %p\n",
- pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
- pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
- fix_to_virt(FIX_BTMAP_BEGIN));
- pr_warn("fix_to_virt(FIX_BTMAP_END): %08lx\n",
- fix_to_virt(FIX_BTMAP_END));
-
- pr_warn("FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
- pr_warn("FIX_BTMAP_BEGIN: %d\n",
- FIX_BTMAP_BEGIN);
- }
-
early_ioremap_setup();
}
-
-void __init __early_set_fixmap(enum fixed_addresses idx,
- phys_addr_t phys, pgprot_t flags)
-{
- unsigned long addr = __fix_to_virt(idx);
- pte_t *pte;
-
- if (idx >= __end_of_fixed_addresses) {
- BUG();
- return;
- }
-
- pte = early_ioremap_pte(addr);
-
- if (pgprot_val(flags))
- set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
- else {
- pte_clear(&init_mm, addr, pte);
- flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
- }
-}
diff --git a/arch/arm64/mm/mm.h b/arch/arm64/mm/mm.h
index d519f4f50c8c..50c3351df9c7 100644
--- a/arch/arm64/mm/mm.h
+++ b/arch/arm64/mm/mm.h
@@ -1,2 +1 @@
extern void __init bootmem_init(void);
-extern void __init arm64_swiotlb_init(void);
diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c
index 1d73662f00ff..54922d1275b8 100644
--- a/arch/arm64/mm/mmap.c
+++ b/arch/arm64/mm/mmap.c
@@ -47,22 +47,14 @@ static int mmap_is_legacy(void)
return sysctl_legacy_va_layout;
}
-/*
- * Since get_random_int() returns the same value within a 1 jiffy window, we
- * will almost always get the same randomisation for the stack and mmap
- * region. This will mean the relative distance between stack and mmap will be
- * the same.
- *
- * To avoid this we can shift the randomness by 1 bit.
- */
static unsigned long mmap_rnd(void)
{
unsigned long rnd = 0;
if (current->flags & PF_RANDOMIZE)
- rnd = (long)get_random_int() & (STACK_RND_MASK >> 1);
+ rnd = (long)get_random_int() & STACK_RND_MASK;
- return rnd << (PAGE_SHIFT + 1);
+ return rnd << PAGE_SHIFT;
}
static unsigned long mmap_base(void)
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 0bf90d26e745..6032f3e3056a 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -28,6 +28,7 @@
#include <linux/io.h>
#include <asm/cputype.h>
+#include <asm/fixmap.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
@@ -202,7 +203,7 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
}
static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
- unsigned long end, unsigned long phys,
+ unsigned long end, phys_addr_t phys,
int map_io)
{
pud_t *pud;
@@ -463,3 +464,96 @@ void vmemmap_free(unsigned long start, unsigned long end)
{
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
+
+static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
+#if CONFIG_ARM64_PGTABLE_LEVELS > 2
+static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
+#endif
+#if CONFIG_ARM64_PGTABLE_LEVELS > 3
+static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss;
+#endif
+
+static inline pud_t * fixmap_pud(unsigned long addr)
+{
+ pgd_t *pgd = pgd_offset_k(addr);
+
+ BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));
+
+ return pud_offset(pgd, addr);
+}
+
+static inline pmd_t * fixmap_pmd(unsigned long addr)
+{
+ pud_t *pud = fixmap_pud(addr);
+
+ BUG_ON(pud_none(*pud) || pud_bad(*pud));
+
+ return pmd_offset(pud, addr);
+}
+
+static inline pte_t * fixmap_pte(unsigned long addr)
+{
+ pmd_t *pmd = fixmap_pmd(addr);
+
+ BUG_ON(pmd_none(*pmd) || pmd_bad(*pmd));
+
+ return pte_offset_kernel(pmd, addr);
+}
+
+void __init early_fixmap_init(void)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ unsigned long addr = FIXADDR_START;
+
+ pgd = pgd_offset_k(addr);
+ pgd_populate(&init_mm, pgd, bm_pud);
+ pud = pud_offset(pgd, addr);
+ pud_populate(&init_mm, pud, bm_pmd);
+ pmd = pmd_offset(pud, addr);
+ pmd_populate_kernel(&init_mm, pmd, bm_pte);
+
+ /*
+ * The boot-ioremap range spans multiple pmds, for which
+ * we are not prepared:
+ */
+ BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
+ != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
+
+ if ((pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
+ || pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
+ WARN_ON(1);
+ pr_warn("pmd %p != %p, %p\n",
+ pmd, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
+ fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
+ pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
+ fix_to_virt(FIX_BTMAP_BEGIN));
+ pr_warn("fix_to_virt(FIX_BTMAP_END): %08lx\n",
+ fix_to_virt(FIX_BTMAP_END));
+
+ pr_warn("FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
+ pr_warn("FIX_BTMAP_BEGIN: %d\n", FIX_BTMAP_BEGIN);
+ }
+}
+
+void __set_fixmap(enum fixed_addresses idx,
+ phys_addr_t phys, pgprot_t flags)
+{
+ unsigned long addr = __fix_to_virt(idx);
+ pte_t *pte;
+
+ if (idx >= __end_of_fixed_addresses) {
+ BUG();
+ return;
+ }
+
+ pte = fixmap_pte(addr);
+
+ if (pgprot_val(flags)) {
+ set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
+ } else {
+ pte_clear(&init_mm, addr, pte);
+ flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
+ }
+}
diff --git a/arch/arm64/mm/pgd.c b/arch/arm64/mm/pgd.c
index 6682b361d3ac..71ca104f97bd 100644
--- a/arch/arm64/mm/pgd.c
+++ b/arch/arm64/mm/pgd.c
@@ -35,9 +35,9 @@ static struct kmem_cache *pgd_cache;
pgd_t *pgd_alloc(struct mm_struct *mm)
{
if (PGD_SIZE == PAGE_SIZE)
- return (pgd_t *)get_zeroed_page(GFP_KERNEL);
+ return (pgd_t *)__get_free_page(PGALLOC_GFP);
else
- return kmem_cache_zalloc(pgd_cache, GFP_KERNEL);
+ return kmem_cache_alloc(pgd_cache, PGALLOC_GFP);
}
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 41f1e3e2ea24..edba042b2325 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -60,7 +60,7 @@ struct jit_ctx {
const struct bpf_prog *prog;
int idx;
int tmp_used;
- int body_offset;
+ int epilogue_offset;
int *offset;
u32 *image;
};
@@ -130,8 +130,8 @@ static void jit_fill_hole(void *area, unsigned int size)
static inline int epilogue_offset(const struct jit_ctx *ctx)
{
- int to = ctx->offset[ctx->prog->len - 1];
- int from = ctx->idx - ctx->body_offset;
+ int to = ctx->epilogue_offset;
+ int from = ctx->idx;
return to - from;
}
@@ -463,6 +463,8 @@ emit_cond_jmp:
}
/* function return */
case BPF_JMP | BPF_EXIT:
+ /* Optimization: when the last instruction is EXIT,
+ * simply fall through to the epilogue. */
if (i == ctx->prog->len - 1)
break;
jmp_offset = epilogue_offset(ctx);
@@ -685,11 +687,13 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
/* 1. Initial fake pass to compute ctx->idx. */
- /* Fake pass to fill in ctx->offset. */
+ /* Fake pass to fill in ctx->offset and ctx->tmp_used. */
if (build_body(&ctx))
goto out;
build_prologue(&ctx);
+
+ ctx.epilogue_offset = ctx.idx;
build_epilogue(&ctx);
/* Now we know the actual image size. */
@@ -706,7 +710,6 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
build_prologue(&ctx);
- ctx.body_offset = ctx.idx;
if (build_body(&ctx)) {
bpf_jit_binary_free(header);
goto out;
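The JIT change above drops body_offset in favour of recording, during the sizing pass, the instruction index at which the shared epilogue will start; every BPF_EXIT then becomes a relative branch of (epilogue_offset - current index). A toy sketch of that bookkeeping (emit() and the text output are placeholders, not the arm64 JIT's real emitters):

    #include <stdio.h>

    struct jit_ctx {
    	int idx;		/* next instruction slot */
    	int epilogue_offset;	/* slot where the epilogue starts */
    };

    static void emit(struct jit_ctx *ctx, const char *what)
    {
    	printf("%2d: %s\n", ctx->idx++, what);
    }

    static void emit_exit(struct jit_ctx *ctx)
    {
    	/* relative distance from this slot to the shared epilogue */
    	printf("%2d: b %+d\t; jump to epilogue\n",
    	       ctx->idx, ctx->epilogue_offset - ctx->idx);
    	ctx->idx++;
    }

    int main(void)
    {
    	struct jit_ctx ctx = { .idx = 0, .epilogue_offset = 4 };	/* known from pass 1 */

    	emit(&ctx, "prologue");
    	emit(&ctx, "body insn");
    	emit_exit(&ctx);	/* prints "b +2" */
    	emit(&ctx, "body insn");
    	emit(&ctx, "epilogue");
    	return 0;
    }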
diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c
index 37b75602adf6..cc92cdb9994c 100644
--- a/arch/avr32/mach-at32ap/at32ap700x.c
+++ b/arch/avr32/mach-at32ap/at32ap700x.c
@@ -17,7 +17,7 @@
#include <linux/spi/spi.h>
#include <linux/usb/atmel_usba_udc.h>
-#include <mach/atmel-mci.h>
+#include <linux/platform_data/mmc-atmel-mci.h>
#include <linux/atmel-mci.h>
#include <asm/io.h>
diff --git a/arch/avr32/mach-at32ap/include/mach/atmel-mci.h b/arch/avr32/mach-at32ap/include/mach/atmel-mci.h
deleted file mode 100644
index 11d7f4b28dc8..000000000000
--- a/arch/avr32/mach-at32ap/include/mach/atmel-mci.h
+++ /dev/null
@@ -1,17 +0,0 @@
-#ifndef __MACH_ATMEL_MCI_H
-#define __MACH_ATMEL_MCI_H
-
-#include <linux/platform_data/dma-dw.h>
-
-/**
- * struct mci_dma_data - DMA data for MCI interface
- */
-struct mci_dma_data {
- struct dw_dma_slave sdata;
-};
-
-/* accessor macros */
-#define slave_data_ptr(s) (&(s)->sdata)
-#define find_slave_dev(s) ((s)->sdata.dma_dev)
-
-#endif /* __MACH_ATMEL_MCI_H */
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index ec6b9acb6bea..dbe46f43884d 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -1563,7 +1563,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
for (i = 0; i < npages; i++) {
pfn = gfn_to_pfn(kvm, base_gfn + i);
- if (!kvm_is_mmio_pfn(pfn)) {
+ if (!kvm_is_reserved_pfn(pfn)) {
kvm_set_pmt_entry(kvm, base_gfn + i,
pfn << PAGE_SHIFT,
_PAGE_AR_RWX | _PAGE_MA_WB);
diff --git a/arch/m68k/atari/config.c b/arch/m68k/atari/config.c
index 01a62161b08a..192b00f098f4 100644
--- a/arch/m68k/atari/config.c
+++ b/arch/m68k/atari/config.c
@@ -858,6 +858,24 @@ static struct platform_device *atari_netusbee_devices[] __initdata = {
};
#endif /* CONFIG_ATARI_ETHERNEC */
+#ifdef CONFIG_ATARI_SCSI
+static const struct resource atari_scsi_st_rsrc[] __initconst = {
+ {
+ .flags = IORESOURCE_IRQ,
+ .start = IRQ_MFP_FSCSI,
+ .end = IRQ_MFP_FSCSI,
+ },
+};
+
+static const struct resource atari_scsi_tt_rsrc[] __initconst = {
+ {
+ .flags = IORESOURCE_IRQ,
+ .start = IRQ_TT_MFP_SCSI,
+ .end = IRQ_TT_MFP_SCSI,
+ },
+};
+#endif
+
int __init atari_platform_init(void)
{
int rv = 0;
@@ -892,6 +910,15 @@ int __init atari_platform_init(void)
}
#endif
+#ifdef CONFIG_ATARI_SCSI
+ if (ATARIHW_PRESENT(ST_SCSI))
+ platform_device_register_simple("atari_scsi", -1,
+ atari_scsi_st_rsrc, ARRAY_SIZE(atari_scsi_st_rsrc));
+ else if (ATARIHW_PRESENT(TT_SCSI))
+ platform_device_register_simple("atari_scsi", -1,
+ atari_scsi_tt_rsrc, ARRAY_SIZE(atari_scsi_tt_rsrc));
+#endif
+
return rv;
}
diff --git a/arch/m68k/atari/stdma.c b/arch/m68k/atari/stdma.c
index ddbf43ca8858..e5a66596b116 100644
--- a/arch/m68k/atari/stdma.c
+++ b/arch/m68k/atari/stdma.c
@@ -59,6 +59,31 @@ static irqreturn_t stdma_int (int irq, void *dummy);
/************************* End of Prototypes **************************/
+/**
+ * stdma_try_lock - attempt to acquire ST DMA interrupt "lock"
+ * @handler: interrupt handler to use after acquisition
+ *
+ * Returns !0 if lock was acquired; otherwise 0.
+ */
+
+int stdma_try_lock(irq_handler_t handler, void *data)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ if (stdma_locked) {
+ local_irq_restore(flags);
+ return 0;
+ }
+
+ stdma_locked = 1;
+ stdma_isr = handler;
+ stdma_isr_data = data;
+ local_irq_restore(flags);
+ return 1;
+}
+EXPORT_SYMBOL(stdma_try_lock);
+
/*
* Function: void stdma_lock( isrfunc isr, void *data )
@@ -78,19 +103,10 @@ static irqreturn_t stdma_int (int irq, void *dummy);
void stdma_lock(irq_handler_t handler, void *data)
{
- unsigned long flags;
-
- local_irq_save(flags); /* protect lock */
-
/* Since the DMA is used for file system purposes, we
have to sleep uninterruptible (there may be locked
buffers) */
- wait_event(stdma_wait, !stdma_locked);
-
- stdma_locked = 1;
- stdma_isr = handler;
- stdma_isr_data = data;
- local_irq_restore(flags);
+ wait_event(stdma_wait, stdma_try_lock(handler, data));
}
EXPORT_SYMBOL(stdma_lock);
@@ -122,22 +138,25 @@ void stdma_release(void)
EXPORT_SYMBOL(stdma_release);
-/*
- * Function: int stdma_others_waiting( void )
- *
- * Purpose: Check if someone waits for the ST-DMA lock.
- *
- * Inputs: none
- *
- * Returns: 0 if no one is waiting, != 0 otherwise
+/**
+ * stdma_is_locked_by - allow lock holder to check whether it needs to release.
+ * @handler: interrupt handler previously used to acquire lock.
*
+ * Returns !0 if locked for the given handler; 0 otherwise.
*/
-int stdma_others_waiting(void)
+int stdma_is_locked_by(irq_handler_t handler)
{
- return waitqueue_active(&stdma_wait);
+ unsigned long flags;
+ int result;
+
+ local_irq_save(flags);
+ result = stdma_locked && (stdma_isr == handler);
+ local_irq_restore(flags);
+
+ return result;
}
-EXPORT_SYMBOL(stdma_others_waiting);
+EXPORT_SYMBOL(stdma_is_locked_by);
/*
diff --git a/arch/m68k/include/asm/atari_stdma.h b/arch/m68k/include/asm/atari_stdma.h
index 8e389b7fa70c..d24e34d870dc 100644
--- a/arch/m68k/include/asm/atari_stdma.h
+++ b/arch/m68k/include/asm/atari_stdma.h
@@ -8,11 +8,11 @@
/***************************** Prototypes *****************************/
+int stdma_try_lock(irq_handler_t, void *);
void stdma_lock(irq_handler_t handler, void *data);
void stdma_release( void );
-int stdma_others_waiting( void );
int stdma_islocked( void );
-void *stdma_locked_by( void );
+int stdma_is_locked_by(irq_handler_t);
void stdma_init( void );
/************************* End of Prototypes **************************/
diff --git a/arch/m68k/include/asm/macintosh.h b/arch/m68k/include/asm/macintosh.h
index d323b2c2d07d..29c7c6c3a5f2 100644
--- a/arch/m68k/include/asm/macintosh.h
+++ b/arch/m68k/include/asm/macintosh.h
@@ -53,6 +53,10 @@ struct mac_model
#define MAC_SCSI_QUADRA 2
#define MAC_SCSI_QUADRA2 3
#define MAC_SCSI_QUADRA3 4
+#define MAC_SCSI_IIFX 5
+#define MAC_SCSI_DUO 6
+#define MAC_SCSI_CCL 7
+#define MAC_SCSI_LATE 8
#define MAC_IDE_NONE 0
#define MAC_IDE_QUADRA 1
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index 4ef7a54813e6..75e75d7b1702 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -4,7 +4,7 @@
#include <uapi/asm/unistd.h>
-#define NR_syscalls 354
+#define NR_syscalls 355
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_OLD_STAT
diff --git a/arch/m68k/include/uapi/asm/unistd.h b/arch/m68k/include/uapi/asm/unistd.h
index b419c6b7ac37..2c1bec9a14b6 100644
--- a/arch/m68k/include/uapi/asm/unistd.h
+++ b/arch/m68k/include/uapi/asm/unistd.h
@@ -359,5 +359,6 @@
#define __NR_renameat2 351
#define __NR_getrandom 352
#define __NR_memfd_create 353
+#define __NR_bpf 354
#endif /* _UAPI_ASM_M68K_UNISTD_H_ */
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S
index 05b46c2b08b8..2ca219e184cd 100644
--- a/arch/m68k/kernel/syscalltable.S
+++ b/arch/m68k/kernel/syscalltable.S
@@ -374,4 +374,5 @@ ENTRY(sys_call_table)
.long sys_renameat2
.long sys_getrandom
.long sys_memfd_create
+ .long sys_bpf
diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c
index a471eab1a4dd..e9c3756139fc 100644
--- a/arch/m68k/mac/config.c
+++ b/arch/m68k/mac/config.c
@@ -278,7 +278,7 @@ static struct mac_model mac_data_table[] = {
.name = "IIfx",
.adb_type = MAC_ADB_IOP,
.via_type = MAC_VIA_IICI,
- .scsi_type = MAC_SCSI_OLD,
+ .scsi_type = MAC_SCSI_IIFX,
.scc_type = MAC_SCC_IOP,
.nubus_type = MAC_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_IOP,
@@ -329,7 +329,7 @@ static struct mac_model mac_data_table[] = {
.name = "Color Classic",
.adb_type = MAC_ADB_CUDA,
.via_type = MAC_VIA_IICI,
- .scsi_type = MAC_SCSI_OLD,
+ .scsi_type = MAC_SCSI_CCL,
.scc_type = MAC_SCC_II,
.nubus_type = MAC_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -338,7 +338,7 @@ static struct mac_model mac_data_table[] = {
.name = "Color Classic II",
.adb_type = MAC_ADB_CUDA,
.via_type = MAC_VIA_IICI,
- .scsi_type = MAC_SCSI_OLD,
+ .scsi_type = MAC_SCSI_CCL,
.scc_type = MAC_SCC_II,
.nubus_type = MAC_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -526,7 +526,7 @@ static struct mac_model mac_data_table[] = {
.name = "Performa 520",
.adb_type = MAC_ADB_CUDA,
.via_type = MAC_VIA_IICI,
- .scsi_type = MAC_SCSI_OLD,
+ .scsi_type = MAC_SCSI_CCL,
.scc_type = MAC_SCC_II,
.nubus_type = MAC_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -535,7 +535,7 @@ static struct mac_model mac_data_table[] = {
.name = "Performa 550",
.adb_type = MAC_ADB_CUDA,
.via_type = MAC_VIA_IICI,
- .scsi_type = MAC_SCSI_OLD,
+ .scsi_type = MAC_SCSI_CCL,
.scc_type = MAC_SCC_II,
.nubus_type = MAC_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -567,7 +567,7 @@ static struct mac_model mac_data_table[] = {
.name = "TV",
.adb_type = MAC_ADB_CUDA,
.via_type = MAC_VIA_IICI,
- .scsi_type = MAC_SCSI_OLD,
+ .scsi_type = MAC_SCSI_CCL,
.scc_type = MAC_SCC_II,
.nubus_type = MAC_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -712,7 +712,7 @@ static struct mac_model mac_data_table[] = {
.name = "PowerBook 190",
.adb_type = MAC_ADB_PB2,
.via_type = MAC_VIA_QUADRA,
- .scsi_type = MAC_SCSI_OLD,
+ .scsi_type = MAC_SCSI_LATE,
.ide_type = MAC_IDE_BABOON,
.scc_type = MAC_SCC_QUADRA,
.nubus_type = MAC_NUBUS,
@@ -722,7 +722,7 @@ static struct mac_model mac_data_table[] = {
.name = "PowerBook 520",
.adb_type = MAC_ADB_PB2,
.via_type = MAC_VIA_QUADRA,
- .scsi_type = MAC_SCSI_OLD,
+ .scsi_type = MAC_SCSI_LATE,
.scc_type = MAC_SCC_QUADRA,
.ether_type = MAC_ETHER_SONIC,
.nubus_type = MAC_NUBUS,
@@ -740,7 +740,7 @@ static struct mac_model mac_data_table[] = {
.name = "PowerBook Duo 210",
.adb_type = MAC_ADB_PB2,
.via_type = MAC_VIA_IICI,
- .scsi_type = MAC_SCSI_OLD,
+ .scsi_type = MAC_SCSI_DUO,
.scc_type = MAC_SCC_QUADRA,
.nubus_type = MAC_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -749,7 +749,7 @@ static struct mac_model mac_data_table[] = {
.name = "PowerBook Duo 230",
.adb_type = MAC_ADB_PB2,
.via_type = MAC_VIA_IICI,
- .scsi_type = MAC_SCSI_OLD,
+ .scsi_type = MAC_SCSI_DUO,
.scc_type = MAC_SCC_QUADRA,
.nubus_type = MAC_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -758,7 +758,7 @@ static struct mac_model mac_data_table[] = {
.name = "PowerBook Duo 250",
.adb_type = MAC_ADB_PB2,
.via_type = MAC_VIA_IICI,
- .scsi_type = MAC_SCSI_OLD,
+ .scsi_type = MAC_SCSI_DUO,
.scc_type = MAC_SCC_QUADRA,
.nubus_type = MAC_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -767,7 +767,7 @@ static struct mac_model mac_data_table[] = {
.name = "PowerBook Duo 270c",
.adb_type = MAC_ADB_PB2,
.via_type = MAC_VIA_IICI,
- .scsi_type = MAC_SCSI_OLD,
+ .scsi_type = MAC_SCSI_DUO,
.scc_type = MAC_SCC_QUADRA,
.nubus_type = MAC_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -776,7 +776,7 @@ static struct mac_model mac_data_table[] = {
.name = "PowerBook Duo 280",
.adb_type = MAC_ADB_PB2,
.via_type = MAC_VIA_IICI,
- .scsi_type = MAC_SCSI_OLD,
+ .scsi_type = MAC_SCSI_DUO,
.scc_type = MAC_SCC_QUADRA,
.nubus_type = MAC_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -785,7 +785,7 @@ static struct mac_model mac_data_table[] = {
.name = "PowerBook Duo 280c",
.adb_type = MAC_ADB_PB2,
.via_type = MAC_VIA_IICI,
- .scsi_type = MAC_SCSI_OLD,
+ .scsi_type = MAC_SCSI_DUO,
.scc_type = MAC_SCC_QUADRA,
.nubus_type = MAC_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -929,6 +929,70 @@ static struct platform_device swim_pdev = {
.resource = &swim_rsrc,
};
+static const struct resource mac_scsi_iifx_rsrc[] __initconst = {
+ {
+ .flags = IORESOURCE_IRQ,
+ .start = IRQ_MAC_SCSI,
+ .end = IRQ_MAC_SCSI,
+ }, {
+ .flags = IORESOURCE_MEM,
+ .start = 0x50008000,
+ .end = 0x50009FFF,
+ },
+};
+
+static const struct resource mac_scsi_duo_rsrc[] __initconst = {
+ {
+ .flags = IORESOURCE_MEM,
+ .start = 0xFEE02000,
+ .end = 0xFEE03FFF,
+ },
+};
+
+static const struct resource mac_scsi_old_rsrc[] __initconst = {
+ {
+ .flags = IORESOURCE_IRQ,
+ .start = IRQ_MAC_SCSI,
+ .end = IRQ_MAC_SCSI,
+ }, {
+ .flags = IORESOURCE_MEM,
+ .start = 0x50010000,
+ .end = 0x50011FFF,
+ }, {
+ .flags = IORESOURCE_MEM,
+ .start = 0x50006000,
+ .end = 0x50007FFF,
+ },
+};
+
+static const struct resource mac_scsi_late_rsrc[] __initconst = {
+ {
+ .flags = IORESOURCE_IRQ,
+ .start = IRQ_MAC_SCSI,
+ .end = IRQ_MAC_SCSI,
+ }, {
+ .flags = IORESOURCE_MEM,
+ .start = 0x50010000,
+ .end = 0x50011FFF,
+ },
+};
+
+static const struct resource mac_scsi_ccl_rsrc[] __initconst = {
+ {
+ .flags = IORESOURCE_IRQ,
+ .start = IRQ_MAC_SCSI,
+ .end = IRQ_MAC_SCSI,
+ }, {
+ .flags = IORESOURCE_MEM,
+ .start = 0x50F10000,
+ .end = 0x50F11FFF,
+ }, {
+ .flags = IORESOURCE_MEM,
+ .start = 0x50F06000,
+ .end = 0x50F07FFF,
+ },
+};
+
static struct platform_device esp_0_pdev = {
.name = "mac_esp",
.id = 0,
@@ -1000,6 +1064,60 @@ int __init mac_platform_init(void)
(macintosh_config->ident == MAC_MODEL_Q950))
platform_device_register(&esp_1_pdev);
break;
+ case MAC_SCSI_IIFX:
+ /* Addresses from The Guide to Mac Family Hardware.
+ * $5000 8000 - $5000 9FFF: SCSI DMA
+ * $5000 C000 - $5000 DFFF: Alternate SCSI (DMA)
+ * $5000 E000 - $5000 FFFF: Alternate SCSI (Hsk)
+ * The SCSI DMA custom IC embeds the 53C80 core. mac_scsi does
+ * not make use of its DMA or hardware handshaking logic.
+ */
+ platform_device_register_simple("mac_scsi", 0,
+ mac_scsi_iifx_rsrc, ARRAY_SIZE(mac_scsi_iifx_rsrc));
+ break;
+ case MAC_SCSI_DUO:
+ /* Addresses from the Duo Dock II Developer Note.
+ * $FEE0 2000 - $FEE0 3FFF: normal mode
+ * $FEE0 4000 - $FEE0 5FFF: pseudo DMA without /DRQ
+ * $FEE0 6000 - $FEE0 7FFF: pseudo DMA with /DRQ
+ * The NetBSD code indicates that both 5380 chips share
+ * an IRQ (?) which would need careful handling (see mac_esp).
+ */
+ platform_device_register_simple("mac_scsi", 1,
+ mac_scsi_duo_rsrc, ARRAY_SIZE(mac_scsi_duo_rsrc));
+ /* fall through */
+ case MAC_SCSI_OLD:
+ /* Addresses from Developer Notes for Duo System,
+ * PowerBook 180 & 160, 140 & 170, Macintosh IIsi
+ * and also from The Guide to Mac Family Hardware for
+ * SE/30, II, IIx, IIcx, IIci.
+ * $5000 6000 - $5000 7FFF: pseudo-DMA with /DRQ
+ * $5001 0000 - $5001 1FFF: normal mode
+ * $5001 2000 - $5001 3FFF: pseudo-DMA without /DRQ
+ * GMFH says that $5000 0000 - $50FF FFFF "wraps
+ * $5000 0000 - $5001 FFFF eight times" (!)
+ * mess.org says IIci and Color Classic do not alias
+ * I/O address space.
+ */
+ platform_device_register_simple("mac_scsi", 0,
+ mac_scsi_old_rsrc, ARRAY_SIZE(mac_scsi_old_rsrc));
+ break;
+ case MAC_SCSI_LATE:
+ /* PDMA logic in 68040 PowerBooks is somewhat different from that in
+ * '030 models. It's probably more like Quadras (see mac_esp).
+ */
+ platform_device_register_simple("mac_scsi", 0,
+ mac_scsi_late_rsrc, ARRAY_SIZE(mac_scsi_late_rsrc));
+ break;
+ case MAC_SCSI_CCL:
+ /* Addresses from the Color Classic Developer Note.
+ * $50F0 6000 - $50F0 7FFF: SCSI handshake
+ * $50F1 0000 - $50F1 1FFF: SCSI
+ * $50F1 2000 - $50F1 3FFF: SCSI DMA
+ */
+ platform_device_register_simple("mac_scsi", 0,
+ mac_scsi_ccl_rsrc, ARRAY_SIZE(mac_scsi_ccl_rsrc));
+ break;
}
/*
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
index acaff6a49e35..b09a3cb29b68 100644
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -94,7 +94,6 @@ void __init paging_init(void)
high_memory = (void *) end_mem;
empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
- memset(empty_zero_page, 0, PAGE_SIZE);
/*
* Set up SFC/DFC registers (user data space).
diff --git a/arch/m68k/sun3/config.c b/arch/m68k/sun3/config.c
index f59ec58083f8..a8b942bf7163 100644
--- a/arch/m68k/sun3/config.c
+++ b/arch/m68k/sun3/config.c
@@ -16,6 +16,7 @@
#include <linux/console.h>
#include <linux/init.h>
#include <linux/bootmem.h>
+#include <linux/platform_device.h>
#include <asm/oplib.h>
#include <asm/setup.h>
@@ -27,6 +28,7 @@
#include <asm/sun3mmu.h>
#include <asm/rtc.h>
#include <asm/machdep.h>
+#include <asm/machines.h>
#include <asm/idprom.h>
#include <asm/intersil.h>
#include <asm/irq.h>
@@ -169,3 +171,61 @@ static void __init sun3_sched_init(irq_handler_t timer_routine)
intersil_clear();
}
+#ifdef CONFIG_SUN3_SCSI
+
+static const struct resource sun3_scsi_vme_rsrc[] __initconst = {
+ {
+ .flags = IORESOURCE_IRQ,
+ .start = SUN3_VEC_VMESCSI0,
+ .end = SUN3_VEC_VMESCSI0,
+ }, {
+ .flags = IORESOURCE_MEM,
+ .start = 0xff200000,
+ .end = 0xff200021,
+ }, {
+ .flags = IORESOURCE_IRQ,
+ .start = SUN3_VEC_VMESCSI1,
+ .end = SUN3_VEC_VMESCSI1,
+ }, {
+ .flags = IORESOURCE_MEM,
+ .start = 0xff204000,
+ .end = 0xff204021,
+ },
+};
+
+/*
+ * Int: level 2 autovector
+ * IO: type 1, base 0x00140000, 5 bits phys space: A<4..0>
+ */
+static const struct resource sun3_scsi_rsrc[] __initconst = {
+ {
+ .flags = IORESOURCE_IRQ,
+ .start = 2,
+ .end = 2,
+ }, {
+ .flags = IORESOURCE_MEM,
+ .start = 0x00140000,
+ .end = 0x0014001f,
+ },
+};
+
+int __init sun3_platform_init(void)
+{
+ switch (idprom->id_machtype) {
+ case SM_SUN3 | SM_3_160:
+ case SM_SUN3 | SM_3_260:
+ platform_device_register_simple("sun3_scsi_vme", -1,
+ sun3_scsi_vme_rsrc, ARRAY_SIZE(sun3_scsi_vme_rsrc));
+ break;
+ case SM_SUN3 | SM_3_50:
+ case SM_SUN3 | SM_3_60:
+ platform_device_register_simple("sun3_scsi", -1,
+ sun3_scsi_rsrc, ARRAY_SIZE(sun3_scsi_rsrc));
+ break;
+ }
+ return 0;
+}
+
+arch_initcall(sun3_platform_init);
+
+#endif
diff --git a/arch/microblaze/include/asm/tlb.h b/arch/microblaze/include/asm/tlb.h
index 8aa97817cc8c..99b6ded54849 100644
--- a/arch/microblaze/include/asm/tlb.h
+++ b/arch/microblaze/include/asm/tlb.h
@@ -14,7 +14,6 @@
#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
#include <linux/pagemap.h>
-#include <asm-generic/tlb.h>
#ifdef CONFIG_MMU
#define tlb_start_vma(tlb, vma) do { } while (0)
@@ -22,4 +21,6 @@
#define __tlb_remove_tlb_entry(tlb, pte, address) do { } while (0)
#endif
+#include <asm-generic/tlb.h>
+
#endif /* _ASM_MICROBLAZE_TLB_H */
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index f43aa536c517..9536ef912f59 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -2101,9 +2101,17 @@ config 64BIT_PHYS_ADDR
config ARCH_PHYS_ADDR_T_64BIT
def_bool 64BIT_PHYS_ADDR
+choice
+ prompt "SmartMIPS or microMIPS ASE support"
+
+config CPU_NEEDS_NO_SMARTMIPS_OR_MICROMIPS
+ bool "None"
+ help
+ Select this if you want neither microMIPS nor SmartMIPS support
+
config CPU_HAS_SMARTMIPS
depends on SYS_SUPPORTS_SMARTMIPS
- bool "Support for the SmartMIPS ASE"
+ bool "SmartMIPS"
help
SmartMIPS is an extension of the MIPS32 architecture aimed at
increased security at both the hardware and software levels for
@@ -2115,11 +2123,13 @@ config CPU_HAS_SMARTMIPS
config CPU_MICROMIPS
depends on SYS_SUPPORTS_MICROMIPS
- bool "Build kernel using microMIPS ISA"
+ bool "microMIPS"
help
When this option is enabled, the kernel will be built using the
microMIPS ISA.
+endchoice
+
config CPU_HAS_MSA
bool "Support for the MIPS SIMD Architecture (EXPERIMENTAL)"
depends on CPU_SUPPORTS_MSA
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index 23cb94806fbc..58076472bdd8 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -93,6 +93,15 @@ LDFLAGS_vmlinux += -G 0 -static -n -nostdlib
KBUILD_AFLAGS_MODULE += -mlong-calls
KBUILD_CFLAGS_MODULE += -mlong-calls
+#
+# Pass -msoft-float to GAS if it supports it. However, on newer binutils
+# (specifically newer than 2.24.51.20140728) we then also need to explicitly
+# set ".set hardfloat" in all files which manipulate floating point registers.
+#
+ifneq ($(call as-option,-Wa$(comma)-msoft-float,),)
+ cflags-y += -DGAS_HAS_SET_HARDFLOAT -Wa,-msoft-float
+endif
+
cflags-y += -ffreestanding
#
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index 741734049675..2bc4aa95944e 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -809,6 +809,7 @@ static struct irq_chip octeon_irq_chip_ciu_gpio_v2 = {
.irq_set_type = octeon_irq_ciu_gpio_set_type,
#ifdef CONFIG_SMP
.irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
+ .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
.flags = IRQCHIP_SET_TYPE_MASKED,
};
@@ -823,6 +824,7 @@ static struct irq_chip octeon_irq_chip_ciu_gpio = {
.irq_set_type = octeon_irq_ciu_gpio_set_type,
#ifdef CONFIG_SMP
.irq_set_affinity = octeon_irq_ciu_set_affinity,
+ .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
.flags = IRQCHIP_SET_TYPE_MASKED,
};
diff --git a/arch/mips/include/asm/asmmacro-32.h b/arch/mips/include/asm/asmmacro-32.h
index e38c2811d4e2..cdac7b3eeaf7 100644
--- a/arch/mips/include/asm/asmmacro-32.h
+++ b/arch/mips/include/asm/asmmacro-32.h
@@ -13,6 +13,8 @@
#include <asm/mipsregs.h>
.macro fpu_save_single thread tmp=t0
+ .set push
+ SET_HARDFLOAT
cfc1 \tmp, fcr31
swc1 $f0, THREAD_FPR0_LS64(\thread)
swc1 $f1, THREAD_FPR1_LS64(\thread)
@@ -47,9 +49,12 @@
swc1 $f30, THREAD_FPR30_LS64(\thread)
swc1 $f31, THREAD_FPR31_LS64(\thread)
sw \tmp, THREAD_FCR31(\thread)
+ .set pop
.endm
.macro fpu_restore_single thread tmp=t0
+ .set push
+ SET_HARDFLOAT
lw \tmp, THREAD_FCR31(\thread)
lwc1 $f0, THREAD_FPR0_LS64(\thread)
lwc1 $f1, THREAD_FPR1_LS64(\thread)
@@ -84,6 +89,7 @@
lwc1 $f30, THREAD_FPR30_LS64(\thread)
lwc1 $f31, THREAD_FPR31_LS64(\thread)
ctc1 \tmp, fcr31
+ .set pop
.endm
.macro cpu_save_nonscratch thread
diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h
index cd9a98bc8f60..6caf8766b80f 100644
--- a/arch/mips/include/asm/asmmacro.h
+++ b/arch/mips/include/asm/asmmacro.h
@@ -57,6 +57,8 @@
#endif /* CONFIG_CPU_MIPSR2 */
.macro fpu_save_16even thread tmp=t0
+ .set push
+ SET_HARDFLOAT
cfc1 \tmp, fcr31
sdc1 $f0, THREAD_FPR0_LS64(\thread)
sdc1 $f2, THREAD_FPR2_LS64(\thread)
@@ -75,11 +77,13 @@
sdc1 $f28, THREAD_FPR28_LS64(\thread)
sdc1 $f30, THREAD_FPR30_LS64(\thread)
sw \tmp, THREAD_FCR31(\thread)
+ .set pop
.endm
.macro fpu_save_16odd thread
.set push
.set mips64r2
+ SET_HARDFLOAT
sdc1 $f1, THREAD_FPR1_LS64(\thread)
sdc1 $f3, THREAD_FPR3_LS64(\thread)
sdc1 $f5, THREAD_FPR5_LS64(\thread)
@@ -110,6 +114,8 @@
.endm
.macro fpu_restore_16even thread tmp=t0
+ .set push
+ SET_HARDFLOAT
lw \tmp, THREAD_FCR31(\thread)
ldc1 $f0, THREAD_FPR0_LS64(\thread)
ldc1 $f2, THREAD_FPR2_LS64(\thread)
@@ -133,6 +139,7 @@
.macro fpu_restore_16odd thread
.set push
.set mips64r2
+ SET_HARDFLOAT
ldc1 $f1, THREAD_FPR1_LS64(\thread)
ldc1 $f3, THREAD_FPR3_LS64(\thread)
ldc1 $f5, THREAD_FPR5_LS64(\thread)
@@ -277,6 +284,7 @@
.macro cfcmsa rd, cs
.set push
.set noat
+ SET_HARDFLOAT
.insn
.word CFC_MSA_INSN | (\cs << 11)
move \rd, $1
@@ -286,6 +294,7 @@
.macro ctcmsa cd, rs
.set push
.set noat
+ SET_HARDFLOAT
move $1, \rs
.word CTC_MSA_INSN | (\cd << 6)
.set pop
@@ -294,6 +303,7 @@
.macro ld_d wd, off, base
.set push
.set noat
+ SET_HARDFLOAT
add $1, \base, \off
.word LDD_MSA_INSN | (\wd << 6)
.set pop
@@ -302,6 +312,7 @@
.macro st_d wd, off, base
.set push
.set noat
+ SET_HARDFLOAT
add $1, \base, \off
.word STD_MSA_INSN | (\wd << 6)
.set pop
@@ -310,6 +321,7 @@
.macro copy_u_w rd, ws, n
.set push
.set noat
+ SET_HARDFLOAT
.insn
.word COPY_UW_MSA_INSN | (\n << 16) | (\ws << 11)
/* move triggers an assembler bug... */
@@ -320,6 +332,7 @@
.macro copy_u_d rd, ws, n
.set push
.set noat
+ SET_HARDFLOAT
.insn
.word COPY_UD_MSA_INSN | (\n << 16) | (\ws << 11)
/* move triggers an assembler bug... */
@@ -330,6 +343,7 @@
.macro insert_w wd, n, rs
.set push
.set noat
+ SET_HARDFLOAT
/* move triggers an assembler bug... */
or $1, \rs, zero
.word INSERT_W_MSA_INSN | (\n << 16) | (\wd << 6)
@@ -339,6 +353,7 @@
.macro insert_d wd, n, rs
.set push
.set noat
+ SET_HARDFLOAT
/* move triggers an assembler bug... */
or $1, \rs, zero
.word INSERT_D_MSA_INSN | (\n << 16) | (\wd << 6)
@@ -381,6 +396,7 @@
st_d 31, THREAD_FPR31, \thread
.set push
.set noat
+ SET_HARDFLOAT
cfcmsa $1, MSA_CSR
sw $1, THREAD_MSA_CSR(\thread)
.set pop
@@ -389,6 +405,7 @@
.macro msa_restore_all thread
.set push
.set noat
+ SET_HARDFLOAT
lw $1, THREAD_MSA_CSR(\thread)
ctcmsa MSA_CSR, $1
.set pop
@@ -441,6 +458,7 @@
.macro msa_init_all_upper
.set push
.set noat
+ SET_HARDFLOAT
not $1, zero
msa_init_upper 0
.set pop
diff --git a/arch/mips/include/asm/fpregdef.h b/arch/mips/include/asm/fpregdef.h
index 429481f9028d..f184ba088532 100644
--- a/arch/mips/include/asm/fpregdef.h
+++ b/arch/mips/include/asm/fpregdef.h
@@ -14,6 +14,20 @@
#include <asm/sgidefs.h>
+/*
+ * Starting with binutils 2.24.51.20140729, MIPS binutils warn about mixing
+ * hardfloat and softfloat object files. The kernel build uses soft-float by
+ * default, so we also need to pass -msoft-float along to GAS if it supports it.
+ * But this in turn causes assembler errors in files which access hardfloat
+ * registers. We detect if GAS supports "-msoft-float" in the Makefile and
+ * explicitly put ".set hardfloat" where floating point registers are touched.
+ */
+#ifdef GAS_HAS_SET_HARDFLOAT
+#define SET_HARDFLOAT .set hardfloat
+#else
+#define SET_HARDFLOAT
+#endif
+
#if _MIPS_SIM == _MIPS_SIM_ABI32
/*
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
index 4d0aeda68397..dd562414cd5e 100644
--- a/arch/mips/include/asm/fpu.h
+++ b/arch/mips/include/asm/fpu.h
@@ -145,8 +145,8 @@ static inline void lose_fpu(int save)
if (is_msa_enabled()) {
if (save) {
save_msa(current);
- asm volatile("cfc1 %0, $31"
- : "=r"(current->thread.fpu.fcr31));
+ current->thread.fpu.fcr31 =
+ read_32bit_cp1_register(CP1_STATUS);
}
disable_msa();
clear_thread_flag(TIF_USEDMSA);
diff --git a/arch/mips/include/asm/jump_label.h b/arch/mips/include/asm/jump_label.h
index e194f957ca8c..fdbff44e5482 100644
--- a/arch/mips/include/asm/jump_label.h
+++ b/arch/mips/include/asm/jump_label.h
@@ -20,9 +20,15 @@
#define WORD_INSN ".word"
#endif
+#ifdef CONFIG_CPU_MICROMIPS
+#define NOP_INSN "nop32"
+#else
+#define NOP_INSN "nop"
+#endif
+
static __always_inline bool arch_static_branch(struct static_key *key)
{
- asm_volatile_goto("1:\tnop\n\t"
+ asm_volatile_goto("1:\t" NOP_INSN "\n\t"
"nop\n\t"
".pushsection __jump_table, \"aw\"\n\t"
WORD_INSN " 1b, %l[l_yes], %0\n\t"
diff --git a/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h b/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h
index 7d28f95b0512..6d69332f21ec 100644
--- a/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h
@@ -41,10 +41,8 @@
#define cpu_has_mcheck 0
#define cpu_has_mdmx 0
#define cpu_has_mips16 0
-#define cpu_has_mips32r1 0
#define cpu_has_mips32r2 0
#define cpu_has_mips3d 0
-#define cpu_has_mips64r1 0
#define cpu_has_mips64r2 0
#define cpu_has_mipsmt 0
#define cpu_has_prefetch 0
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index cf3b580c3df6..22a135ac91de 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -661,6 +661,8 @@
#define MIPS_CONF6_SYND (_ULCAST_(1) << 13)
/* proAptiv FTLB on/off bit */
#define MIPS_CONF6_FTLBEN (_ULCAST_(1) << 15)
+/* FTLB probability bits */
+#define MIPS_CONF6_FTLBP_SHIFT (16)
#define MIPS_CONF7_WII (_ULCAST_(1) << 31)
@@ -1324,7 +1326,7 @@ do { \
/*
* Macros to access the floating point coprocessor control registers
*/
-#define read_32bit_cp1_register(source) \
+#define _read_32bit_cp1_register(source, gas_hardfloat) \
({ \
int __res; \
\
@@ -1334,12 +1336,21 @@ do { \
" # gas fails to assemble cfc1 for some archs, \n" \
" # like Octeon. \n" \
" .set mips1 \n" \
+ " "STR(gas_hardfloat)" \n" \
" cfc1 %0,"STR(source)" \n" \
" .set pop \n" \
: "=r" (__res)); \
__res; \
})
+#ifdef GAS_HAS_SET_HARDFLOAT
+#define read_32bit_cp1_register(source) \
+ _read_32bit_cp1_register(source, .set hardfloat)
+#else
+#define read_32bit_cp1_register(source) \
+ _read_32bit_cp1_register(source, )
+#endif
+
#ifdef HAVE_AS_DSP
#define rddsp(mask) \
({ \
diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h
index 4520adc8699b..cd6e0afc6833 100644
--- a/arch/mips/include/asm/r4kcache.h
+++ b/arch/mips/include/asm/r4kcache.h
@@ -257,7 +257,11 @@ static inline void protected_flush_icache_line(unsigned long addr)
*/
static inline void protected_writeback_dcache_line(unsigned long addr)
{
+#ifdef CONFIG_EVA
+ protected_cachee_op(Hit_Writeback_Inv_D, addr);
+#else
protected_cache_op(Hit_Writeback_Inv_D, addr);
+#endif
}
static inline void protected_writeback_scache_line(unsigned long addr)
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index a10951090234..22a5624e2fd2 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -301,7 +301,8 @@ do { \
__get_kernel_common((x), size, __gu_ptr); \
else \
__get_user_common((x), size, __gu_ptr); \
- } \
+ } else \
+ (x) = 0; \
\
__gu_err; \
})
@@ -316,6 +317,7 @@ do { \
" .insn \n" \
" .section .fixup,\"ax\" \n" \
"3: li %0, %4 \n" \
+ " move %1, $0 \n" \
" j 2b \n" \
" .previous \n" \
" .section __ex_table,\"a\" \n" \
@@ -630,6 +632,7 @@ do { \
" .insn \n" \
" .section .fixup,\"ax\" \n" \
"3: li %0, %4 \n" \
+ " move %1, $0 \n" \
" j 2b \n" \
" .previous \n" \
" .section __ex_table,\"a\" \n" \
@@ -773,10 +776,11 @@ extern void __put_user_unaligned_unknown(void);
"jal\t" #destination "\n\t"
#endif
-#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
-#define DADDI_SCRATCH "$0"
-#else
+#if defined(CONFIG_CPU_DADDI_WORKAROUNDS) || (defined(CONFIG_EVA) && \
+ defined(CONFIG_CPU_HAS_PREFETCH))
#define DADDI_SCRATCH "$3"
+#else
+#define DADDI_SCRATCH "$0"
#endif
extern size_t __copy_user(void *__to, const void *__from, size_t __n);
@@ -1418,7 +1422,7 @@ static inline long __strnlen_user(const char __user *s, long n)
}
/*
- * strlen_user: - Get the size of a string in user space.
+ * strnlen_user: - Get the size of a string in user space.
* @str: The string to measure.
*
* Context: User context only. This function may sleep.
@@ -1427,9 +1431,7 @@ static inline long __strnlen_user(const char __user *s, long n)
*
* Returns the size of the string INCLUDING the terminating NUL.
* On exception, returns 0.
- *
- * If there is a limit on the length of a valid string, you may wish to
- * consider using strnlen_user() instead.
+ * If the string is too long, returns a value greater than @n.
*/
static inline long strnlen_user(const char __user *s, long n)
{
diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h
index fdb4923777d1..d001bb1ad177 100644
--- a/arch/mips/include/uapi/asm/unistd.h
+++ b/arch/mips/include/uapi/asm/unistd.h
@@ -375,16 +375,17 @@
#define __NR_seccomp (__NR_Linux + 352)
#define __NR_getrandom (__NR_Linux + 353)
#define __NR_memfd_create (__NR_Linux + 354)
+#define __NR_bpf (__NR_Linux + 355)
/*
* Offset of the last Linux o32 flavoured syscall
*/
-#define __NR_Linux_syscalls 354
+#define __NR_Linux_syscalls 355
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
#define __NR_O32_Linux 4000
-#define __NR_O32_Linux_syscalls 354
+#define __NR_O32_Linux_syscalls 355
#if _MIPS_SIM == _MIPS_SIM_ABI64
@@ -707,16 +708,17 @@
#define __NR_seccomp (__NR_Linux + 312)
#define __NR_getrandom (__NR_Linux + 313)
#define __NR_memfd_create (__NR_Linux + 314)
+#define __NR_bpf (__NR_Linux + 315)
/*
* Offset of the last Linux 64-bit flavoured syscall
*/
-#define __NR_Linux_syscalls 314
+#define __NR_Linux_syscalls 315
#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
#define __NR_64_Linux 5000
-#define __NR_64_Linux_syscalls 314
+#define __NR_64_Linux_syscalls 315
#if _MIPS_SIM == _MIPS_SIM_NABI32
@@ -1043,15 +1045,16 @@
#define __NR_seccomp (__NR_Linux + 316)
#define __NR_getrandom (__NR_Linux + 317)
#define __NR_memfd_create (__NR_Linux + 318)
+#define __NR_bpf (__NR_Linux + 319)
/*
* Offset of the last N32 flavoured syscall
*/
-#define __NR_Linux_syscalls 318
+#define __NR_Linux_syscalls 319
#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
#define __NR_N32_Linux 6000
-#define __NR_N32_Linux_syscalls 318
+#define __NR_N32_Linux_syscalls 319
#endif /* _UAPI_ASM_UNISTD_H */
diff --git a/arch/mips/kernel/bmips_vec.S b/arch/mips/kernel/bmips_vec.S
index 290c23b51678..86495072a922 100644
--- a/arch/mips/kernel/bmips_vec.S
+++ b/arch/mips/kernel/bmips_vec.S
@@ -208,7 +208,6 @@ bmips_reset_nmi_vec_end:
END(bmips_reset_nmi_vec)
.set pop
- .previous
/***********************************************************************
* CPU1 warm restart vector (used for second and subsequent boots).
@@ -281,5 +280,3 @@ LEAF(bmips_enable_xks01)
jr ra
END(bmips_enable_xks01)
-
- .previous
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index 7b2df224f041..4d7d99d601cc 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -144,7 +144,7 @@ int __mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
case mm_bc1t_op:
preempt_disable();
if (is_fpu_owner())
- asm volatile("cfc1\t%0,$31" : "=r" (fcr31));
+ fcr31 = read_32bit_cp1_register(CP1_STATUS);
else
fcr31 = current->thread.fpu.fcr31;
preempt_enable();
@@ -562,11 +562,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
case cop1_op:
preempt_disable();
if (is_fpu_owner())
- asm volatile(
- ".set push\n"
- "\t.set mips1\n"
- "\tcfc1\t%0,$31\n"
- "\t.set pop" : "=r" (fcr31));
+ fcr31 = read_32bit_cp1_register(CP1_STATUS);
else
fcr31 = current->thread.fpu.fcr31;
preempt_enable();
diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S
index e6e97d2a5c9e..0384b05ab5a0 100644
--- a/arch/mips/kernel/cps-vec.S
+++ b/arch/mips/kernel/cps-vec.S
@@ -229,6 +229,7 @@ LEAF(mips_cps_core_init)
nop
.set push
+ .set mips32r2
.set mt
/* Only allow 1 TC per VPE to execute... */
@@ -345,6 +346,7 @@ LEAF(mips_cps_boot_vpes)
nop
.set push
+ .set mips32r2
.set mt
1: /* Enter VPE configuration state */
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 94c4a0c0a577..dc49cf30c2db 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -193,6 +193,32 @@ static void set_isa(struct cpuinfo_mips *c, unsigned int isa)
static char unknown_isa[] = KERN_ERR \
"Unsupported ISA type, c0.config0: %d.";
+static unsigned int calculate_ftlb_probability(struct cpuinfo_mips *c)
+{
+
+ unsigned int probability = c->tlbsize / c->tlbsizevtlb;
+
+ /*
+ * 0 = All TLBWR instructions go to FTLB
+ * 1 = 15:1: For every 16 TLBWR instructions, 15 go to the
+ * FTLB and 1 goes to the VTLB.
+ * 2 = 7:1: As above with 7:1 ratio.
+ * 3 = 3:1: As above with 3:1 ratio.
+ *
+ * Use the linear midpoint as the probability threshold.
+ */
+ if (probability >= 12)
+ return 1;
+ else if (probability >= 6)
+ return 2;
+ else
+ /*
+ * So FTLB is less than 4 times bigger than VTLB.
+ * A 3:1 ratio can still be useful though.
+ */
+ return 3;
+}
+
static void set_ftlb_enable(struct cpuinfo_mips *c, int enable)
{
unsigned int config6;
@@ -203,9 +229,14 @@ static void set_ftlb_enable(struct cpuinfo_mips *c, int enable)
case CPU_P5600:
/* proAptiv & related cores use Config6 to enable the FTLB */
config6 = read_c0_config6();
+ /* Clear the old probability value */
+ config6 &= ~(3 << MIPS_CONF6_FTLBP_SHIFT);
if (enable)
/* Enable FTLB */
- write_c0_config6(config6 | MIPS_CONF6_FTLBEN);
+ write_c0_config6(config6 |
+ (calculate_ftlb_probability(c)
+ << MIPS_CONF6_FTLBP_SHIFT)
+ | MIPS_CONF6_FTLBEN);
else
/* Disable FTLB */
write_c0_config6(config6 & ~MIPS_CONF6_FTLBEN);
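As a rough stand-alone illustration (not part of the patch), the threshold mapping implemented by calculate_ftlb_probability() above amounts to the following; the example ratio is made up:

	/* Sketch of the ratio-to-FTLBP-field mapping used above. */
	static unsigned int ftlb_probability_field(unsigned int ratio)
	{
		if (ratio >= 12)
			return 1;	/* 15:1 -> 15 of 16 TLBWRs go to the FTLB */
		if (ratio >= 6)
			return 2;	/*  7:1 ->  7 of  8 TLBWRs go to the FTLB */
		return 3;		/*  3:1 ->  3 of  4 TLBWRs go to the FTLB */
	}

	/* e.g. a core whose FTLB/VTLB size ratio works out to 9 gets field 2,
	 * which is then written into the FTLBP bits of Config6 above. */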
@@ -757,31 +788,34 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
c->cputype = CPU_LOONGSON2;
__cpu_name[cpu] = "ICT Loongson-2";
set_elf_platform(cpu, "loongson2e");
+ set_isa(c, MIPS_CPU_ISA_III);
break;
case PRID_REV_LOONGSON2F:
c->cputype = CPU_LOONGSON2;
__cpu_name[cpu] = "ICT Loongson-2";
set_elf_platform(cpu, "loongson2f");
+ set_isa(c, MIPS_CPU_ISA_III);
break;
case PRID_REV_LOONGSON3A:
c->cputype = CPU_LOONGSON3;
- c->writecombine = _CACHE_UNCACHED_ACCELERATED;
__cpu_name[cpu] = "ICT Loongson-3";
set_elf_platform(cpu, "loongson3a");
+ set_isa(c, MIPS_CPU_ISA_M64R1);
break;
case PRID_REV_LOONGSON3B_R1:
case PRID_REV_LOONGSON3B_R2:
c->cputype = CPU_LOONGSON3;
__cpu_name[cpu] = "ICT Loongson-3";
set_elf_platform(cpu, "loongson3b");
+ set_isa(c, MIPS_CPU_ISA_M64R1);
break;
}
- set_isa(c, MIPS_CPU_ISA_III);
c->options = R4K_OPTS |
MIPS_CPU_FPU | MIPS_CPU_LLSC |
MIPS_CPU_32FPR;
c->tlbsize = 64;
+ c->writecombine = _CACHE_UNCACHED_ACCELERATED;
break;
case PRID_IMP_LOONGSON_32: /* Loongson-1 */
decode_configs(c);
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index ac35e12cb1f3..a5e26dd90592 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -358,6 +358,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
.set push
/* gas fails to assemble cfc1 for some archs (octeon).*/ \
.set mips1
+ SET_HARDFLOAT
cfc1 a1, fcr31
li a2, ~(0x3f << 12)
and a2, a1
diff --git a/arch/mips/kernel/jump_label.c b/arch/mips/kernel/jump_label.c
index 6001610cfe55..dda800e9e731 100644
--- a/arch/mips/kernel/jump_label.c
+++ b/arch/mips/kernel/jump_label.c
@@ -18,31 +18,53 @@
#ifdef HAVE_JUMP_LABEL
-#define J_RANGE_MASK ((1ul << 28) - 1)
+/*
+ * Define parameters for the standard MIPS and the microMIPS jump
+ * instruction encoding respectively:
+ *
+ * - the ISA bit of the target, either 0 or 1 respectively,
+ *
+ * - the amount the jump target address is shifted right to fit in the
+ * immediate field of the machine instruction, either 2 or 1,
+ *
+ * - the mask determining the size of the jump region relative to the
+ * delay-slot instruction, either 256MB or 128MB,
+ *
+ * - the jump target alignment, either 4 or 2 bytes.
+ */
+#define J_ISA_BIT IS_ENABLED(CONFIG_CPU_MICROMIPS)
+#define J_RANGE_SHIFT (2 - J_ISA_BIT)
+#define J_RANGE_MASK ((1ul << (26 + J_RANGE_SHIFT)) - 1)
+#define J_ALIGN_MASK ((1ul << J_RANGE_SHIFT) - 1)
void arch_jump_label_transform(struct jump_entry *e,
enum jump_label_type type)
{
+ union mips_instruction *insn_p;
union mips_instruction insn;
- union mips_instruction *insn_p =
- (union mips_instruction *)(unsigned long)e->code;
- /* Jump only works within a 256MB aligned region. */
- BUG_ON((e->target & ~J_RANGE_MASK) != (e->code & ~J_RANGE_MASK));
+ insn_p = (union mips_instruction *)msk_isa16_mode(e->code);
+
+ /* Jump only works within an aligned region its delay slot is in. */
+ BUG_ON((e->target & ~J_RANGE_MASK) != ((e->code + 4) & ~J_RANGE_MASK));
- /* Target must have 4 byte alignment. */
- BUG_ON((e->target & 3) != 0);
+ /* Target must have the right alignment and ISA must be preserved. */
+ BUG_ON((e->target & J_ALIGN_MASK) != J_ISA_BIT);
if (type == JUMP_LABEL_ENABLE) {
- insn.j_format.opcode = j_op;
- insn.j_format.target = (e->target & J_RANGE_MASK) >> 2;
+ insn.j_format.opcode = J_ISA_BIT ? mm_j32_op : j_op;
+ insn.j_format.target = e->target >> J_RANGE_SHIFT;
} else {
insn.word = 0; /* nop */
}
get_online_cpus();
mutex_lock(&text_mutex);
- *insn_p = insn;
+ if (IS_ENABLED(CONFIG_CPU_MICROMIPS)) {
+ insn_p->halfword[0] = insn.word >> 16;
+ insn_p->halfword[1] = insn.word;
+ } else
+ *insn_p = insn;
flush_icache_range((unsigned long)insn_p,
(unsigned long)insn_p + sizeof(*insn_p));
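For reference only (not part of the patch), the constants the new macros evaluate to for the two encodings can be reproduced with a small host-side sketch:

	#include <stdio.h>

	static void show(int isa_bit)
	{
		unsigned long range_shift = 2 - isa_bit;
		unsigned long range_mask  = (1ul << (26 + range_shift)) - 1;
		unsigned long align_mask  = (1ul << range_shift) - 1;

		printf("isa_bit=%d: %luMB jump region, %lu-byte target alignment\n",
		       isa_bit, (range_mask + 1) >> 20, align_mask + 1);
	}

	int main(void)
	{
		show(0);	/* standard MIPS: 256MB region, 4-byte alignment */
		show(1);	/* microMIPS:    128MB region, 2-byte alignment */
		return 0;
	}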
diff --git a/arch/mips/kernel/r2300_fpu.S b/arch/mips/kernel/r2300_fpu.S
index f31063dbdaeb..5ce3b746cedc 100644
--- a/arch/mips/kernel/r2300_fpu.S
+++ b/arch/mips/kernel/r2300_fpu.S
@@ -28,6 +28,8 @@
.set mips1
/* Save floating point context */
LEAF(_save_fp_context)
+ .set push
+ SET_HARDFLOAT
li v0, 0 # assume success
cfc1 t1,fcr31
EX(swc1 $f0,(SC_FPREGS+0)(a0))
@@ -65,6 +67,7 @@ LEAF(_save_fp_context)
EX(sw t1,(SC_FPC_CSR)(a0))
cfc1 t0,$0 # implementation/version
jr ra
+ .set pop
.set nomacro
EX(sw t0,(SC_FPC_EIR)(a0))
.set macro
@@ -80,6 +83,8 @@ LEAF(_save_fp_context)
* stack frame which might have been changed by the user.
*/
LEAF(_restore_fp_context)
+ .set push
+ SET_HARDFLOAT
li v0, 0 # assume success
EX(lw t0,(SC_FPC_CSR)(a0))
EX(lwc1 $f0,(SC_FPREGS+0)(a0))
@@ -116,6 +121,7 @@ LEAF(_restore_fp_context)
EX(lwc1 $f31,(SC_FPREGS+248)(a0))
jr ra
ctc1 t0,fcr31
+ .set pop
END(_restore_fp_context)
.set reorder
diff --git a/arch/mips/kernel/r2300_switch.S b/arch/mips/kernel/r2300_switch.S
index 20b7b040e76f..435ea652f5fa 100644
--- a/arch/mips/kernel/r2300_switch.S
+++ b/arch/mips/kernel/r2300_switch.S
@@ -120,6 +120,9 @@ LEAF(_restore_fp)
#define FPU_DEFAULT 0x00000000
+ .set push
+ SET_HARDFLOAT
+
LEAF(_init_fpu)
mfc0 t0, CP0_STATUS
li t1, ST0_CU1
@@ -165,3 +168,5 @@ LEAF(_init_fpu)
mtc1 t0, $f31
jr ra
END(_init_fpu)
+
+ .set pop
diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S
index 8352523568e6..6c160c67984c 100644
--- a/arch/mips/kernel/r4k_fpu.S
+++ b/arch/mips/kernel/r4k_fpu.S
@@ -19,8 +19,12 @@
#include <asm/asm-offsets.h>
#include <asm/regdef.h>
+/* preprocessor replaces the fp in ".set fp=64" with $30 otherwise */
+#undef fp
+
.macro EX insn, reg, src
.set push
+ SET_HARDFLOAT
.set nomacro
.ex\@: \insn \reg, \src
.set pop
@@ -33,12 +37,17 @@
.set arch=r4000
LEAF(_save_fp_context)
+ .set push
+ SET_HARDFLOAT
cfc1 t1, fcr31
+ .set pop
#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
.set push
+ SET_HARDFLOAT
#ifdef CONFIG_CPU_MIPS32_R2
- .set mips64r2
+ .set mips32r2
+ .set fp=64
mfc0 t0, CP0_STATUS
sll t0, t0, 5
bgez t0, 1f # skip storing odd if FR=0
@@ -64,6 +73,8 @@ LEAF(_save_fp_context)
1: .set pop
#endif
+ .set push
+ SET_HARDFLOAT
/* Store the 16 even double precision registers */
EX sdc1 $f0, SC_FPREGS+0(a0)
EX sdc1 $f2, SC_FPREGS+16(a0)
@@ -84,11 +95,14 @@ LEAF(_save_fp_context)
EX sw t1, SC_FPC_CSR(a0)
jr ra
li v0, 0 # success
+ .set pop
END(_save_fp_context)
#ifdef CONFIG_MIPS32_COMPAT
/* Save 32-bit process floating point context */
LEAF(_save_fp_context32)
+ .set push
+ SET_HARDFLOAT
cfc1 t1, fcr31
mfc0 t0, CP0_STATUS
@@ -134,6 +148,7 @@ LEAF(_save_fp_context32)
EX sw t1, SC32_FPC_CSR(a0)
cfc1 t0, $0 # implementation/version
EX sw t0, SC32_FPC_EIR(a0)
+ .set pop
jr ra
li v0, 0 # success
@@ -150,8 +165,10 @@ LEAF(_restore_fp_context)
#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
.set push
+ SET_HARDFLOAT
#ifdef CONFIG_CPU_MIPS32_R2
- .set mips64r2
+ .set mips32r2
+ .set fp=64
mfc0 t0, CP0_STATUS
sll t0, t0, 5
bgez t0, 1f # skip loading odd if FR=0
@@ -175,6 +192,8 @@ LEAF(_restore_fp_context)
EX ldc1 $f31, SC_FPREGS+248(a0)
1: .set pop
#endif
+ .set push
+ SET_HARDFLOAT
EX ldc1 $f0, SC_FPREGS+0(a0)
EX ldc1 $f2, SC_FPREGS+16(a0)
EX ldc1 $f4, SC_FPREGS+32(a0)
@@ -192,6 +211,7 @@ LEAF(_restore_fp_context)
EX ldc1 $f28, SC_FPREGS+224(a0)
EX ldc1 $f30, SC_FPREGS+240(a0)
ctc1 t1, fcr31
+ .set pop
jr ra
li v0, 0 # success
END(_restore_fp_context)
@@ -199,6 +219,8 @@ LEAF(_restore_fp_context)
#ifdef CONFIG_MIPS32_COMPAT
LEAF(_restore_fp_context32)
/* Restore an o32 sigcontext. */
+ .set push
+ SET_HARDFLOAT
EX lw t1, SC32_FPC_CSR(a0)
mfc0 t0, CP0_STATUS
@@ -242,6 +264,7 @@ LEAF(_restore_fp_context32)
ctc1 t1, fcr31
jr ra
li v0, 0 # success
+ .set pop
END(_restore_fp_context32)
#endif
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
index 4c4ec1812420..64591e671878 100644
--- a/arch/mips/kernel/r4k_switch.S
+++ b/arch/mips/kernel/r4k_switch.S
@@ -22,6 +22,9 @@
#include <asm/asmmacro.h>
+/* preprocessor replaces the fp in ".set fp=64" with $30 otherwise */
+#undef fp
+
/*
* Offset to the current process status flags, the first 32 bytes of the
* stack are not used.
@@ -65,8 +68,12 @@
bgtz a3, 1f
/* Save 128b MSA vector context + scalar FP control & status. */
+ .set push
+ SET_HARDFLOAT
cfc1 t1, fcr31
msa_save_all a0
+ .set pop /* SET_HARDFLOAT */
+
sw t1, THREAD_FCR31(a0)
b 2f
@@ -161,6 +168,9 @@ LEAF(_init_msa_upper)
#define FPU_DEFAULT 0x00000000
+ .set push
+ SET_HARDFLOAT
+
LEAF(_init_fpu)
mfc0 t0, CP0_STATUS
li t1, ST0_CU1
@@ -232,7 +242,8 @@ LEAF(_init_fpu)
#ifdef CONFIG_CPU_MIPS32_R2
.set push
- .set mips64r2
+ .set mips32r2
+ .set fp=64
sll t0, t0, 5 # is Status.FR set?
bgez t0, 1f # no: skip setting upper 32b
@@ -291,3 +302,5 @@ LEAF(_init_fpu)
#endif
jr ra
END(_init_fpu)
+
+ .set pop /* SET_HARDFLOAT */
diff --git a/arch/mips/kernel/r6000_fpu.S b/arch/mips/kernel/r6000_fpu.S
index da0fbe46d83b..47077380c15c 100644
--- a/arch/mips/kernel/r6000_fpu.S
+++ b/arch/mips/kernel/r6000_fpu.S
@@ -18,6 +18,9 @@
.set noreorder
.set mips2
+ .set push
+ SET_HARDFLOAT
+
/* Save floating point context */
LEAF(_save_fp_context)
mfc0 t0,CP0_STATUS
@@ -85,3 +88,5 @@
1: jr ra
nop
END(_restore_fp_context)
+
+ .set pop /* SET_HARDFLOAT */
diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c
index 31b1b763cb29..c5c4fd54d797 100644
--- a/arch/mips/kernel/rtlx.c
+++ b/arch/mips/kernel/rtlx.c
@@ -94,12 +94,12 @@ int rtlx_open(int index, int can_sleep)
int ret = 0;
if (index >= RTLX_CHANNELS) {
- pr_debug(KERN_DEBUG "rtlx_open index out of range\n");
+ pr_debug("rtlx_open index out of range\n");
return -ENOSYS;
}
if (atomic_inc_return(&channel_wqs[index].in_open) > 1) {
- pr_debug(KERN_DEBUG "rtlx_open channel %d already opened\n", index);
+ pr_debug("rtlx_open channel %d already opened\n", index);
ret = -EBUSY;
goto out_fail;
}
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 744cd10ba599..00cad1005a16 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -579,3 +579,4 @@ EXPORT(sys_call_table)
PTR sys_seccomp
PTR sys_getrandom
PTR sys_memfd_create
+ PTR sys_bpf /* 4355 */
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index 002b1bc09c38..5251565e344b 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -434,4 +434,5 @@ EXPORT(sys_call_table)
PTR sys_seccomp
PTR sys_getrandom
PTR sys_memfd_create
+ PTR sys_bpf /* 5315 */
.size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index ca6cbbe9805b..77e74398b828 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -427,4 +427,5 @@ EXPORT(sysn32_call_table)
PTR sys_seccomp
PTR sys_getrandom
PTR sys_memfd_create
+ PTR sys_bpf
.size sysn32_call_table,.-sysn32_call_table
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 9e10d11fbb84..6f8db9f728e8 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -564,4 +564,5 @@ EXPORT(sys32_call_table)
PTR sys_seccomp
PTR sys_getrandom
PTR sys_memfd_create
+ PTR sys_bpf /* 4355 */
.size sys32_call_table,.-sys32_call_table
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index b3b8f0d9d4a7..f3b635f86c39 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -485,7 +485,7 @@ static void __init bootmem_init(void)
* NOTE: historically plat_mem_setup did the entire platform initialization.
* This was rather impractical because it meant plat_mem_setup had to
* get away without any kind of memory allocator. To keep old code from
- * breaking plat_setup was just renamed to plat_setup and a second platform
+ * breaking plat_setup was just renamed to plat_mem_setup and a second platform
* initialization hook for anything else was introduced.
*/
@@ -493,7 +493,7 @@ static int usermem __initdata;
static int __init early_parse_mem(char *p)
{
- unsigned long start, size;
+ phys_t start, size;
/*
* If a user specifies memory size, we
@@ -683,7 +683,8 @@ static void __init arch_mem_init(char **cmdline_p)
dma_contiguous_reserve(PFN_PHYS(max_low_pfn));
/* Tell bootmem about cma reserved memblock section */
for_each_memblock(reserved, reg)
- reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
+ if (reg->size != 0)
+ reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
}
static void __init resource_init(void)
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 1d57605e4615..16f1e4f2bf3c 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -658,13 +658,13 @@ static int signal_setup(void)
save_fp_context = _save_fp_context;
restore_fp_context = _restore_fp_context;
} else {
- save_fp_context = copy_fp_from_sigcontext;
- restore_fp_context = copy_fp_to_sigcontext;
+ save_fp_context = copy_fp_to_sigcontext;
+ restore_fp_context = copy_fp_from_sigcontext;
}
#endif /* CONFIG_SMP */
#else
- save_fp_context = copy_fp_from_sigcontext;;
- restore_fp_context = copy_fp_to_sigcontext;
+ save_fp_context = copy_fp_to_sigcontext;
+ restore_fp_context = copy_fp_from_sigcontext;
#endif
return 0;
diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S
index c17ef80cf65a..5d3238af9b5c 100644
--- a/arch/mips/lib/memcpy.S
+++ b/arch/mips/lib/memcpy.S
@@ -503,6 +503,7 @@
STOREB(t0, NBYTES-2(dst), .Ls_exc_p1\@)
.Ldone\@:
jr ra
+ nop
.if __memcpy == 1
END(memcpy)
.set __memcpy, 0
diff --git a/arch/mips/lib/r3k_dump_tlb.c b/arch/mips/lib/r3k_dump_tlb.c
index 91615c2ef0cf..1ef365ab3cd3 100644
--- a/arch/mips/lib/r3k_dump_tlb.c
+++ b/arch/mips/lib/r3k_dump_tlb.c
@@ -34,7 +34,7 @@ static void dump_tlb(int first, int last)
entrylo0 = read_c0_entrylo0();
/* Unused entries have a virtual address of KSEG0. */
- if ((entryhi & 0xffffe000) != 0x80000000
+ if ((entryhi & 0xfffff000) != 0x80000000
&& (entryhi & 0xfc0) == asid) {
/*
* Only print entries in use
@@ -43,7 +43,7 @@ static void dump_tlb(int first, int last)
printk("va=%08lx asid=%08lx"
" [pa=%06lx n=%d d=%d v=%d g=%d]",
- (entryhi & 0xffffe000),
+ (entryhi & 0xfffff000),
entryhi & 0xfc0,
entrylo0 & PAGE_MASK,
(entrylo0 & (1 << 11)) ? 1 : 0,
diff --git a/arch/mips/lib/strnlen_user.S b/arch/mips/lib/strnlen_user.S
index f3af6995e2a6..7d12c0dded3d 100644
--- a/arch/mips/lib/strnlen_user.S
+++ b/arch/mips/lib/strnlen_user.S
@@ -40,9 +40,11 @@ FEXPORT(__strnlen_\func\()_nocheck_asm)
.else
EX(lbe, t0, (v0), .Lfault\@)
.endif
- PTR_ADDIU v0, 1
+ .set noreorder
bnez t0, 1b
-1: PTR_SUBU v0, a0
+1: PTR_ADDIU v0, 1
+ .set reorder
+ PTR_SUBU v0, a0
jr ra
END(__strnlen_\func\()_asm)
diff --git a/arch/mips/loongson/common/Makefile b/arch/mips/loongson/common/Makefile
index 0bb9cc9dc621..d87e03330b29 100644
--- a/arch/mips/loongson/common/Makefile
+++ b/arch/mips/loongson/common/Makefile
@@ -11,7 +11,8 @@ obj-$(CONFIG_PCI) += pci.o
# Serial port support
#
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
-obj-$(CONFIG_SERIAL_8250) += serial.o
+loongson-serial-$(CONFIG_SERIAL_8250) := serial.o
+obj-y += $(loongson-serial-m) $(loongson-serial-y)
obj-$(CONFIG_LOONGSON_UART_BASE) += uart_base.o
obj-$(CONFIG_LOONGSON_MC146818) += rtc.o
diff --git a/arch/mips/loongson/loongson-3/numa.c b/arch/mips/loongson/loongson-3/numa.c
index 37ed184398c6..42323bcc5d28 100644
--- a/arch/mips/loongson/loongson-3/numa.c
+++ b/arch/mips/loongson/loongson-3/numa.c
@@ -33,6 +33,7 @@
static struct node_data prealloc__node_data[MAX_NUMNODES];
unsigned char __node_distances[MAX_NUMNODES][MAX_NUMNODES];
+EXPORT_SYMBOL(__node_distances);
struct node_data *__node_data[MAX_NUMNODES];
EXPORT_SYMBOL(__node_data);
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index 51a0fde4bec1..cac529a405b8 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -584,11 +584,7 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
if (insn.i_format.rs == bc_op) {
preempt_disable();
if (is_fpu_owner())
- asm volatile(
- ".set push\n"
- "\t.set mips1\n"
- "\tcfc1\t%0,$31\n"
- "\t.set pop" : "=r" (fcr31));
+ fcr31 = read_32bit_cp1_register(CP1_STATUS);
else
fcr31 = current->thread.fpu.fcr31;
preempt_enable();
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index fa6ebd4bc9e9..c3917e251f59 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -299,6 +299,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
local_irq_save(flags);
+ htw_stop();
pid = read_c0_entryhi() & ASID_MASK;
address &= (PAGE_MASK << 1);
write_c0_entryhi(address | pid);
@@ -346,6 +347,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
tlb_write_indexed();
}
tlbw_use_hazard();
+ htw_start();
flush_itlb_vm(vma);
local_irq_restore(flags);
}
@@ -422,6 +424,7 @@ __init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
local_irq_save(flags);
/* Save old context and create impossible VPN2 value */
+ htw_stop();
old_ctx = read_c0_entryhi();
old_pagemask = read_c0_pagemask();
wired = read_c0_wired();
@@ -443,6 +446,7 @@ __init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
write_c0_entryhi(old_ctx);
write_c0_pagemask(old_pagemask);
+ htw_start();
out:
local_irq_restore(flags);
return ret;
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index b5f228e7eae6..e3328a96e809 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -1872,8 +1872,16 @@ build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
uasm_l_smp_pgtable_change(l, *p);
#endif
iPTE_LW(p, wr.r1, wr.r2); /* get even pte */
- if (!m4kc_tlbp_war())
+ if (!m4kc_tlbp_war()) {
build_tlb_probe_entry(p);
+ if (cpu_has_htw) {
+ /* the hardware walker may have raced with us; leave if the probe misses */
+ uasm_i_ehb(p);
+ uasm_i_mfc0(p, wr.r3, C0_INDEX);
+ uasm_il_bltz(p, r, wr.r3, label_leave);
+ uasm_i_nop(p);
+ }
+ }
return wr;
}
diff --git a/arch/mips/mti-sead3/sead3-leds.c b/arch/mips/mti-sead3/sead3-leds.c
index 20102a6d4141..c427c5778186 100644
--- a/arch/mips/mti-sead3/sead3-leds.c
+++ b/arch/mips/mti-sead3/sead3-leds.c
@@ -5,7 +5,7 @@
*
* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
*/
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/leds.h>
#include <linux/platform_device.h>
@@ -76,8 +76,4 @@ static int __init led_init(void)
return platform_device_register(&fled_device);
}
-module_init(led_init);
-
-MODULE_AUTHOR("Chris Dearman <chris@mips.com>");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("LED probe driver for SEAD-3");
+device_initcall(led_init);
diff --git a/arch/mips/netlogic/xlp/Makefile b/arch/mips/netlogic/xlp/Makefile
index be358a8050c5..6b43af0a34d9 100644
--- a/arch/mips/netlogic/xlp/Makefile
+++ b/arch/mips/netlogic/xlp/Makefile
@@ -1,6 +1,10 @@
obj-y += setup.o nlm_hal.o cop2-ex.o dt.o
obj-$(CONFIG_SMP) += wakeup.o
-obj-$(CONFIG_USB) += usb-init.o
-obj-$(CONFIG_USB) += usb-init-xlp2.o
-obj-$(CONFIG_SATA_AHCI) += ahci-init.o
-obj-$(CONFIG_SATA_AHCI) += ahci-init-xlp2.o
+ifdef CONFIG_USB
+obj-y += usb-init.o
+obj-y += usb-init-xlp2.o
+endif
+ifdef CONFIG_SATA_AHCI
+obj-y += ahci-init.o
+obj-y += ahci-init-xlp2.o
+endif
diff --git a/arch/mips/oprofile/backtrace.c b/arch/mips/oprofile/backtrace.c
index 6854ed5097d2..83a1dfd8f0e3 100644
--- a/arch/mips/oprofile/backtrace.c
+++ b/arch/mips/oprofile/backtrace.c
@@ -92,7 +92,7 @@ static inline int unwind_user_frame(struct stackframe *old_frame,
/* This marks the end of the previous function,
which means we overran. */
break;
- stack_size = (unsigned) stack_adjustment;
+ stack_size = (unsigned long) stack_adjustment;
} else if (is_ra_save_ins(&ip)) {
int ra_slot = ip.i_format.simmediate;
if (ra_slot < 0)
diff --git a/arch/mips/pci/msi-xlp.c b/arch/mips/pci/msi-xlp.c
index fa374fe3746b..f7ac3edda1b2 100644
--- a/arch/mips/pci/msi-xlp.c
+++ b/arch/mips/pci/msi-xlp.c
@@ -443,10 +443,8 @@ static int xlp_setup_msix(uint64_t lnkbase, int node, int link,
msg.data = 0xc00 | msixvec;
ret = irq_set_msi_desc(xirq, desc);
- if (ret < 0) {
- destroy_irq(xirq);
+ if (ret < 0)
return ret;
- }
write_msi_msg(xirq, &msg);
return 0;
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
index a95c00f5fb96..a304bcc37e4f 100644
--- a/arch/mips/sgi-ip27/ip27-memory.c
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -107,6 +107,7 @@ static void router_recurse(klrou_t *router_a, klrou_t *router_b, int depth)
}
unsigned char __node_distances[MAX_COMPACT_NODES][MAX_COMPACT_NODES];
+EXPORT_SYMBOL(__node_distances);
static int __init compute_node_distance(nasid_t nasid_a, nasid_t nasid_b)
{
diff --git a/arch/nios2/Kconfig b/arch/nios2/Kconfig
new file mode 100644
index 000000000000..2361acf6d2b1
--- /dev/null
+++ b/arch/nios2/Kconfig
@@ -0,0 +1,206 @@
+config NIOS2
+ def_bool y
+ select ARCH_WANT_OPTIONAL_GPIOLIB
+ select CLKSRC_OF
+ select GENERIC_ATOMIC64
+ select GENERIC_CLOCKEVENTS
+ select GENERIC_CPU_DEVICES
+ select GENERIC_IRQ_PROBE
+ select GENERIC_IRQ_SHOW
+ select HAVE_ARCH_TRACEHOOK
+ select IRQ_DOMAIN
+ select MODULES_USE_ELF_RELA
+ select OF
+ select OF_EARLY_FLATTREE
+ select SOC_BUS
+ select SPARSE_IRQ
+ select USB_ARCH_HAS_HCD if USB_SUPPORT
+
+config GENERIC_CSUM
+ def_bool y
+
+config GENERIC_HWEIGHT
+ def_bool y
+
+config GENERIC_CALIBRATE_DELAY
+ def_bool y
+
+config NO_IOPORT_MAP
+ def_bool y
+
+config HAS_DMA
+ def_bool y
+
+config FPU
+ def_bool n
+
+config SWAP
+ def_bool n
+
+config RWSEM_GENERIC_SPINLOCK
+ def_bool y
+
+config TRACE_IRQFLAGS_SUPPORT
+ def_bool n
+
+source "init/Kconfig"
+
+menu "Kernel features"
+
+source "kernel/Kconfig.preempt"
+
+source "kernel/Kconfig.freezer"
+
+source "kernel/Kconfig.hz"
+
+source "mm/Kconfig"
+
+config FORCE_MAX_ZONEORDER
+ int "Maximum zone order"
+ range 9 20
+ default "11"
+ help
+ The kernel memory allocator divides physically contiguous memory
+ blocks into "zones", where each zone is a power of two number of
+ pages. This option selects the largest power of two that the kernel
+ keeps in the memory allocator. If you need to allocate very large
+ blocks of physically contiguous memory, then you may need to
+ increase this value.
+
+ This config option is actually maximum order plus one. For example,
+ a value of 11 means that the largest free memory block is 2^10 pages.
+
+endmenu
+
+source "arch/nios2/platform/Kconfig.platform"
+
+menu "Processor type and features"
+
+config MMU
+ def_bool y
+
+config NR_CPUS
+ int
+ default "1"
+
+config NIOS2_ALIGNMENT_TRAP
+ bool "Catch alignment trap"
+ default y
+ help
+ Nios II CPUs cannot fetch/store data which is not bus aligned,
+ i.e., a 2 or 4 byte fetch must start at an address divisible by
+ 2 or 4. Any non-aligned load/store instructions will be trapped and
+ emulated in software if you say Y here, which has a performance
+ impact.
+
+comment "Boot options"
+
+config CMDLINE_BOOL
+ bool "Default bootloader kernel arguments"
+ default y
+
+config CMDLINE
+ string "Default kernel command string"
+ default ""
+ depends on CMDLINE_BOOL
+ help
+ On some platforms, there is currently no way for the boot loader to
+ pass arguments to the kernel. For these platforms, you can supply
+ some command-line options at build time by entering them here. In
+ other cases you can specify kernel args so that you don't have
+ to set them up in board prom initialization routines.
+
+config CMDLINE_FORCE
+ bool "Force default kernel command string"
+ depends on CMDLINE_BOOL
+ help
+ Set this to have arguments from the default kernel command string
+ override those passed by the boot loader.
+
+config NIOS2_CMDLINE_IGNORE_DTB
+ bool "Ignore kernel command string from DTB"
+ depends on CMDLINE_BOOL
+ depends on !CMDLINE_FORCE
+ default y
+ help
+ Set this to ignore the bootargs property from the devicetree's
+ chosen node and fall back to CMDLINE if nothing is passed.
+
+config NIOS2_PASS_CMDLINE
+ bool "Passed kernel command line from u-boot"
+ default n
+ help
+ Use bootargs env variable from u-boot for kernel command line.
+ will override "Default kernel command string".
+ Say N if you are unsure.
+
+endmenu
+
+menu "Advanced setup"
+
+config ADVANCED_OPTIONS
+ bool "Prompt for advanced kernel configuration options"
+ help
+ This option enables prompting for the advanced configuration
+ options below (custom kernel, MMU and I/O region base addresses).
+ Say N here unless you know what you are doing.
+
+comment "Default settings for advanced configuration options are used"
+ depends on !ADVANCED_OPTIONS
+
+config NIOS2_KERNEL_MMU_REGION_BASE_BOOL
+ bool "Set custom kernel MMU region base address"
+ depends on ADVANCED_OPTIONS
+ help
+ This option allows you to set the virtual address of the kernel MMU region.
+
+ Say N here unless you know what you are doing.
+
+config NIOS2_KERNEL_MMU_REGION_BASE
+ hex "Virtual base address of the kernel MMU region " if NIOS2_KERNEL_MMU_REGION_BASE_BOOL
+ default "0x80000000"
+ help
+ This option allows you to set the virtual base address of the kernel MMU region.
+
+config NIOS2_KERNEL_REGION_BASE_BOOL
+ bool "Set custom kernel region base address"
+ depends on ADVANCED_OPTIONS
+ help
+ This option allows you to set the virtual address of the kernel region.
+
+ Say N here unless you know what you are doing.
+
+config NIOS2_KERNEL_REGION_BASE
+ hex "Virtual base address of the kernel region " if NIOS2_KERNEL_REGION_BASE_BOOL
+ default "0xc0000000"
+
+config NIOS2_IO_REGION_BASE_BOOL
+ bool "Set custom I/O region base address"
+ depends on ADVANCED_OPTIONS
+ help
+ This option allows you to set the virtual address of the I/O region.
+
+ Say N here unless you know what you are doing.
+
+config NIOS2_IO_REGION_BASE
+ hex "Virtual base address of the I/O region" if NIOS2_IO_REGION_BASE_BOOL
+ default "0xe0000000"
+
+endmenu
+
+menu "Executable file formats"
+
+source "fs/Kconfig.binfmt"
+
+endmenu
+
+source "net/Kconfig"
+
+source "drivers/Kconfig"
+
+source "fs/Kconfig"
+
+source "arch/nios2/Kconfig.debug"
+
+source "security/Kconfig"
+
+source "crypto/Kconfig"
+
+source "lib/Kconfig"
diff --git a/arch/nios2/Kconfig.debug b/arch/nios2/Kconfig.debug
new file mode 100644
index 000000000000..8d4e6bacd997
--- /dev/null
+++ b/arch/nios2/Kconfig.debug
@@ -0,0 +1,17 @@
+menu "Kernel hacking"
+
+config TRACE_IRQFLAGS_SUPPORT
+ def_bool y
+
+source "lib/Kconfig.debug"
+
+config DEBUG_STACK_USAGE
+ bool "Enable stack utilization instrumentation"
+ depends on DEBUG_KERNEL
+ help
+ Enables the display of the minimum amount of free stack which each
+ task has ever had available in the sysrq-T and sysrq-P debug output.
+
+ This option will slow down process creation somewhat.
+
+endmenu
diff --git a/arch/nios2/Makefile b/arch/nios2/Makefile
new file mode 100644
index 000000000000..e142c9ee51fa
--- /dev/null
+++ b/arch/nios2/Makefile
@@ -0,0 +1,73 @@
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 2013 Altera Corporation
+# Copyright (C) 1994, 95, 96, 2003 by Wind River Systems
+# Written by Fredrik Markstrom
+#
+# This file is included by the global makefile so that you can add your own
+# architecture-specific flags and dependencies. Remember to have actions
+# for "archclean", cleaning up for this architecture.
+#
+# Nios2 port by Wind River Systems Inc through:
+# fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
+
+UTS_SYSNAME = Linux
+
+export MMU
+
+LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
+
+KBUILD_CFLAGS += -pipe -D__linux__ -D__ELF__
+KBUILD_CFLAGS += $(if $(CONFIG_NIOS2_HW_MUL_SUPPORT),-mhw-mul,-mno-hw-mul)
+KBUILD_CFLAGS += $(if $(CONFIG_NIOS2_HW_MULX_SUPPORT),-mhw-mulx,-mno-hw-mulx)
+KBUILD_CFLAGS += $(if $(CONFIG_NIOS2_HW_DIV_SUPPORT),-mhw-div,-mno-hw-div)
+KBUILD_CFLAGS += $(if $(CONFIG_NIOS2_FPU_SUPPORT),-mcustom-fpu-cfg=60-1,)
+
+KBUILD_CFLAGS += -fno-optimize-sibling-calls
+KBUILD_CFLAGS += -DUTS_SYSNAME=\"$(UTS_SYSNAME)\"
+KBUILD_CFLAGS += -fno-builtin
+KBUILD_CFLAGS += -G 0
+
+head-y := arch/nios2/kernel/head.o
+libs-y += arch/nios2/lib/ $(LIBGCC)
+core-y += arch/nios2/kernel/ arch/nios2/mm/
+core-y += arch/nios2/platform/
+
+INSTALL_PATH ?= /tftpboot
+nios2-boot := arch/$(ARCH)/boot
+BOOT_TARGETS = vmImage zImage
+PHONY += $(BOOT_TARGETS) install
+KBUILD_IMAGE := $(nios2-boot)/vmImage
+
+ifneq ($(CONFIG_NIOS2_DTB_SOURCE),"")
+ core-y += $(nios2-boot)/
+endif
+
+all: vmImage
+
+archclean:
+ $(Q)$(MAKE) $(clean)=$(nios2-boot)
+
+%.dtb:
+ $(Q)$(MAKE) $(build)=$(nios2-boot) $(nios2-boot)/$@
+
+dtbs:
+ $(Q)$(MAKE) $(build)=$(nios2-boot) $(nios2-boot)/$@
+
+$(BOOT_TARGETS): vmlinux
+ $(Q)$(MAKE) $(build)=$(nios2-boot) $(nios2-boot)/$@
+
+install:
+ $(Q)$(MAKE) $(build)=$(nios2-boot) BOOTIMAGE=$(KBUILD_IMAGE) install
+
+define archhelp
+ echo '* vmImage - Kernel-only image for U-Boot ($(KBUILD_IMAGE))'
+ echo ' install - Install kernel using'
+ echo ' (your) ~/bin/$(INSTALLKERNEL) or'
+ echo ' (distribution) /sbin/$(INSTALLKERNEL) or'
+ echo ' install to $$(INSTALL_PATH)'
+ echo ' dtbs - Build device tree blobs for enabled boards'
+endef
diff --git a/arch/nios2/boot/Makefile b/arch/nios2/boot/Makefile
new file mode 100644
index 000000000000..59392dc0bdcb
--- /dev/null
+++ b/arch/nios2/boot/Makefile
@@ -0,0 +1,52 @@
+#
+# arch/nios2/boot/Makefile
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+
+UIMAGE_LOADADDR = $(shell $(NM) vmlinux | awk '$$NF == "_stext" {print $$1}')
+UIMAGE_ENTRYADDR = $(shell $(NM) vmlinux | awk '$$NF == "_start" {print $$1}')
+UIMAGE_COMPRESSION = gzip
+
+OBJCOPYFLAGS_vmlinux.bin := -O binary
+
+targets += vmlinux.bin vmlinux.gz vmImage
+
+$(obj)/vmlinux.bin: vmlinux FORCE
+ $(call if_changed,objcopy)
+
+$(obj)/vmlinux.gz: $(obj)/vmlinux.bin FORCE
+ $(call if_changed,gzip)
+
+$(obj)/vmImage: $(obj)/vmlinux.gz
+ $(call if_changed,uimage)
+ @$(kecho) 'Kernel: $@ is ready'
+
+# Rule to build device tree blobs
+DTB_SRC := $(patsubst "%",%,$(CONFIG_NIOS2_DTB_SOURCE))
+
+# Make sure the generated dtb gets removed during clean
+extra-$(CONFIG_NIOS2_DTB_SOURCE_BOOL) += system.dtb
+
+$(obj)/system.dtb: $(DTB_SRC) FORCE
+ $(call cmd,dtc)
+
+# Ensure system.dtb exists
+$(obj)/linked_dtb.o: $(obj)/system.dtb
+
+obj-$(CONFIG_NIOS2_DTB_SOURCE_BOOL) += linked_dtb.o
+
+targets += $(dtb-y)
+
+# Rule to build device tree blobs with make command
+$(obj)/%.dtb: $(src)/dts/%.dts FORCE
+ $(call if_changed_dep,dtc)
+
+$(obj)/dtbs: $(addprefix $(obj)/, $(dtb-y))
+
+clean-files := *.dtb
+
+install:
+ sh $(srctree)/$(src)/install.sh $(KERNELRELEASE) $(BOOTIMAGE) System.map "$(INSTALL_PATH)"
diff --git a/arch/nios2/boot/dts/3c120_devboard.dts b/arch/nios2/boot/dts/3c120_devboard.dts
new file mode 100644
index 000000000000..31c51f9a2f09
--- /dev/null
+++ b/arch/nios2/boot/dts/3c120_devboard.dts
@@ -0,0 +1,164 @@
+/*
+ * Copyright (C) 2013 Altera Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file is generated by sopc2dts.
+ */
+
+/dts-v1/;
+
+/ {
+ model = "altr,qsys_ghrd_3c120";
+ compatible = "altr,qsys_ghrd_3c120";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu: cpu@0x0 {
+ device_type = "cpu";
+ compatible = "altr,nios2-1.0";
+ reg = <0x00000000>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ clock-frequency = <125000000>;
+ dcache-line-size = <32>;
+ icache-line-size = <32>;
+ dcache-size = <32768>;
+ icache-size = <32768>;
+ altr,implementation = "fast";
+ altr,pid-num-bits = <8>;
+ altr,tlb-num-ways = <16>;
+ altr,tlb-num-entries = <128>;
+ altr,tlb-ptr-sz = <7>;
+ altr,has-div = <1>;
+ altr,has-mul = <1>;
+ altr,reset-addr = <0xc2800000>;
+ altr,fast-tlb-miss-addr = <0xc7fff400>;
+ altr,exception-addr = <0xd0000020>;
+ altr,has-initda = <1>;
+ altr,has-mmu = <1>;
+ };
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x10000000 0x08000000>,
+ <0x07fff400 0x00000400>;
+ };
+
+ sopc@0 {
+ device_type = "soc";
+ ranges;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "altr,avalon", "simple-bus";
+ bus-frequency = <125000000>;
+
+ pb_cpu_to_io: bridge@0x8000000 {
+ compatible = "simple-bus";
+ reg = <0x08000000 0x00800000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x00002000 0x08002000 0x00002000>,
+ <0x00004000 0x08004000 0x00000400>,
+ <0x00004400 0x08004400 0x00000040>,
+ <0x00004800 0x08004800 0x00000040>,
+ <0x00004c80 0x08004c80 0x00000020>,
+ <0x00004d50 0x08004d50 0x00000008>,
+ <0x00008000 0x08008000 0x00000020>,
+ <0x00400000 0x08400000 0x00000020>;
+
+ timer_1ms: timer@0x400000 {
+ compatible = "altr,timer-1.0";
+ reg = <0x00400000 0x00000020>;
+ interrupt-parent = <&cpu>;
+ interrupts = <11>;
+ clock-frequency = <125000000>;
+ };
+
+ timer_0: timer@0x8000 {
+ compatible = "altr,timer-1.0";
+ reg = < 0x00008000 0x00000020 >;
+ interrupt-parent = < &cpu >;
+ interrupts = < 5 >;
+ clock-frequency = < 125000000 >;
+ };
+
+ jtag_uart: serial@0x4d50 {
+ compatible = "altr,juart-1.0";
+ reg = <0x00004d50 0x00000008>;
+ interrupt-parent = <&cpu>;
+ interrupts = <1>;
+ };
+
+ tse_mac: ethernet@0x4000 {
+ compatible = "altr,tse-1.0";
+ reg = <0x00004000 0x00000400>,
+ <0x00004400 0x00000040>,
+ <0x00004800 0x00000040>,
+ <0x00002000 0x00002000>;
+ reg-names = "control_port", "rx_csr", "tx_csr", "s1";
+ interrupt-parent = <&cpu>;
+ interrupts = <2 3>;
+ interrupt-names = "rx_irq", "tx_irq";
+ rx-fifo-depth = <8192>;
+ tx-fifo-depth = <8192>;
+ max-frame-size = <1518>;
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ phy-mode = "rgmii-id";
+ phy-handle = <&phy0>;
+ tse_mac_mdio: mdio {
+ compatible = "altr,tse-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ phy0: ethernet-phy@18 {
+ reg = <18>;
+ device_type = "ethernet-phy";
+ };
+ };
+ };
+
+ uart: serial@0x4c80 {
+ compatible = "altr,uart-1.0";
+ reg = <0x00004c80 0x00000020>;
+ interrupt-parent = <&cpu>;
+ interrupts = <10>;
+ current-speed = <115200>;
+ clock-frequency = <62500000>;
+ };
+ };
+
+ cfi_flash_64m: flash@0x0 {
+ compatible = "cfi-flash";
+ reg = <0x00000000 0x04000000>;
+ bank-width = <2>;
+ device-width = <1>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@800000 {
+ reg = <0x00800000 0x01e00000>;
+ label = "JFFS2 Filesystem";
+ };
+ };
+ };
+
+ chosen {
+ bootargs = "debug console=ttyJ0,115200";
+ };
+};
diff --git a/arch/nios2/boot/install.sh b/arch/nios2/boot/install.sh
new file mode 100644
index 000000000000..3cb3f468bc51
--- /dev/null
+++ b/arch/nios2/boot/install.sh
@@ -0,0 +1,52 @@
+#!/bin/sh
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 1995 by Linus Torvalds
+#
+# Adapted from code in arch/i386/boot/Makefile by H. Peter Anvin
+#
+# "make install" script for nios2 architecture
+#
+# Arguments:
+# $1 - kernel version
+# $2 - kernel image file
+# $3 - kernel map file
+# $4 - default install path (blank if root directory)
+#
+
+verify () {
+ if [ ! -f "$1" ]; then
+ echo "" 1>&2
+ echo " *** Missing file: $1" 1>&2
+ echo ' *** You need to run "make" before "make install".' 1>&2
+ echo "" 1>&2
+ exit 1
+ fi
+}
+
+# Make sure the files actually exist
+verify "$2"
+verify "$3"
+
+# User may have a custom install script
+
+if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
+if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
+
+# Default install - same as make zlilo
+
+if [ -f $4/vmlinuz ]; then
+ mv $4/vmlinuz $4/vmlinuz.old
+fi
+
+if [ -f $4/System.map ]; then
+ mv $4/System.map $4/System.old
+fi
+
+cat $2 > $4/vmlinuz
+cp $3 $4/System.map
+
+sync
diff --git a/arch/nios2/boot/linked_dtb.S b/arch/nios2/boot/linked_dtb.S
new file mode 100644
index 000000000000..071f922db338
--- /dev/null
+++ b/arch/nios2/boot/linked_dtb.S
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2011 Thomas Chou <thomas@wytron.com.tw>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+.section .dtb.init.rodata,"a"
+.incbin "arch/nios2/boot/system.dtb"
diff --git a/arch/nios2/configs/3c120_defconfig b/arch/nios2/configs/3c120_defconfig
new file mode 100644
index 000000000000..87541f0a5d6e
--- /dev/null
+++ b/arch/nios2/configs/3c120_defconfig
@@ -0,0 +1,77 @@
+CONFIG_SYSVIPC=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_SYSCTL_SYSCALL=y
+# CONFIG_ELF_CORE is not set
+# CONFIG_EPOLL is not set
+# CONFIG_SIGNALFD is not set
+# CONFIG_TIMERFD is not set
+# CONFIG_EVENTFD is not set
+# CONFIG_SHMEM is not set
+# CONFIG_AIO is not set
+CONFIG_EMBEDDED=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_NIOS2_MEM_BASE=0x10000000
+CONFIG_NIOS2_HW_MUL_SUPPORT=y
+CONFIG_NIOS2_HW_DIV_SUPPORT=y
+CONFIG_CUSTOM_CACHE_SETTINGS=y
+CONFIG_NIOS2_DCACHE_SIZE=0x8000
+CONFIG_NIOS2_ICACHE_SIZE=0x8000
+# CONFIG_NIOS2_CMDLINE_IGNORE_DTB is not set
+CONFIG_NIOS2_PASS_CMDLINE=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_FW_LOADER is not set
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_NETDEVICES=y
+CONFIG_ALTERA_TSE=y
+CONFIG_MARVELL_PHY=y
+# CONFIG_WLAN is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO_SERPORT is not set
+# CONFIG_VT is not set
+CONFIG_SERIAL_ALTERA_JTAGUART=y
+CONFIG_SERIAL_ALTERA_JTAGUART_CONSOLE=y
+CONFIG_SERIAL_ALTERA_UART=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+# CONFIG_DNOTIFY is not set
+# CONFIG_INOTIFY_USER is not set
+CONFIG_JFFS2_FS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_ROOT_NFS=y
+CONFIG_SUNRPC_DEBUG=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
diff --git a/arch/nios2/include/asm/Kbuild b/arch/nios2/include/asm/Kbuild
new file mode 100644
index 000000000000..bb160be0dc28
--- /dev/null
+++ b/arch/nios2/include/asm/Kbuild
@@ -0,0 +1,66 @@
+generic-y += atomic.h
+generic-y += auxvec.h
+generic-y += barrier.h
+generic-y += bitops.h
+generic-y += bitsperlong.h
+generic-y += bug.h
+generic-y += bugs.h
+generic-y += clkdev.h
+generic-y += cputime.h
+generic-y += current.h
+generic-y += device.h
+generic-y += div64.h
+generic-y += dma.h
+generic-y += emergency-restart.h
+generic-y += errno.h
+generic-y += exec.h
+generic-y += fb.h
+generic-y += fcntl.h
+generic-y += ftrace.h
+generic-y += futex.h
+generic-y += hardirq.h
+generic-y += hash.h
+generic-y += hw_irq.h
+generic-y += ioctl.h
+generic-y += ioctls.h
+generic-y += ipcbuf.h
+generic-y += irq_regs.h
+generic-y += irq_work.h
+generic-y += kdebug.h
+generic-y += kmap_types.h
+generic-y += kvm_para.h
+generic-y += local.h
+generic-y += mcs_spinlock.h
+generic-y += mman.h
+generic-y += module.h
+generic-y += msgbuf.h
+generic-y += param.h
+generic-y += pci.h
+generic-y += percpu.h
+generic-y += poll.h
+generic-y += posix_types.h
+generic-y += preempt.h
+generic-y += resource.h
+generic-y += scatterlist.h
+generic-y += sections.h
+generic-y += segment.h
+generic-y += sembuf.h
+generic-y += serial.h
+generic-y += shmbuf.h
+generic-y += shmparam.h
+generic-y += siginfo.h
+generic-y += signal.h
+generic-y += socket.h
+generic-y += sockios.h
+generic-y += spinlock.h
+generic-y += stat.h
+generic-y += statfs.h
+generic-y += termbits.h
+generic-y += termios.h
+generic-y += topology.h
+generic-y += trace_clock.h
+generic-y += types.h
+generic-y += unaligned.h
+generic-y += user.h
+generic-y += vga.h
+generic-y += xor.h
diff --git a/arch/nios2/include/asm/asm-macros.h b/arch/nios2/include/asm/asm-macros.h
new file mode 100644
index 000000000000..29fa2e4d7b00
--- /dev/null
+++ b/arch/nios2/include/asm/asm-macros.h
@@ -0,0 +1,309 @@
+/*
+ * Macros used to simplify coding multi-line assembler.
+ * Some of the bit test macros can simplify down to one line
+ * depending on the mask value.
+ *
+ * Copyright (C) 2004 Microtronix Datacom Ltd.
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ */
+#ifndef _ASM_NIOS2_ASMMACROS_H
+#define _ASM_NIOS2_ASMMACROS_H
+/*
+ * ANDs reg2 with mask and places the result in reg1.
+ *
+ * You cannot use the same register for reg1 & reg2.
+ */
+
+.macro ANDI32 reg1, reg2, mask
+.if \mask & 0xffff
+ .if \mask & 0xffff0000
+ movhi \reg1, %hi(\mask)
+ ori \reg1, \reg1, %lo(\mask)
+ and \reg1, \reg1, \reg2
+ .else
+ andi \reg1, \reg2, %lo(\mask)
+ .endif
+.else
+ andhi \reg1, \reg2, %hi(\mask)
+.endif
+.endm
+
+/*
+ * ORs reg2 with mask and places the result in reg1.
+ *
+ * It is safe to use the same register for reg1 & reg2.
+ */
+
+.macro ORI32 reg1, reg2, mask
+.if \mask & 0xffff
+ .if \mask & 0xffff0000
+ orhi \reg1, \reg2, %hi(\mask)
+ ori \reg1, \reg1, %lo(\mask)
+ .else
+ ori \reg1, \reg2, %lo(\mask)
+ .endif
+.else
+ orhi \reg1, \reg2, %hi(\mask)
+.endif
+.endm
+
+/*
+ * XORs reg2 with mask and places the result in reg1.
+ *
+ * It is safe to use the same register for reg1 & reg2.
+ */
+
+.macro XORI32 reg1, reg2, mask
+.if \mask & 0xffff
+ .if \mask & 0xffff0000
+ xorhi \reg1, \reg2, %hi(\mask)
+ xori \reg1, \reg1, %lo(\mask)
+ .else
+ xori \reg1, \reg2, %lo(\mask)
+ .endif
+.else
+ xorhi \reg1, \reg2, %hi(\mask)
+.endif
+.endm
+
+/*
+ * This is a support macro for BTBZ & BTBNZ. It checks
+ * the bit position to make sure it is valid for a 32-bit value.
+ *
+ * It is safe to use the same register for reg1 & reg2.
+ */
+
+.macro BT reg1, reg2, bit
+.if \bit > 31
+ .err
+.else
+ .if \bit < 16
+ andi \reg1, \reg2, (1 << \bit)
+ .else
+ andhi \reg1, \reg2, (1 << (\bit - 16))
+ .endif
+.endif
+.endm
+
+/*
+ * Tests the bit in reg2 and branches to label if the
+ * bit is zero. The result of the bit test is stored in reg1.
+ *
+ * It is safe to use the same register for reg1 & reg2.
+ */
+
+.macro BTBZ reg1, reg2, bit, label
+ BT \reg1, \reg2, \bit
+ beq \reg1, r0, \label
+.endm
+
+/*
+ * Tests the bit in reg2 and branches to label if the
+ * bit is non-zero. The result of the bit test is stored in reg1.
+ *
+ * It is safe to use the same register for reg1 & reg2.
+ */
+
+.macro BTBNZ reg1, reg2, bit, label
+ BT \reg1, \reg2, \bit
+ bne \reg1, r0, \label
+.endm
+
+/*
+ * Tests the bit in reg2 and then complements the bit in reg2.
+ * The result of the bit test is stored in reg1.
+ *
+ * It is NOT safe to use the same register for reg1 & reg2.
+ */
+
+.macro BTC reg1, reg2, bit
+.if \bit > 31
+ .err
+.else
+ .if \bit < 16
+ andi \reg1, \reg2, (1 << \bit)
+ xori \reg2, \reg2, (1 << \bit)
+ .else
+ andhi \reg1, \reg2, (1 << (\bit - 16))
+ xorhi \reg2, \reg2, (1 << (\bit - 16))
+ .endif
+.endif
+.endm
+
+/*
+ * Tests the bit in reg2 and then sets the bit in reg2.
+ * The result of the bit test is stored in reg1.
+ *
+ * It is NOT safe to use the same register for reg1 & reg2.
+ */
+
+.macro BTS reg1, reg2, bit
+.if \bit > 31
+ .err
+.else
+ .if \bit < 16
+ andi \reg1, \reg2, (1 << \bit)
+ ori \reg2, \reg2, (1 << \bit)
+ .else
+ andhi \reg1, \reg2, (1 << (\bit - 16))
+ orhi \reg2, \reg2, (1 << (\bit - 16))
+ .endif
+.endif
+.endm
+
+/*
+ * Tests the bit in reg2 and then resets the bit in reg2.
+ * The result of the bit test is stored in reg1.
+ *
+ * It is NOT safe to use the same register for reg1 & reg2.
+ */
+
+.macro BTR reg1, reg2, bit
+.if \bit > 31
+ .err
+.else
+ .if \bit < 16
+ andi \reg1, \reg2, (1 << \bit)
+ andi \reg2, \reg2, %lo(~(1 << \bit))
+ .else
+ andhi \reg1, \reg2, (1 << (\bit - 16))
+ andhi \reg2, \reg2, %lo(~(1 << (\bit - 16)))
+ .endif
+.endif
+.endm
+
+/*
+ * Tests the bit in reg2 and then complements the bit in reg2.
+ * The result of the bit test is stored in reg1. If the
+ * original bit was zero it branches to label.
+ *
+ * It is NOT safe to use the same register for reg1 & reg2.
+ */
+
+.macro BTCBZ reg1, reg2, bit, label
+ BTC \reg1, \reg2, \bit
+ beq \reg1, r0, \label
+.endm
+
+/*
+ * Tests the bit in reg2 and then complements the bit in reg2.
+ * The result of the bit test is stored in reg1. If the
+ * original bit was non-zero it branches to label.
+ *
+ * It is NOT safe to use the same register for reg1 & reg2.
+ */
+
+.macro BTCBNZ reg1, reg2, bit, label
+ BTC \reg1, \reg2, \bit
+ bne \reg1, r0, \label
+.endm
+
+/*
+ * Tests the bit in reg2 and then sets the bit in reg2.
+ * The result of the bit test is stored in reg1. If the
+ * original bit was zero it branches to label.
+ *
+ * It is NOT safe to use the same register for reg1 & reg2.
+ */
+
+.macro BTSBZ reg1, reg2, bit, label
+ BTS \reg1, \reg2, \bit
+ beq \reg1, r0, \label
+.endm
+
+/*
+ * Tests the bit in reg2 and then sets the bit in reg2.
+ * The result of the bit test is stored in reg1. If the
+ * original bit was non-zero it branches to label.
+ *
+ * It is NOT safe to use the same register for reg1 & reg2.
+ */
+
+.macro BTSBNZ reg1, reg2, bit, label
+ BTS \reg1, \reg2, \bit
+ bne \reg1, r0, \label
+.endm
+
+/*
+ * Tests the bit in reg2 and then resets the bit in reg2.
+ * The result of the bit test is stored in reg1. If the
+ * original bit was zero it branches to label.
+ *
+ * It is NOT safe to use the same register for reg1 & reg2.
+ */
+
+.macro BTRBZ reg1, reg2, bit, label
+ BTR \reg1, \reg2, \bit
+ beq \reg1, r0, \label
+.endm
+
+/*
+ * Tests the bit in reg2 and then resets the bit in reg2.
+ * The result of the bit test is stored in reg1. If the
+ * original bit was non-zero it branches to label.
+ *
+ * It is NOT safe to use the same register for reg1 & reg2.
+ */
+
+.macro BTRBNZ reg1, reg2, bit, label
+ BTR \reg1, \reg2, \bit
+ bne \reg1, r0, \label
+.endm
+
+/*
+ * Tests the bits in mask against reg2 and stores the result in reg1.
+ * If all the bits in the mask are zero it branches to label.
+ *
+ * It is safe to use the same register for reg1 & reg2.
+ */
+
+.macro TSTBZ reg1, reg2, mask, label
+ ANDI32 \reg1, \reg2, \mask
+ beq \reg1, r0, \label
+.endm
+
+/*
+ * Tests the bits in mask against reg2 and stores the result in reg1.
+ * If any of the bits in the mask are 1 it branches to label.
+ *
+ * It is safe to use the same register for reg1 & reg2.
+ */
+
+.macro TSTBNZ reg1, reg2, mask, label
+ ANDI32 \reg1, \reg2, \mask
+ bne \reg1, r0, \label
+.endm
+
+/*
+ * Pushes reg onto the stack.
+ */
+
+.macro PUSH reg
+ addi sp, sp, -4
+ stw \reg, 0(sp)
+.endm
+
+/*
+ * Pops the top of the stack into reg.
+ */
+
+.macro POP reg
+ ldw \reg, 0(sp)
+ addi sp, sp, 4
+.endm
+
+
+#endif /* _ASM_NIOS2_ASMMACROS_H */
diff --git a/arch/nios2/include/asm/asm-offsets.h b/arch/nios2/include/asm/asm-offsets.h
new file mode 100644
index 000000000000..5b9f5e04a058
--- /dev/null
+++ b/arch/nios2/include/asm/asm-offsets.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2010 Tobias Klauser <tklauser@distanz.ch>
+ * Copyright (C) 2009 Thomas Chou <thomas@wytron.com.tw>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include <generated/asm-offsets.h>
diff --git a/arch/nios2/include/asm/cache.h b/arch/nios2/include/asm/cache.h
new file mode 100644
index 000000000000..2293cf57e307
--- /dev/null
+++ b/arch/nios2/include/asm/cache.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2004 Microtronix Datacom Ltd.
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#ifndef _ASM_NIOS2_CACHE_H
+#define _ASM_NIOS2_CACHE_H
+
+#define NIOS2_DCACHE_SIZE CONFIG_NIOS2_DCACHE_SIZE
+#define NIOS2_ICACHE_SIZE CONFIG_NIOS2_ICACHE_SIZE
+#define NIOS2_DCACHE_LINE_SIZE CONFIG_NIOS2_DCACHE_LINE_SIZE
+#define NIOS2_ICACHE_LINE_SHIFT 5
+#define NIOS2_ICACHE_LINE_SIZE (1 << NIOS2_ICACHE_LINE_SHIFT)
+
+/* bytes per L1 cache line */
+#define L1_CACHE_SHIFT NIOS2_ICACHE_LINE_SHIFT
+#define L1_CACHE_BYTES NIOS2_ICACHE_LINE_SIZE
+
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+
+#define __cacheline_aligned
+#define ____cacheline_aligned
+
+#endif
diff --git a/arch/nios2/include/asm/cacheflush.h b/arch/nios2/include/asm/cacheflush.h
new file mode 100644
index 000000000000..52abba973dc2
--- /dev/null
+++ b/arch/nios2/include/asm/cacheflush.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2003 Microtronix Datacom Ltd.
+ * Copyright (C) 2000-2002 Greg Ungerer <gerg@snapgear.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef _ASM_NIOS2_CACHEFLUSH_H
+#define _ASM_NIOS2_CACHEFLUSH_H
+
+#include <linux/mm_types.h>
+
+/*
+ * This flag is used to indicate that the page pointed to by a pte is clean
+ * and does not require cleaning before returning it to the user.
+ */
+#define PG_dcache_clean PG_arch_1
+
+struct mm_struct;
+
+extern void flush_cache_all(void);
+extern void flush_cache_mm(struct mm_struct *mm);
+extern void flush_cache_dup_mm(struct mm_struct *mm);
+extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end);
+extern void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
+ unsigned long pfn);
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
+extern void flush_dcache_page(struct page *page);
+
+extern void flush_icache_range(unsigned long start, unsigned long end);
+extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
+
+#define flush_cache_vmap(start, end) flush_dcache_range(start, end)
+#define flush_cache_vunmap(start, end) flush_dcache_range(start, end)
+
+extern void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+ unsigned long user_vaddr,
+ void *dst, void *src, int len);
+extern void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
+ unsigned long user_vaddr,
+ void *dst, void *src, int len);
+
+extern void flush_dcache_range(unsigned long start, unsigned long end);
+extern void invalidate_dcache_range(unsigned long start, unsigned long end);
+
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
+
+#endif /* _ASM_NIOS2_CACHEFLUSH_H */
diff --git a/arch/nios2/include/asm/checksum.h b/arch/nios2/include/asm/checksum.h
new file mode 100644
index 000000000000..6bc1f0d5df7b
--- /dev/null
+++ b/arch/nios2/include/asm/checksum.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2010 Tobias Klauser <tklauser@distanz.ch>
+ * Copyright (C) 2004 Microtronix Datacom Ltd.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef _ASM_NIOS_CHECKSUM_H
+#define _ASM_NIOS_CHECKSUM_H
+
+/* Take these from lib/checksum.c */
+extern __wsum csum_partial(const void *buff, int len, __wsum sum);
+extern __wsum csum_partial_copy(const void *src, void *dst, int len,
+ __wsum sum);
+extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
+ int len, __wsum sum, int *csum_err);
+#define csum_partial_copy_nocheck(src, dst, len, sum) \
+ csum_partial_copy((src), (dst), (len), (sum))
+
+extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
+extern __sum16 ip_compute_csum(const void *buff, int len);
+
+/*
+ * Fold a partial checksum
+ */
+static inline __sum16 csum_fold(__wsum sum)
+{
+ __asm__ __volatile__(
+ "add %0, %1, %0\n"
+ "cmpltu r8, %0, %1\n"
+ "srli %0, %0, 16\n"
+ "add %0, %0, r8\n"
+ "nor %0, %0, %0\n"
+ : "=r" (sum)
+ : "r" (sum << 16), "0" (sum)
+ : "r8");
+ return (__force __sum16) sum;
+}
+
+/*
+ * computes the checksum of the TCP/UDP pseudo-header
+ * returns a 16-bit checksum, already complemented
+ */
+#define csum_tcpudp_nofold csum_tcpudp_nofold
+static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
+ unsigned short len,
+ unsigned short proto,
+ __wsum sum)
+{
+ __asm__ __volatile__(
+ "add %0, %1, %0\n"
+ "cmpltu r8, %0, %1\n"
+ "add %0, %0, r8\n" /* add carry */
+ "add %0, %2, %0\n"
+ "cmpltu r8, %0, %2\n"
+ "add %0, %0, r8\n" /* add carry */
+ "add %0, %3, %0\n"
+ "cmpltu r8, %0, %3\n"
+ "add %0, %0, r8\n" /* add carry */
+ : "=r" (sum), "=r" (saddr)
+ : "r" (daddr), "r" ((ntohs(len) << 16) + (proto * 256)),
+ "0" (sum),
+ "1" (saddr)
+ : "r8");
+
+ return sum;
+}
+
+static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
+ unsigned short len,
+ unsigned short proto, __wsum sum)
+{
+ return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
+}
+
+#endif /* _ASM_NIOS_CHECKSUM_H */
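
Note: csum_fold() above performs the 16-bit fold of a 32-bit one's-complement sum with explicit carry handling in Nios II inline assembly (cmpltu materialises the carry into r8). As a hedged aside, here is a minimal user-space sketch of the same fold in portable C; it mirrors the effect of the assembly, not its instruction sequence, and the test value is arbitrary:

#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit partial checksum into 16 bits with end-around carry,
 * then return the one's complement, as csum_fold() does. */
static uint16_t fold32(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* add upper half into lower half */
	sum = (sum & 0xffff) + (sum >> 16);	/* fold in any carry from that add */
	return (uint16_t)~sum;
}

int main(void)
{
	printf("0x%04x\n", fold32(0x0001fffeu));	/* folds to 0xffff, prints 0x0000 */
	return 0;
}
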
diff --git a/arch/nios2/include/asm/cmpxchg.h b/arch/nios2/include/asm/cmpxchg.h
new file mode 100644
index 000000000000..85938711542d
--- /dev/null
+++ b/arch/nios2/include/asm/cmpxchg.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2004 Microtronix Datacom Ltd.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef _ASM_NIOS2_CMPXCHG_H
+#define _ASM_NIOS2_CMPXCHG_H
+
+#include <linux/irqflags.h>
+
+#define xchg(ptr, x) \
+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
+
+struct __xchg_dummy { unsigned long a[100]; };
+#define __xg(x) ((volatile struct __xchg_dummy *)(x))
+
+static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
+ int size)
+{
+ unsigned long tmp, flags;
+
+ local_irq_save(flags);
+
+ switch (size) {
+ case 1:
+ __asm__ __volatile__(
+ "ldb %0, %2\n"
+ "stb %1, %2\n"
+ : "=&r" (tmp)
+ : "r" (x), "m" (*__xg(ptr))
+ : "memory");
+ break;
+ case 2:
+ __asm__ __volatile__(
+ "ldh %0, %2\n"
+ "sth %1, %2\n"
+ : "=&r" (tmp)
+ : "r" (x), "m" (*__xg(ptr))
+ : "memory");
+ break;
+ case 4:
+ __asm__ __volatile__(
+ "ldw %0, %2\n"
+ "stw %1, %2\n"
+ : "=&r" (tmp)
+ : "r" (x), "m" (*__xg(ptr))
+ : "memory");
+ break;
+ }
+
+ local_irq_restore(flags);
+ return tmp;
+}
+
+#include <asm-generic/cmpxchg.h>
+#include <asm-generic/cmpxchg-local.h>
+
+#endif /* _ASM_NIOS2_CMPXCHG_H */
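
Note: the base Nios II ISA has no atomic swap instruction, so __xchg() above masks local interrupts around a plain load/store pair and picks the access width at compile time from sizeof(*(ptr)). A small user-space sketch of that width-dispatch idiom follows; it is deliberately non-atomic and the names (plain_xchg, PLAIN_XCHG) are illustrative only:

#include <stdio.h>

/* Non-atomic stand-in: select the access width from "size",
 * return the old value and store the new one. */
static unsigned long plain_xchg(volatile void *ptr, unsigned long x, int size)
{
	unsigned long old = 0;

	switch (size) {
	case 1:
		old = *(volatile unsigned char *)ptr;
		*(volatile unsigned char *)ptr = x;
		break;
	case 2:
		old = *(volatile unsigned short *)ptr;
		*(volatile unsigned short *)ptr = x;
		break;
	case 4:
		old = *(volatile unsigned int *)ptr;
		*(volatile unsigned int *)ptr = x;
		break;
	}
	return old;
}

#define PLAIN_XCHG(ptr, x) \
	plain_xchg((ptr), (unsigned long)(x), sizeof(*(ptr)))

int main(void)
{
	unsigned short v = 7;
	unsigned long old = PLAIN_XCHG(&v, 42);

	printf("old=%lu new=%u\n", old, v);	/* old=7 new=42 */
	return 0;
}
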
diff --git a/arch/nios2/include/asm/cpuinfo.h b/arch/nios2/include/asm/cpuinfo.h
new file mode 100644
index 000000000000..e88fcae464d9
--- /dev/null
+++ b/arch/nios2/include/asm/cpuinfo.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2011 Tobias Klauser <tklauser@distanz.ch>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#ifndef _ASM_NIOS2_CPUINFO_H
+#define _ASM_NIOS2_CPUINFO_H
+
+#include <linux/types.h>
+
+struct cpuinfo {
+ /* Core CPU configuration */
+ char cpu_impl[12];
+ u32 cpu_clock_freq;
+ u32 mmu;
+ u32 has_div;
+ u32 has_mul;
+ u32 has_mulx;
+
+ /* CPU caches */
+ u32 icache_line_size;
+ u32 icache_size;
+ u32 dcache_line_size;
+ u32 dcache_size;
+
+ /* TLB */
+ u32 tlb_pid_num_bits; /* number of bits used for the PID in TLBMISC */
+ u32 tlb_num_ways;
+ u32 tlb_num_ways_log2;
+ u32 tlb_num_entries;
+ u32 tlb_num_lines;
+ u32 tlb_ptr_sz;
+
+ /* Addresses */
+ u32 reset_addr;
+ u32 exception_addr;
+ u32 fast_tlb_miss_exc_addr;
+};
+
+extern struct cpuinfo cpuinfo;
+
+extern void setup_cpuinfo(void);
+
+#endif /* _ASM_NIOS2_CPUINFO_H */
diff --git a/arch/nios2/include/asm/delay.h b/arch/nios2/include/asm/delay.h
new file mode 100644
index 000000000000..098e49bf3aa3
--- /dev/null
+++ b/arch/nios2/include/asm/delay.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2014 Altera Corporation
+ * Copyright (C) 2004 Microtronix Datacom Ltd
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef _ASM_NIOS2_DELAY_H
+#define _ASM_NIOS2_DELAY_H
+
+#include <asm-generic/delay.h>
+
+/* Undefined functions to get compile-time errors */
+extern void __bad_udelay(void);
+extern void __bad_ndelay(void);
+
+extern unsigned long loops_per_jiffy;
+
+#endif /* _ASM_NIOS2_DELAY_H */
diff --git a/arch/nios2/include/asm/dma-mapping.h b/arch/nios2/include/asm/dma-mapping.h
new file mode 100644
index 000000000000..b5567233f7f1
--- /dev/null
+++ b/arch/nios2/include/asm/dma-mapping.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright (C) 2011 Tobias Klauser <tklauser@distanz.ch>
+ * Copyright (C) 2009 Wind River Systems Inc
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file COPYING in the main directory of this
+ * archive for more details.
+ */
+
+#ifndef _ASM_NIOS2_DMA_MAPPING_H
+#define _ASM_NIOS2_DMA_MAPPING_H
+
+#include <linux/scatterlist.h>
+#include <linux/cache.h>
+#include <asm/cacheflush.h>
+
+static inline void __dma_sync_for_device(void *vaddr, size_t size,
+ enum dma_data_direction direction)
+{
+ switch (direction) {
+ case DMA_FROM_DEVICE:
+ invalidate_dcache_range((unsigned long)vaddr,
+ (unsigned long)(vaddr + size));
+ break;
+ case DMA_TO_DEVICE:
+ /*
+	 * We just need to flush the caches here, but the Nios2 flush
+	 * instruction will do both writeback and invalidate.
+ */
+ case DMA_BIDIRECTIONAL: /* flush and invalidate */
+ flush_dcache_range((unsigned long)vaddr,
+ (unsigned long)(vaddr + size));
+ break;
+ default:
+ BUG();
+ }
+}
+
+static inline void __dma_sync_for_cpu(void *vaddr, size_t size,
+ enum dma_data_direction direction)
+{
+ switch (direction) {
+ case DMA_BIDIRECTIONAL:
+ case DMA_FROM_DEVICE:
+ invalidate_dcache_range((unsigned long)vaddr,
+ (unsigned long)(vaddr + size));
+ break;
+ case DMA_TO_DEVICE:
+ break;
+ default:
+ BUG();
+ }
+}
+
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+
+void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag);
+
+void dma_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle);
+
+static inline dma_addr_t dma_map_single(struct device *dev, void *ptr,
+ size_t size,
+ enum dma_data_direction direction)
+{
+ BUG_ON(!valid_dma_direction(direction));
+ __dma_sync_for_device(ptr, size, direction);
+ return virt_to_phys(ptr);
+}
+
+static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction direction)
+{
+}
+
+extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction direction);
+extern dma_addr_t dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction direction);
+extern void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
+ size_t size, enum dma_data_direction direction);
+extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+ int nhwentries, enum dma_data_direction direction);
+extern void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction direction);
+extern void dma_sync_single_for_device(struct device *dev,
+ dma_addr_t dma_handle, size_t size, enum dma_data_direction direction);
+extern void dma_sync_single_range_for_cpu(struct device *dev,
+ dma_addr_t dma_handle, unsigned long offset, size_t size,
+ enum dma_data_direction direction);
+extern void dma_sync_single_range_for_device(struct device *dev,
+ dma_addr_t dma_handle, unsigned long offset, size_t size,
+ enum dma_data_direction direction);
+extern void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+ int nelems, enum dma_data_direction direction);
+extern void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+ int nelems, enum dma_data_direction direction);
+
+static inline int dma_supported(struct device *dev, u64 mask)
+{
+ return 1;
+}
+
+static inline int dma_set_mask(struct device *dev, u64 mask)
+{
+ if (!dev->dma_mask || !dma_supported(dev, mask))
+ return -EIO;
+
+ *dev->dma_mask = mask;
+
+ return 0;
+}
+
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return 0;
+}
+
+/*
+ * dma_alloc_noncoherent() returns non-cacheable memory, so there's no need to
+ * do any flushing here.
+ */
+static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction direction)
+{
+}
+
+/* drivers/base/dma-mapping.c */
+extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size);
+extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr,
+ size_t size);
+
+#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
+#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
+
+#endif /* _ASM_NIOS2_DMA_MAPPING_H */
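
Note: a hypothetical driver fragment (the names example_start_tx, dev, buf and len are made up for illustration) showing how the streaming API declared above is intended to be used. For DMA_TO_DEVICE the map operation writes the buffer back to memory, and the returned bus address is then handed to the device:

static int example_start_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... program the device with "handle" and start the transfer ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}
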
diff --git a/arch/nios2/include/asm/elf.h b/arch/nios2/include/asm/elf.h
new file mode 100644
index 000000000000..b7d655dff731
--- /dev/null
+++ b/arch/nios2/include/asm/elf.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2011 Tobias Klauser <tklauser@distanz.ch>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#ifndef _ASM_NIOS2_ELF_H
+#define _ASM_NIOS2_ELF_H
+
+#include <uapi/asm/elf.h>
+
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+#define elf_check_arch(x) ((x)->e_machine == EM_ALTERA_NIOS2)
+
+#define ELF_PLAT_INIT(_r, load_addr)
+
+#define CORE_DUMP_USE_REGSET
+#define ELF_EXEC_PAGESIZE 4096
+
+/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
+ use of this is to invoke "./ld.so someprog" to test out a new version of
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
+#define ELF_ET_DYN_BASE 0xD0000000UL
+
+/* regs is struct pt_regs, pr_reg is elf_gregset_t (which is
+ now struct_user_regs, they are different) */
+
+#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
+struct linux_binprm;
+extern int arch_setup_additional_pages(struct linux_binprm *bprm,
+ int uses_interp);
+#define ELF_CORE_COPY_REGS(pr_reg, regs) \
+{ do { \
+ /* Bleech. */ \
+ pr_reg[0] = regs->r8; \
+ pr_reg[1] = regs->r9; \
+ pr_reg[2] = regs->r10; \
+ pr_reg[3] = regs->r11; \
+ pr_reg[4] = regs->r12; \
+ pr_reg[5] = regs->r13; \
+ pr_reg[6] = regs->r14; \
+ pr_reg[7] = regs->r15; \
+ pr_reg[8] = regs->r1; \
+ pr_reg[9] = regs->r2; \
+ pr_reg[10] = regs->r3; \
+ pr_reg[11] = regs->r4; \
+ pr_reg[12] = regs->r5; \
+ pr_reg[13] = regs->r6; \
+ pr_reg[14] = regs->r7; \
+ pr_reg[15] = regs->orig_r2; \
+ pr_reg[16] = regs->ra; \
+ pr_reg[17] = regs->fp; \
+ pr_reg[18] = regs->sp; \
+ pr_reg[19] = regs->gp; \
+ pr_reg[20] = regs->estatus; \
+ pr_reg[21] = regs->ea; \
+ pr_reg[22] = regs->orig_r7; \
+ { \
+ struct switch_stack *sw = ((struct switch_stack *)regs) - 1; \
+ pr_reg[23] = sw->r16; \
+ pr_reg[24] = sw->r17; \
+ pr_reg[25] = sw->r18; \
+ pr_reg[26] = sw->r19; \
+ pr_reg[27] = sw->r20; \
+ pr_reg[28] = sw->r21; \
+ pr_reg[29] = sw->r22; \
+ pr_reg[30] = sw->r23; \
+ pr_reg[31] = sw->fp; \
+ pr_reg[32] = sw->gp; \
+ pr_reg[33] = sw->ra; \
+ } \
+} while (0); }
+
+/* This yields a mask that user programs can use to figure out what
+ instruction set this cpu supports. */
+
+#define ELF_HWCAP (0)
+
+/* This yields a string that ld.so will use to load implementation
+ specific libraries for optimization. This is more specific in
+ intent than poking at uname or /proc/cpuinfo. */
+
+#define ELF_PLATFORM (NULL)
+
+#endif /* _ASM_NIOS2_ELF_H */
diff --git a/arch/nios2/include/asm/entry.h b/arch/nios2/include/asm/entry.h
new file mode 100644
index 000000000000..cf37f55efbc2
--- /dev/null
+++ b/arch/nios2/include/asm/entry.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright (C) 2010 Tobias Klauser <tklauser@distanz.ch>
+ * Copyright (C) 2004 Microtronix Datacom Ltd.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef _ASM_NIOS2_ENTRY_H
+#define _ASM_NIOS2_ENTRY_H
+
+#ifdef __ASSEMBLY__
+
+#include <asm/processor.h>
+#include <asm/registers.h>
+#include <asm/asm-offsets.h>
+
+/*
+ * Standard Nios2 interrupt entry and exit macros.
+ * Must be called with interrupts disabled.
+ */
+.macro SAVE_ALL
+ rdctl r24, estatus
+ andi r24, r24, ESTATUS_EU
+ beq r24, r0, 1f /* In supervisor mode, already on kernel stack */
+
+ movia r24, _current_thread /* Switch to current kernel stack */
+ ldw r24, 0(r24) /* using the thread_info */
+ addi r24, r24, THREAD_SIZE-PT_REGS_SIZE
+ stw sp, PT_SP(r24) /* Save user stack before changing */
+ mov sp, r24
+ br 2f
+
+1 : mov r24, sp
+ addi sp, sp, -PT_REGS_SIZE /* Backup the kernel stack pointer */
+ stw r24, PT_SP(sp)
+2 : stw r1, PT_R1(sp)
+ stw r2, PT_R2(sp)
+ stw r3, PT_R3(sp)
+ stw r4, PT_R4(sp)
+ stw r5, PT_R5(sp)
+ stw r6, PT_R6(sp)
+ stw r7, PT_R7(sp)
+ stw r8, PT_R8(sp)
+ stw r9, PT_R9(sp)
+ stw r10, PT_R10(sp)
+ stw r11, PT_R11(sp)
+ stw r12, PT_R12(sp)
+ stw r13, PT_R13(sp)
+ stw r14, PT_R14(sp)
+ stw r15, PT_R15(sp)
+ stw r2, PT_ORIG_R2(sp)
+ stw r7, PT_ORIG_R7(sp)
+
+ stw ra, PT_RA(sp)
+ stw fp, PT_FP(sp)
+ stw gp, PT_GP(sp)
+ rdctl r24, estatus
+ stw r24, PT_ESTATUS(sp)
+ stw ea, PT_EA(sp)
+.endm
+
+.macro RESTORE_ALL
+ ldw r1, PT_R1(sp) /* Restore registers */
+ ldw r2, PT_R2(sp)
+ ldw r3, PT_R3(sp)
+ ldw r4, PT_R4(sp)
+ ldw r5, PT_R5(sp)
+ ldw r6, PT_R6(sp)
+ ldw r7, PT_R7(sp)
+ ldw r8, PT_R8(sp)
+ ldw r9, PT_R9(sp)
+ ldw r10, PT_R10(sp)
+ ldw r11, PT_R11(sp)
+ ldw r12, PT_R12(sp)
+ ldw r13, PT_R13(sp)
+ ldw r14, PT_R14(sp)
+ ldw r15, PT_R15(sp)
+ ldw ra, PT_RA(sp)
+ ldw fp, PT_FP(sp)
+ ldw gp, PT_GP(sp)
+ ldw r24, PT_ESTATUS(sp)
+ wrctl estatus, r24
+ ldw ea, PT_EA(sp)
+ ldw sp, PT_SP(sp) /* Restore sp last */
+.endm
+
+.macro SAVE_SWITCH_STACK
+ addi sp, sp, -SWITCH_STACK_SIZE
+ stw r16, SW_R16(sp)
+ stw r17, SW_R17(sp)
+ stw r18, SW_R18(sp)
+ stw r19, SW_R19(sp)
+ stw r20, SW_R20(sp)
+ stw r21, SW_R21(sp)
+ stw r22, SW_R22(sp)
+ stw r23, SW_R23(sp)
+ stw fp, SW_FP(sp)
+ stw gp, SW_GP(sp)
+ stw ra, SW_RA(sp)
+.endm
+
+.macro RESTORE_SWITCH_STACK
+ ldw r16, SW_R16(sp)
+ ldw r17, SW_R17(sp)
+ ldw r18, SW_R18(sp)
+ ldw r19, SW_R19(sp)
+ ldw r20, SW_R20(sp)
+ ldw r21, SW_R21(sp)
+ ldw r22, SW_R22(sp)
+ ldw r23, SW_R23(sp)
+ ldw fp, SW_FP(sp)
+ ldw gp, SW_GP(sp)
+ ldw ra, SW_RA(sp)
+ addi sp, sp, SWITCH_STACK_SIZE
+.endm
+
+#endif /* __ASSEMBLY__ */
+#endif /* _ASM_NIOS2_ENTRY_H */
diff --git a/arch/nios2/include/asm/io.h b/arch/nios2/include/asm/io.h
new file mode 100644
index 000000000000..9102bfd3fa1c
--- /dev/null
+++ b/arch/nios2/include/asm/io.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2014 Altera Corporation
+ * Copyright (C) 2010 Tobias Klauser <tklauser@distanz.ch>
+ * Copyright (C) 2004 Microtronix Datacom Ltd.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef _ASM_NIOS2_IO_H
+#define _ASM_NIOS2_IO_H
+
+#include <linux/types.h>
+#include <asm/pgtable-bits.h>
+
+/* PCI is not supported in nios2, set this to 0. */
+#define IO_SPACE_LIMIT 0
+
+#define readb_relaxed(addr) readb(addr)
+#define readw_relaxed(addr) readw(addr)
+#define readl_relaxed(addr) readl(addr)
+
+#define writeb_relaxed(x, addr) writeb(x, addr)
+#define writew_relaxed(x, addr) writew(x, addr)
+#define writel_relaxed(x, addr) writel(x, addr)
+
+extern void __iomem *__ioremap(unsigned long physaddr, unsigned long size,
+ unsigned long cacheflag);
+extern void __iounmap(void __iomem *addr);
+
+static inline void __iomem *ioremap(unsigned long physaddr, unsigned long size)
+{
+ return __ioremap(physaddr, size, 0);
+}
+
+static inline void __iomem *ioremap_nocache(unsigned long physaddr,
+ unsigned long size)
+{
+ return __ioremap(physaddr, size, 0);
+}
+
+static inline void iounmap(void __iomem *addr)
+{
+ __iounmap(addr);
+}
+
+/* Pages to physical address... */
+#define page_to_phys(page) virt_to_phys(page_to_virt(page))
+#define page_to_bus(page) page_to_virt(page)
+
+/* Macros used for converting between virtual and physical mappings. */
+#define phys_to_virt(vaddr) \
+ ((void *)((unsigned long)(vaddr) | CONFIG_NIOS2_KERNEL_REGION_BASE))
+/* Clear top 3 bits */
+#define virt_to_phys(vaddr) \
+ ((unsigned long)((unsigned long)(vaddr) & ~0xE0000000))
+
+#include <asm-generic/io.h>
+
+#endif /* _ASM_NIOS2_IO_H */
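
Note: virt_to_phys()/phys_to_virt() above are pure bit arithmetic; kernel virtual addresses live in a fixed region, so translation just sets or clears the top address bits. A stand-alone sketch with an assumed region base of 0xC0000000 (the real value comes from CONFIG_NIOS2_KERNEL_REGION_BASE):

#include <stdio.h>

/* Assumed example value, not the kernel's configured one. */
#define KERNEL_REGION_BASE	0xC0000000UL

static unsigned long my_virt_to_phys(unsigned long vaddr)
{
	return vaddr & ~0xE0000000UL;		/* clear the top 3 bits */
}

static unsigned long my_phys_to_virt(unsigned long paddr)
{
	return paddr | KERNEL_REGION_BASE;	/* move into the kernel region */
}

int main(void)
{
	unsigned long p = 0x00100000UL;

	printf("phys 0x%08lx -> virt 0x%08lx -> phys 0x%08lx\n",
	       p, my_phys_to_virt(p), my_virt_to_phys(my_phys_to_virt(p)));
	return 0;
}
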
diff --git a/arch/nios2/include/asm/irq.h b/arch/nios2/include/asm/irq.h
new file mode 100644
index 000000000000..8e40fd94a36c
--- /dev/null
+++ b/arch/nios2/include/asm/irq.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2013 Altera Corporation
+ * Copyright (C) 2011 Tobias Klauser <tklauser@distanz.ch>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#ifndef _ASM_NIOS2_IRQ_H
+#define _ASM_NIOS2_IRQ_H
+
+#define NIOS2_CPU_NR_IRQS 32
+
+#include <asm-generic/irq.h>
+#include <linux/irqdomain.h>
+
+#endif
diff --git a/arch/nios2/include/asm/irqflags.h b/arch/nios2/include/asm/irqflags.h
new file mode 100644
index 000000000000..75ab92e639f8
--- /dev/null
+++ b/arch/nios2/include/asm/irqflags.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2010 Thomas Chou <thomas@wytron.com.tw>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+#ifndef _ASM_IRQFLAGS_H
+#define _ASM_IRQFLAGS_H
+
+#include <asm/registers.h>
+
+static inline unsigned long arch_local_save_flags(void)
+{
+ return RDCTL(CTL_STATUS);
+}
+
+/*
+ * This will restore ALL status register flags, not only the interrupt
+ * mask flag.
+ */
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+ WRCTL(CTL_STATUS, flags);
+}
+
+static inline void arch_local_irq_disable(void)
+{
+ unsigned long flags;
+
+ flags = arch_local_save_flags();
+ arch_local_irq_restore(flags & ~STATUS_PIE);
+}
+
+static inline void arch_local_irq_enable(void)
+{
+ unsigned long flags;
+
+ flags = arch_local_save_flags();
+ arch_local_irq_restore(flags | STATUS_PIE);
+}
+
+static inline int arch_irqs_disabled_flags(unsigned long flags)
+{
+ return (flags & STATUS_PIE) == 0;
+}
+
+static inline int arch_irqs_disabled(void)
+{
+ return arch_irqs_disabled_flags(arch_local_save_flags());
+}
+
+static inline unsigned long arch_local_irq_save(void)
+{
+ unsigned long flags;
+
+ flags = arch_local_save_flags();
+ arch_local_irq_restore(flags & ~STATUS_PIE);
+ return flags;
+}
+
+#endif /* _ASM_IRQFLAGS_H */
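
Note: as the comment above warns, arch_local_irq_restore() writes back the whole status register, and disable/enable only ever toggle the PIE bit of the saved word. A self-contained sketch of those semantics against a fake status word (fake_status and the helper names are stand-ins; STATUS_PIE matches asm/registers.h):

#include <stdio.h>

#define STATUS_PIE	(1 << 0)	/* processor interrupt enable */

static unsigned long fake_status = STATUS_PIE;	/* stands in for ctl0 */

static unsigned long save_flags(void)
{
	return fake_status;
}

static void restore_flags(unsigned long f)
{
	fake_status = f;
}

static unsigned long irq_save(void)
{
	unsigned long flags = save_flags();

	restore_flags(flags & ~STATUS_PIE);	/* disable interrupts */
	return flags;				/* caller keeps the old state */
}

int main(void)
{
	unsigned long flags = irq_save();

	printf("in critical section, PIE=%lu\n", fake_status & STATUS_PIE);
	restore_flags(flags);			/* re-enable only if it was enabled */
	printf("after restore,       PIE=%lu\n", fake_status & STATUS_PIE);
	return 0;
}
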
diff --git a/arch/nios2/include/asm/linkage.h b/arch/nios2/include/asm/linkage.h
new file mode 100644
index 000000000000..e0c6decd7d58
--- /dev/null
+++ b/arch/nios2/include/asm/linkage.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2009 Thomas Chou <thomas@wytron.com.tw>
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#ifndef _ASM_NIOS2_LINKAGE_H
+#define _ASM_NIOS2_LINKAGE_H
+
+/* This file is required by include/linux/linkage.h */
+#define __ALIGN .align 4
+#define __ALIGN_STR ".align 4"
+
+#endif
diff --git a/arch/nios2/include/asm/mmu.h b/arch/nios2/include/asm/mmu.h
new file mode 100644
index 000000000000..d9c0b1010f26
--- /dev/null
+++ b/arch/nios2/include/asm/mmu.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright (C) 2010 Tobias Klauser <tklauser@distanz.ch>
+ * Copyright (C) 2004 Microtronix Datacom Ltd.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef _ASM_NIOS2_MMU_H
+#define _ASM_NIOS2_MMU_H
+
+/* Default "unsigned long" context */
+typedef unsigned long mm_context_t;
+
+#endif /* _ASM_NIOS2_MMU_H */
diff --git a/arch/nios2/include/asm/mmu_context.h b/arch/nios2/include/asm/mmu_context.h
new file mode 100644
index 000000000000..294b4b1f81d4
--- /dev/null
+++ b/arch/nios2/include/asm/mmu_context.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2010 Tobias Klauser <tklauser@distanz.ch>
+ * Copyright (C) 1996, 1997, 1998, 1999 by Ralf Baechle
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ *
+ * based on MIPS asm/mmu_context.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef _ASM_NIOS2_MMU_CONTEXT_H
+#define _ASM_NIOS2_MMU_CONTEXT_H
+
+#include <asm-generic/mm_hooks.h>
+
+extern void mmu_context_init(void);
+extern unsigned long get_pid_from_context(mm_context_t *ctx);
+
+/*
+ * For the fast tlb miss handlers, we keep a pointer to the pgd that is
+ * currently in use on the processor.
+ */
+extern pgd_t *pgd_current;
+
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+}
+
+/*
+ * Initialize the context related info for a new mm_struct instance.
+ *
+ * Set all new contexts to 0, that way the generation will never match
+ * the currently running generation when this context is switched in.
+ */
+static inline int init_new_context(struct task_struct *tsk,
+ struct mm_struct *mm)
+{
+ mm->context = 0;
+ return 0;
+}
+
+/*
+ * Destroy context related info for an mm_struct that is about
+ * to be put to rest.
+ */
+static inline void destroy_context(struct mm_struct *mm)
+{
+}
+
+void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk);
+
+static inline void deactivate_mm(struct task_struct *tsk,
+ struct mm_struct *mm)
+{
+}
+
+/*
+ * After we have set current->mm to a new value, this activates
+ * the context for the new mm so we see the new mappings.
+ */
+void activate_mm(struct mm_struct *prev, struct mm_struct *next);
+
+#endif /* _ASM_NIOS2_MMU_CONTEXT_H */
diff --git a/arch/nios2/include/asm/mutex.h b/arch/nios2/include/asm/mutex.h
new file mode 100644
index 000000000000..ff6101aa2c71
--- /dev/null
+++ b/arch/nios2/include/asm/mutex.h
@@ -0,0 +1 @@
+#include <asm-generic/mutex-dec.h>
diff --git a/arch/nios2/include/asm/page.h b/arch/nios2/include/asm/page.h
new file mode 100644
index 000000000000..4b32d6fd9d98
--- /dev/null
+++ b/arch/nios2/include/asm/page.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2011 Tobias Klauser <tklauser@distanz.ch>
+ * Copyright (C) 2004 Microtronix Datacom Ltd.
+ *
+ * MMU support based on asm/page.h from mips which is:
+ *
+ * Copyright (C) 1994 - 1999, 2000, 03 Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef _ASM_NIOS2_PAGE_H
+#define _ASM_NIOS2_PAGE_H
+
+#include <linux/pfn.h>
+#include <linux/const.h>
+
+/*
+ * PAGE_SHIFT determines the page size
+ */
+#define PAGE_SHIFT 12
+#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE - 1))
+
+/*
+ * PAGE_OFFSET -- the first address of the first page of memory.
+ */
+#define PAGE_OFFSET \
+ (CONFIG_NIOS2_MEM_BASE + CONFIG_NIOS2_KERNEL_REGION_BASE)
+
+#ifndef __ASSEMBLY__
+
+/*
+ * This gives the physical RAM offset.
+ */
+#define PHYS_OFFSET CONFIG_NIOS2_MEM_BASE
+
+/*
+ * It's normally defined only for FLATMEM config but it's
+ * used in our early mem init code for all memory models.
+ * So always define it.
+ */
+#define ARCH_PFN_OFFSET PFN_UP(PHYS_OFFSET)
+
+#define clear_page(page) memset((page), 0, PAGE_SIZE)
+#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)
+
+struct page;
+
+extern void clear_user_page(void *addr, unsigned long vaddr, struct page *page);
+extern void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
+ struct page *to);
+
+/*
+ * These are used to make use of C type-checking.
+ */
+typedef struct page *pgtable_t;
+typedef struct { unsigned long pte; } pte_t;
+typedef struct { unsigned long pgd; } pgd_t;
+typedef struct { unsigned long pgprot; } pgprot_t;
+
+#define pte_val(x) ((x).pte)
+#define pgd_val(x) ((x).pgd)
+#define pgprot_val(x) ((x).pgprot)
+
+#define __pte(x) ((pte_t) { (x) })
+#define __pgd(x) ((pgd_t) { (x) })
+#define __pgprot(x) ((pgprot_t) { (x) })
+
+extern unsigned long memory_start;
+extern unsigned long memory_end;
+extern unsigned long memory_size;
+
+extern struct page *mem_map;
+
+#endif /* !__ASSEMBLY__ */
+
+# define __pa(x) \
+ ((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET)
+# define __va(x) \
+ ((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET))
+
+#define page_to_virt(page) \
+ ((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET)
+
+# define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
+# define pfn_valid(pfn) ((pfn) >= ARCH_PFN_OFFSET && \
+ (pfn) < max_mapnr)
+
+# define virt_to_page(vaddr) pfn_to_page(PFN_DOWN(virt_to_phys(vaddr)))
+# define virt_addr_valid(vaddr) pfn_valid(PFN_DOWN(virt_to_phys(vaddr)))
+
+# define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+# define UNCAC_ADDR(addr) \
+ ((void *)((unsigned)(addr) | CONFIG_NIOS2_IO_REGION_BASE))
+# define CAC_ADDR(addr) \
+ ((void *)(((unsigned)(addr) & ~CONFIG_NIOS2_IO_REGION_BASE) | \
+ CONFIG_NIOS2_KERNEL_REGION_BASE))
+
+#include <asm-generic/memory_model.h>
+
+#include <asm-generic/getorder.h>
+
+#endif /* _ASM_NIOS2_PAGE_H */
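
Note: __pa()/__va() above move between the kernel's linear mapping and physical RAM by the constant difference PAGE_OFFSET - PHYS_OFFSET. A quick sketch of that arithmetic with assumed example values (PHYS_OFFSET = 0, PAGE_OFFSET = 0xC0000000; the real values come from CONFIG_NIOS2_MEM_BASE and CONFIG_NIOS2_KERNEL_REGION_BASE):

#include <stdio.h>

/* Assumed example values, not the kernel's configured ones. */
#define MY_PHYS_OFFSET	0x00000000UL
#define MY_PAGE_OFFSET	0xC0000000UL

#define my_pa(x)	((unsigned long)(x) - MY_PAGE_OFFSET + MY_PHYS_OFFSET)
#define my_va(x)	((void *)((unsigned long)(x) + MY_PAGE_OFFSET - MY_PHYS_OFFSET))

int main(void)
{
	unsigned long kva = 0xC0008000UL;	/* some kernel virtual address */

	printf("__pa(0x%08lx) = 0x%08lx\n", kva, my_pa(kva));
	printf("__va(0x%08lx) = %p\n", my_pa(kva), my_va(my_pa(kva)));
	return 0;
}
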
diff --git a/arch/nios2/include/asm/pgalloc.h b/arch/nios2/include/asm/pgalloc.h
new file mode 100644
index 000000000000..6e2985e0a7b9
--- /dev/null
+++ b/arch/nios2/include/asm/pgalloc.h
@@ -0,0 +1,86 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994 - 2001, 2003 by Ralf Baechle
+ * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
+ */
+
+#ifndef _ASM_NIOS2_PGALLOC_H
+#define _ASM_NIOS2_PGALLOC_H
+
+#include <linux/mm.h>
+
+static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
+ pte_t *pte)
+{
+ set_pmd(pmd, __pmd((unsigned long)pte));
+}
+
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
+ pgtable_t pte)
+{
+ set_pmd(pmd, __pmd((unsigned long)page_address(pte)));
+}
+#define pmd_pgtable(pmd) pmd_page(pmd)
+
+/*
+ * Initialize a new pmd table with invalid pointers.
+ */
+extern void pmd_init(unsigned long page, unsigned long pagetable);
+
+extern pgd_t *pgd_alloc(struct mm_struct *mm);
+
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+ free_pages((unsigned long)pgd, PGD_ORDER);
+}
+
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+ unsigned long address)
+{
+ pte_t *pte;
+
+ pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO,
+ PTE_ORDER);
+
+ return pte;
+}
+
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
+ unsigned long address)
+{
+ struct page *pte;
+
+ pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER);
+ if (pte) {
+ if (!pgtable_page_ctor(pte)) {
+ __free_page(pte);
+ return NULL;
+ }
+ clear_highpage(pte);
+ }
+ return pte;
+}
+
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+ free_pages((unsigned long)pte, PTE_ORDER);
+}
+
+static inline void pte_free(struct mm_struct *mm, struct page *pte)
+{
+ pgtable_page_dtor(pte);
+ __free_pages(pte, PTE_ORDER);
+}
+
+#define __pte_free_tlb(tlb, pte, addr) \
+ do { \
+ pgtable_page_dtor(pte); \
+ tlb_remove_page((tlb), (pte)); \
+ } while (0)
+
+#define check_pgt_cache() do { } while (0)
+
+#endif /* _ASM_NIOS2_PGALLOC_H */
diff --git a/arch/nios2/include/asm/pgtable-bits.h b/arch/nios2/include/asm/pgtable-bits.h
new file mode 100644
index 000000000000..ce9e7069aa96
--- /dev/null
+++ b/arch/nios2/include/asm/pgtable-bits.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2011 Tobias Klauser <tklauser@distanz.ch>
+ * Copyright (C) 2009 Wind River Systems Inc
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef _ASM_NIOS2_PGTABLE_BITS_H
+#define _ASM_NIOS2_PGTABLE_BITS_H
+
+/*
+ * These are actual hardware defined protection bits in the tlbacc register
+ * which looks like this:
+ *
+ * 31 30 ... 26 25 24 23 22 21 20 19 18 ... 1 0
+ * ignored........ C R W X G PFN............
+ */
+#define _PAGE_GLOBAL (1<<20)
+#define _PAGE_EXEC (1<<21)
+#define _PAGE_WRITE (1<<22)
+#define _PAGE_READ (1<<23)
+#define _PAGE_CACHED (1<<24) /* C: data access cacheable */
+
+/*
+ * Software defined bits. They are ignored by the hardware and always read back
+ * as zero, but can be written as non-zero.
+ */
+#define _PAGE_PRESENT (1<<25) /* PTE contains a translation */
+#define _PAGE_ACCESSED (1<<26) /* page referenced */
+#define _PAGE_DIRTY (1<<27) /* dirty page */
+#define _PAGE_FILE (1<<28) /* PTE used for file mapping or swap */
+
+#endif /* _ASM_NIOS2_PGTABLE_BITS_H */
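
Note: given the layout in the comment above (PFN in bits 0-19, then G, X, W, R, C in bits 20-24), composing a hardware PTE value is plain bit assembly. A minimal sketch, reusing the hardware bit definitions from this header (mk_hw_pte and the chosen PFN are illustrative only):

#include <stdio.h>

#define _PAGE_GLOBAL	(1 << 20)
#define _PAGE_EXEC	(1 << 21)
#define _PAGE_WRITE	(1 << 22)
#define _PAGE_READ	(1 << 23)
#define _PAGE_CACHED	(1 << 24)

/* Build a tlbacc-style value: PFN in the low 20 bits, flags above it. */
static unsigned long mk_hw_pte(unsigned long pfn, unsigned long flags)
{
	return (pfn & 0xfffff) | flags;
}

int main(void)
{
	unsigned long pte = mk_hw_pte(0x00123,
				      _PAGE_CACHED | _PAGE_READ | _PAGE_WRITE);

	printf("pte = 0x%08lx (pfn 0x%05lx)\n", pte, pte & 0xfffff);
	return 0;
}
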
diff --git a/arch/nios2/include/asm/pgtable.h b/arch/nios2/include/asm/pgtable.h
new file mode 100644
index 000000000000..ccbaffd47671
--- /dev/null
+++ b/arch/nios2/include/asm/pgtable.h
@@ -0,0 +1,310 @@
+/*
+ * Copyright (C) 2011 Tobias Klauser <tklauser@distanz.ch>
+ * Copyright (C) 2009 Wind River Systems Inc
+ *
+ * Based on asm/pgtable-32.h from mips which is:
+ *
+ * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle
+ * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef _ASM_NIOS2_PGTABLE_H
+#define _ASM_NIOS2_PGTABLE_H
+
+#include <linux/io.h>
+#include <linux/bug.h>
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+
+#include <asm/pgtable-bits.h>
+#include <asm-generic/pgtable-nopmd.h>
+
+#define FIRST_USER_ADDRESS 0
+
+#define VMALLOC_START CONFIG_NIOS2_KERNEL_MMU_REGION_BASE
+#define VMALLOC_END (CONFIG_NIOS2_KERNEL_REGION_BASE - 1)
+
+struct mm_struct;
+
+/* Helper macro */
+#define MKP(x, w, r) __pgprot(_PAGE_PRESENT | _PAGE_CACHED | \
+ ((x) ? _PAGE_EXEC : 0) | \
+ ((r) ? _PAGE_READ : 0) | \
+ ((w) ? _PAGE_WRITE : 0))
+/*
+ * These are the macros that generic kernel code needs
+ * (to populate protection_map[])
+ */
+
+/* Remove W bit on private pages for COW support */
+#define __P000 MKP(0, 0, 0)
+#define __P001 MKP(0, 0, 1)
+#define __P010 MKP(0, 0, 0) /* COW */
+#define __P011 MKP(0, 0, 1) /* COW */
+#define __P100 MKP(1, 0, 0)
+#define __P101 MKP(1, 0, 1)
+#define __P110 MKP(1, 0, 0) /* COW */
+#define __P111 MKP(1, 0, 1) /* COW */
+
+/* Shared pages can have exact HW mapping */
+#define __S000 MKP(0, 0, 0)
+#define __S001 MKP(0, 0, 1)
+#define __S010 MKP(0, 1, 0)
+#define __S011 MKP(0, 1, 1)
+#define __S100 MKP(1, 0, 0)
+#define __S101 MKP(1, 0, 1)
+#define __S110 MKP(1, 1, 0)
+#define __S111 MKP(1, 1, 1)
+
+/* Used all over the kernel */
+#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_CACHED | _PAGE_READ | \
+ _PAGE_WRITE | _PAGE_EXEC | _PAGE_GLOBAL)
+
+#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_CACHED | _PAGE_READ | \
+ _PAGE_WRITE | _PAGE_ACCESSED)
+
+#define PAGE_COPY MKP(0, 0, 1)
+
+#define PGD_ORDER 0
+#define PTE_ORDER 0
+
+#define PTRS_PER_PGD ((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t))
+#define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))
+
+#define USER_PTRS_PER_PGD \
+ (CONFIG_NIOS2_KERNEL_MMU_REGION_BASE / PGDIR_SIZE)
+
+#define PGDIR_SHIFT 22
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+
+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+extern pte_t invalid_pte_table[PAGE_SIZE/sizeof(pte_t)];
+
+/*
+ * (pmds are folded into puds so this doesn't get actually called,
+ * but the define is needed for a generic inline function.)
+ */
+static inline void set_pmd(pmd_t *pmdptr, pmd_t pmdval)
+{
+ pmdptr->pud.pgd.pgd = pmdval.pud.pgd.pgd;
+}
+
+/* to find an entry in a page-table-directory */
+#define pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
+#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))
+
+static inline int pte_write(pte_t pte) \
+ { return pte_val(pte) & _PAGE_WRITE; }
+static inline int pte_dirty(pte_t pte) \
+ { return pte_val(pte) & _PAGE_DIRTY; }
+static inline int pte_young(pte_t pte) \
+ { return pte_val(pte) & _PAGE_ACCESSED; }
+static inline int pte_file(pte_t pte) \
+ { return pte_val(pte) & _PAGE_FILE; }
+static inline int pte_special(pte_t pte) { return 0; }
+
+#define pgprot_noncached pgprot_noncached
+
+static inline pgprot_t pgprot_noncached(pgprot_t _prot)
+{
+ unsigned long prot = pgprot_val(_prot);
+
+ prot &= ~_PAGE_CACHED;
+
+ return __pgprot(prot);
+}
+
+static inline int pte_none(pte_t pte)
+{
+ return !(pte_val(pte) & ~(_PAGE_GLOBAL|0xf));
+}
+
+static inline int pte_present(pte_t pte) \
+ { return pte_val(pte) & _PAGE_PRESENT; }
+
+/*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not..
+ */
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+ pte_val(pte) &= ~_PAGE_WRITE;
+ return pte;
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+ pte_val(pte) &= ~_PAGE_DIRTY;
+ return pte;
+}
+
+static inline pte_t pte_mkold(pte_t pte)
+{
+ pte_val(pte) &= ~_PAGE_ACCESSED;
+ return pte;
+}
+
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+ pte_val(pte) |= _PAGE_WRITE;
+ return pte;
+}
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+ pte_val(pte) |= _PAGE_DIRTY;
+ return pte;
+}
+
+static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+ pte_val(pte) |= _PAGE_ACCESSED;
+ return pte;
+}
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+ const unsigned long mask = _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC;
+
+ pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
+ return pte;
+}
+
+static inline int pmd_present(pmd_t pmd)
+{
+ return (pmd_val(pmd) != (unsigned long) invalid_pte_table)
+ && (pmd_val(pmd) != 0UL);
+}
+
+static inline void pmd_clear(pmd_t *pmdp)
+{
+ pmd_val(*pmdp) = (unsigned long) invalid_pte_table;
+}
+
+#define pte_pfn(pte) (pte_val(pte) & 0xfffff)
+#define pfn_pte(pfn, prot) (__pte(pfn | pgprot_val(prot)))
+#define pte_page(pte) (pfn_to_page(pte_pfn(pte)))
+
+/*
+ * Store a linux PTE into the linux page table.
+ */
+static inline void set_pte(pte_t *ptep, pte_t pteval)
+{
+ *ptep = pteval;
+}
+
+static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pteval)
+{
+ unsigned long paddr = page_to_virt(pte_page(pteval));
+
+ flush_dcache_range(paddr, paddr + PAGE_SIZE);
+ set_pte(ptep, pteval);
+}
+
+static inline int pmd_none(pmd_t pmd)
+{
+ return (pmd_val(pmd) ==
+ (unsigned long) invalid_pte_table) || (pmd_val(pmd) == 0UL);
+}
+
+#define pmd_bad(pmd) (pmd_val(pmd) & ~PAGE_MASK)
+
+static inline void pte_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ pte_t null;
+
+ pte_val(null) = (addr >> PAGE_SHIFT) & 0xf;
+
+ set_pte_at(mm, addr, ptep, null);
+ flush_tlb_one(addr);
+}
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+#define mk_pte(page, prot) (pfn_pte(page_to_pfn(page), prot))
+
+#define pte_unmap(pte) do { } while (0)
+
+/*
+ * Accessors for pmd entries: the physical address, the struct page and
+ * the kernel virtual address of the page table a pmd entry points to.
+ */
+#define pmd_phys(pmd) virt_to_phys((void *)pmd_val(pmd))
+#define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
+#define pmd_page_vaddr(pmd) pmd_val(pmd)
+
+#define pte_offset_map(dir, addr) \
+ ((pte_t *) page_address(pmd_page(*dir)) + \
+ (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
+
+/* to find an entry in a kernel page-table-directory */
+#define pgd_offset_k(addr) pgd_offset(&init_mm, addr)
+
+/* Get the address to the PTE for a vaddr in specific directory */
+#define pte_offset_kernel(dir, addr) \
+ ((pte_t *) pmd_page_vaddr(*(dir)) + \
+ (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
+
+#define pte_ERROR(e) \
+ pr_err("%s:%d: bad pte %08lx.\n", \
+ __FILE__, __LINE__, pte_val(e))
+#define pgd_ERROR(e) \
+ pr_err("%s:%d: bad pgd %08lx.\n", \
+ __FILE__, __LINE__, pgd_val(e))
+
+/*
+ * Encode and decode a swap entry (must be !pte_none(pte) && !pte_present(pte)
+ * && !pte_file(pte)):
+ *
+ * 31 30 29 28 27 26 25 24 23 22 21 20 19 18 ... 1 0
+ * 0 0 0 0 type. 0 0 0 0 0 0 offset.........
+ *
+ * This gives us up to 2**2 = 4 swap files and 2**20 * 4K = 4G per swap file.
+ *
+ * Note that the offset field is always non-zero, thus !pte_none(pte) is always
+ * true.
+ */
+#define __swp_type(swp) (((swp).val >> 26) & 0x3)
+#define __swp_offset(swp) ((swp).val & 0xfffff)
+#define __swp_entry(type, off) ((swp_entry_t) { (((type) & 0x3) << 26) \
+ | ((off) & 0xfffff) })
+#define __swp_entry_to_pte(swp) ((pte_t) { (swp).val })
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+
+/* Encode and decode a nonlinear file mapping entry */
+#define PTE_FILE_MAX_BITS 25
+#define pte_to_pgoff(pte) (pte_val(pte) & 0x1ffffff)
+#define pgoff_to_pte(off) __pte(((off) & 0x1ffffff) | _PAGE_FILE)
+
+#define kern_addr_valid(addr) (1)
+
+#include <asm-generic/pgtable.h>
+
+#define pgtable_cache_init() do { } while (0)
+
+extern void __init paging_init(void);
+extern void __init mmu_init(void);
+
+extern void update_mmu_cache(struct vm_area_struct *vma,
+ unsigned long address, pte_t *pte);
+
+#endif /* _ASM_NIOS2_PGTABLE_H */
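
Note: the swap-entry encoding near the end of the file packs a 2-bit type into bits 26-27 and a 20-bit offset into the low bits, which is how the comment arrives at 4 swap files of up to 4G each. A stand-alone round-trip sketch of the same encode/decode arithmetic (macro names here are local stand-ins for __swp_type/__swp_offset/__swp_entry):

#include <stdio.h>

#define SWP_TYPE(val)		(((val) >> 26) & 0x3)
#define SWP_OFFSET(val)		((val) & 0xfffff)
#define SWP_ENTRY(type, off)	((((unsigned long)(type) & 0x3) << 26) | \
				 ((unsigned long)(off) & 0xfffff))

int main(void)
{
	unsigned long e = SWP_ENTRY(2, 0x1234);

	printf("entry=0x%08lx type=%lu offset=0x%05lx\n",
	       e, SWP_TYPE(e), SWP_OFFSET(e));
	return 0;
}
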
diff --git a/arch/nios2/include/asm/processor.h b/arch/nios2/include/asm/processor.h
new file mode 100644
index 000000000000..3bd349473b06
--- /dev/null
+++ b/arch/nios2/include/asm/processor.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2013 Altera Corporation
+ * Copyright (C) 2010 Tobias Klauser <tklauser@distanz.ch>
+ * Copyright (C) 2004 Microtronix Datacom Ltd
+ * Copyright (C) 2001 Ken Hill (khill@microtronix.com)
+ * Vic Phillips (vic@microtronix.com)
+ *
+ * based on SPARC asm/processor_32.h which is:
+ *
+ * Copyright (C) 1994 David S. Miller
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef _ASM_NIOS2_PROCESSOR_H
+#define _ASM_NIOS2_PROCESSOR_H
+
+#include <asm/ptrace.h>
+#include <asm/registers.h>
+#include <asm/page.h>
+
+#define NIOS2_FLAG_KTHREAD 0x00000001 /* task is a kernel thread */
+
+#define NIOS2_OP_NOP 0x1883a
+#define NIOS2_OP_BREAK 0x3da03a
+
+#ifdef __KERNEL__
+
+#define STACK_TOP TASK_SIZE
+#define STACK_TOP_MAX STACK_TOP
+
+#endif /* __KERNEL__ */
+
+/* Kuser helpers is mapped to this user space address */
+#define KUSER_BASE 0x1000
+#define KUSER_SIZE (PAGE_SIZE)
+#ifndef __ASSEMBLY__
+
+/*
+ * Default implementation of macro that returns current
+ * instruction pointer ("program counter").
+ */
+#define current_text_addr() ({ __label__ _l; _l: &&_l; })
+
+# define TASK_SIZE 0x7FFF0000UL
+# define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
+
+/* The Nios processor specific thread struct. */
+struct thread_struct {
+ struct pt_regs *kregs;
+
+ /* Context switch saved kernel state. */
+ unsigned long ksp;
+ unsigned long kpsr;
+};
+
+#define INIT_MMAP \
+ { &init_mm, (0), (0), __pgprot(0x0), VM_READ | VM_WRITE | VM_EXEC }
+
+# define INIT_THREAD { \
+ .kregs = NULL, \
+ .ksp = 0, \
+ .kpsr = 0, \
+}
+
+extern void start_thread(struct pt_regs *regs, unsigned long pc,
+ unsigned long sp);
+
+struct task_struct;
+
+/* Free all resources held by a thread. */
+static inline void release_thread(struct task_struct *dead_task)
+{
+}
+
+/* Free current thread data structures etc.. */
+static inline void exit_thread(void)
+{
+}
+
+/* Return saved PC of a blocked thread. */
+#define thread_saved_pc(tsk) ((tsk)->thread.kregs->ea)
+
+extern unsigned long get_wchan(struct task_struct *p);
+
+/* Prepare to copy thread state - unlazy all lazy status */
+#define prepare_to_copy(tsk) do { } while (0)
+
+#define task_pt_regs(p) \
+ ((struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1)
+
+/* Used by procfs */
+#define KSTK_EIP(tsk) ((tsk)->thread.kregs->ea)
+#define KSTK_ESP(tsk) ((tsk)->thread.kregs->sp)
+
+#define cpu_relax() barrier()
+#define cpu_relax_lowlatency() cpu_relax()
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_NIOS2_PROCESSOR_H */
diff --git a/arch/nios2/include/asm/ptrace.h b/arch/nios2/include/asm/ptrace.h
new file mode 100644
index 000000000000..20fb1cf2dab6
--- /dev/null
+++ b/arch/nios2/include/asm/ptrace.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2013 Altera Corporation
+ * Copyright (C) 2010 Tobias Klauser <tklauser@distanz.ch>
+ * Copyright (C) 2004 Microtronix Datacom Ltd
+ *
+ * based on m68k asm/processor.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef _ASM_NIOS2_PTRACE_H
+#define _ASM_NIOS2_PTRACE_H
+
+#include <uapi/asm/ptrace.h>
+
+#ifndef __ASSEMBLY__
+#define user_mode(regs) (((regs)->estatus & ESTATUS_EU))
+
+#define instruction_pointer(regs) ((regs)->ra)
+#define profile_pc(regs) instruction_pointer(regs)
+#define user_stack_pointer(regs) ((regs)->sp)
+extern void show_regs(struct pt_regs *);
+
+#define current_pt_regs() \
+ ((struct pt_regs *)((unsigned long)current_thread_info() + THREAD_SIZE)\
+ - 1)
+
+int do_syscall_trace_enter(void);
+void do_syscall_trace_exit(void);
+#endif /* __ASSEMBLY__ */
+#endif /* _ASM_NIOS2_PTRACE_H */
diff --git a/arch/nios2/include/asm/registers.h b/arch/nios2/include/asm/registers.h
new file mode 100644
index 000000000000..615bce19b546
--- /dev/null
+++ b/arch/nios2/include/asm/registers.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2011 Tobias Klauser <tklauser@distanz.ch>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#ifndef _ASM_NIOS2_REGISTERS_H
+#define _ASM_NIOS2_REGISTERS_H
+
+#ifndef __ASSEMBLY__
+#include <asm/cpuinfo.h>
+#endif
+
+/* control register numbers */
+#define CTL_STATUS 0
+#define CTL_ESTATUS 1
+#define CTL_BSTATUS 2
+#define CTL_IENABLE 3
+#define CTL_IPENDING 4
+#define CTL_CPUID 5
+#define CTL_RSV1 6
+#define CTL_EXCEPTION 7
+#define CTL_PTEADDR 8
+#define CTL_TLBACC 9
+#define CTL_TLBMISC 10
+#define CTL_RSV2 11
+#define CTL_BADADDR 12
+#define CTL_CONFIG 13
+#define CTL_MPUBASE 14
+#define CTL_MPUACC 15
+
+/* access control registers using GCC builtins */
+#define RDCTL(r) __builtin_rdctl(r)
+#define WRCTL(r, v) __builtin_wrctl(r, v)
+
+/* status register bits */
+#define STATUS_PIE (1 << 0) /* processor interrupt enable */
+#define STATUS_U (1 << 1) /* user mode */
+#define STATUS_EH (1 << 2) /* Exception mode */
+
+/* estatus register bits */
+#define ESTATUS_EPIE (1 << 0) /* processor interrupt enable */
+#define ESTATUS_EU (1 << 1) /* user mode */
+#define ESTATUS_EH (1 << 2) /* Exception mode */
+
+/* tlbmisc register bits */
+#define TLBMISC_PID_SHIFT 4
+#ifndef __ASSEMBLY__
+#define TLBMISC_PID_MASK ((1UL << cpuinfo.tlb_pid_num_bits) - 1)
+#endif
+#define TLBMISC_WAY_MASK 0xf
+#define TLBMISC_WAY_SHIFT 20
+
+#define TLBMISC_PID (TLBMISC_PID_MASK << TLBMISC_PID_SHIFT) /* TLB PID */
+#define TLBMISC_WE (1 << 18) /* TLB write enable */
+#define TLBMISC_RD (1 << 19) /* TLB read */
+#define TLBMISC_WAY (TLBMISC_WAY_MASK << TLBMISC_WAY_SHIFT) /* TLB way */
+
+#endif /* _ASM_NIOS2_REGISTERS_H */
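
Note: TLBMISC packs the PID at bit 4 (with a width that depends on the FPGA configuration, hence the cpuinfo-derived mask), the write-enable and read bits at 18 and 19, and the way index at bit 20. A sketch that assembles such a value, assuming an example PID width of 8 bits (PID_BITS and mk_tlbmisc are illustrative only):

#include <stdio.h>

#define TLBMISC_PID_SHIFT	4
#define TLBMISC_WE		(1 << 18)
#define TLBMISC_WAY_SHIFT	20

/* Assumed example: 8 PID bits; the kernel derives this from cpuinfo. */
#define PID_BITS		8
#define TLBMISC_PID_MASK	((1UL << PID_BITS) - 1)

static unsigned long mk_tlbmisc(unsigned long pid, unsigned long way)
{
	return ((pid & TLBMISC_PID_MASK) << TLBMISC_PID_SHIFT) |
	       (way << TLBMISC_WAY_SHIFT) | TLBMISC_WE;
}

int main(void)
{
	printf("tlbmisc = 0x%08lx\n", mk_tlbmisc(0x2a, 3));
	return 0;
}
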
diff --git a/arch/nios2/include/asm/setup.h b/arch/nios2/include/asm/setup.h
new file mode 100644
index 000000000000..dcbf8cf1a344
--- /dev/null
+++ b/arch/nios2/include/asm/setup.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2011 Tobias Klauser <tklauser@distanz.ch>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#ifndef _ASM_NIOS2_SETUP_H
+#define _ASM_NIOS2_SETUP_H
+
+#include <asm-generic/setup.h>
+
+#ifndef __ASSEMBLY__
+#ifdef __KERNEL__
+
+extern char exception_handler_hook[];
+extern char fast_handler[];
+extern char fast_handler_end[];
+
+extern void pagetable_init(void);
+
+extern void setup_early_printk(void);
+
+#endif/* __KERNEL__ */
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_NIOS2_SETUP_H */
diff --git a/arch/nios2/include/asm/signal.h b/arch/nios2/include/asm/signal.h
new file mode 100644
index 000000000000..bbcf11eecb01
--- /dev/null
+++ b/arch/nios2/include/asm/signal.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright Altera Corporation (C) 2013. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+#ifndef _NIOS2_SIGNAL_H
+#define _NIOS2_SIGNAL_H
+
+#include <uapi/asm/signal.h>
+
+#endif /* _NIOS2_SIGNAL_H */
diff --git a/arch/nios2/include/asm/string.h b/arch/nios2/include/asm/string.h
new file mode 100644
index 000000000000..14dd570d64f7
--- /dev/null
+++ b/arch/nios2/include/asm/string.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2004 Microtronix Datacom Ltd
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef _ASM_NIOS2_STRING_H
+#define _ASM_NIOS2_STRING_H
+
+#ifdef __KERNEL__
+
+#define __HAVE_ARCH_MEMSET
+#define __HAVE_ARCH_MEMCPY
+#define __HAVE_ARCH_MEMMOVE
+
+extern void *memset(void *s, int c, size_t count);
+extern void *memcpy(void *d, const void *s, size_t count);
+extern void *memmove(void *d, const void *s, size_t count);
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_NIOS2_STRING_H */
diff --git a/arch/nios2/include/asm/switch_to.h b/arch/nios2/include/asm/switch_to.h
new file mode 100644
index 000000000000..c47b3f4afbcd
--- /dev/null
+++ b/arch/nios2/include/asm/switch_to.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2004 Microtronix Datacom Ltd.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#ifndef _ASM_NIOS2_SWITCH_TO_H
+#define _ASM_NIOS2_SWITCH_TO_H
+
+/*
+ * switch_to(prev, next, last) should switch to the task "next", first
+ * checking that it isn't the current task, in which case it does
+ * nothing. This also clears the TS-flag if the task we switched to
+ * used the math co-processor most recently.
+ */
+#define switch_to(prev, next, last) \
+{ \
+ void *_last; \
+ __asm__ __volatile__ ( \
+ "mov r4, %1\n" \
+ "mov r5, %2\n" \
+ "call resume\n" \
+ "mov %0,r4\n" \
+ : "=r" (_last) \
+ : "r" (prev), "r" (next) \
+ : "r4", "r5", "r7", "r8", "ra"); \
+ (last) = _last; \
+}
+
+#endif /* _ASM_NIOS2_SWITCH_TO_H */
diff --git a/arch/nios2/include/asm/syscall.h b/arch/nios2/include/asm/syscall.h
new file mode 100644
index 000000000000..9de220854c4a
--- /dev/null
+++ b/arch/nios2/include/asm/syscall.h
@@ -0,0 +1,138 @@
+/*
+ * Copyright Altera Corporation (C) <2014>. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ASM_NIOS2_SYSCALL_H__
+#define __ASM_NIOS2_SYSCALL_H__
+
+#include <linux/err.h>
+#include <linux/sched.h>
+
+static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
+{
+ return regs->r2;
+}
+
+static inline void syscall_rollback(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ regs->r2 = regs->orig_r2;
+ regs->r7 = regs->orig_r7;
+}
+
+static inline long syscall_get_error(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ return regs->r7 ? regs->r2 : 0;
+}
+
+static inline long syscall_get_return_value(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ return regs->r2;
+}
+
+static inline void syscall_set_return_value(struct task_struct *task,
+ struct pt_regs *regs, int error, long val)
+{
+ if (error) {
+ /* error < 0, but nios2 uses > 0 return value */
+ regs->r2 = -error;
+ regs->r7 = 1;
+ } else {
+ regs->r2 = val;
+ regs->r7 = 0;
+ }
+}
+
+static inline void syscall_get_arguments(struct task_struct *task,
+ struct pt_regs *regs, unsigned int i, unsigned int n,
+ unsigned long *args)
+{
+ BUG_ON(i + n > 6);
+
+ switch (i) {
+ case 0:
+ if (!n--)
+ break;
+ *args++ = regs->r4;
+ case 1:
+ if (!n--)
+ break;
+ *args++ = regs->r5;
+ case 2:
+ if (!n--)
+ break;
+ *args++ = regs->r6;
+ case 3:
+ if (!n--)
+ break;
+ *args++ = regs->r7;
+ case 4:
+ if (!n--)
+ break;
+ *args++ = regs->r8;
+ case 5:
+ if (!n--)
+ break;
+ *args++ = regs->r9;
+ case 6:
+ if (!n--)
+ break;
+ default:
+ BUG();
+ }
+}
+
+static inline void syscall_set_arguments(struct task_struct *task,
+ struct pt_regs *regs, unsigned int i, unsigned int n,
+ const unsigned long *args)
+{
+ BUG_ON(i + n > 6);
+
+ switch (i) {
+ case 0:
+ if (!n--)
+ break;
+ regs->r4 = *args++;
+ case 1:
+ if (!n--)
+ break;
+ regs->r5 = *args++;
+ case 2:
+ if (!n--)
+ break;
+ regs->r6 = *args++;
+ case 3:
+ if (!n--)
+ break;
+ regs->r7 = *args++;
+ case 4:
+ if (!n--)
+ break;
+ regs->r8 = *args++;
+ case 5:
+ if (!n--)
+ break;
+ regs->r9 = *args++;
+ case 6:
+ if (!n)
+ break;
+ default:
+ BUG();
+ }
+}
+
+#endif
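
Note: syscall_get_arguments()/syscall_set_arguments() above rely on switch fall-through: enter at case i, then keep copying registers until n reaches zero. A self-contained sketch of the same idiom against a fake register file (struct fake_regs and get_args are stand-ins):

#include <stdio.h>

struct fake_regs { unsigned long r4, r5, r6, r7, r8, r9; };

/* Copy n syscall arguments starting at position i, using the same
 * fall-through switch as syscall_get_arguments() above. */
static void get_args(struct fake_regs *regs, unsigned int i,
		     unsigned int n, unsigned long *args)
{
	switch (i) {
	case 0:
		if (!n--)
			break;
		*args++ = regs->r4;
		/* fall through */
	case 1:
		if (!n--)
			break;
		*args++ = regs->r5;
		/* fall through */
	case 2:
		if (!n--)
			break;
		*args++ = regs->r6;
		/* fall through */
	case 3:
		if (!n--)
			break;
		*args++ = regs->r7;
		/* fall through */
	case 4:
		if (!n--)
			break;
		*args++ = regs->r8;
		/* fall through */
	case 5:
		if (!n--)
			break;
		*args++ = regs->r9;
	}
}

int main(void)
{
	struct fake_regs regs = { 10, 11, 12, 13, 14, 15 };
	unsigned long args[3];

	get_args(&regs, 2, 3, args);	/* picks up r6, r7, r8 */
	printf("%lu %lu %lu\n", args[0], args[1], args[2]);
	return 0;
}
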
diff --git a/arch/nios2/include/asm/syscalls.h b/arch/nios2/include/asm/syscalls.h
new file mode 100644
index 000000000000..0245d780351b
--- /dev/null
+++ b/arch/nios2/include/asm/syscalls.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright Altera Corporation (C) 2013. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+#ifndef __ASM_NIOS2_SYSCALLS_H
+#define __ASM_NIOS2_SYSCALLS_H
+
+int sys_cacheflush(unsigned long addr, unsigned long len,
+ unsigned int op);
+
+#include <asm-generic/syscalls.h>
+
+#endif /* __ASM_NIOS2_SYSCALLS_H */
diff --git a/arch/nios2/include/asm/thread_info.h b/arch/nios2/include/asm/thread_info.h
new file mode 100644
index 000000000000..1f266575beb5
--- /dev/null
+++ b/arch/nios2/include/asm/thread_info.h
@@ -0,0 +1,120 @@
+/*
+ * NiosII low-level thread information
+ *
+ * Copyright (C) 2011 Tobias Klauser <tklauser@distanz.ch>
+ * Copyright (C) 2004 Microtronix Datacom Ltd.
+ *
+ * Based on asm/thread_info_no.h from m68k which is:
+ *
+ * Copyright (C) 2002 David Howells <dhowells@redhat.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef _ASM_NIOS2_THREAD_INFO_H
+#define _ASM_NIOS2_THREAD_INFO_H
+
+#ifdef __KERNEL__
+
+/*
+ * Size of the kernel stack for each process.
+ */
+#define THREAD_SIZE_ORDER 1
+#define THREAD_SIZE 8192 /* 2 * PAGE_SIZE */
+
+#ifndef __ASSEMBLY__
+
+typedef struct {
+ unsigned long seg;
+} mm_segment_t;
+
+/*
+ * low level task data that entry.S needs immediate access to
+ * - this struct should fit entirely inside of one cache line
+ * - this struct shares the supervisor stack pages
+ * - if the contents of this structure are changed, the assembly constants
+ * must also be changed
+ */
+struct thread_info {
+ struct task_struct *task; /* main task structure */
+ struct exec_domain *exec_domain; /* execution domain */
+ unsigned long flags; /* low level flags */
+ __u32 cpu; /* current CPU */
+ int preempt_count; /* 0 => preemptable,<0 => BUG */
+ mm_segment_t addr_limit; /* thread address space:
+						0-0x7FFFFFFF for user-thread
+ 0-0xFFFFFFFF for kernel-thread
+ */
+ struct restart_block restart_block;
+ struct pt_regs *regs;
+};
+
+/*
+ * macros/functions for gaining access to the thread information structure
+ *
+ * preempt_count needs to be 1 initially, until the scheduler is functional.
+ */
+#define INIT_THREAD_INFO(tsk) \
+{ \
+ .task = &tsk, \
+ .exec_domain = &default_exec_domain, \
+ .flags = 0, \
+ .cpu = 0, \
+ .preempt_count = INIT_PREEMPT_COUNT, \
+ .addr_limit = KERNEL_DS, \
+ .restart_block = { \
+ .fn = do_no_restart_syscall, \
+ }, \
+}
+
+#define init_thread_info (init_thread_union.thread_info)
+#define init_stack (init_thread_union.stack)
+
+/* how to get the thread information struct from C */
+static inline struct thread_info *current_thread_info(void)
+{
+ register unsigned long sp asm("sp");
+
+ return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
+}
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * thread information flags
+ * - these are process state flags that various assembly files may need to
+ * access
+ * - pending work-to-be-done flags are in LSW
+ * - other flags in MSW
+ */
+#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
+#define TIF_NOTIFY_RESUME 1 /* resumption notification requested */
+#define TIF_SIGPENDING 2 /* signal pending */
+#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
+#define TIF_MEMDIE 4 /* is terminating due to OOM killer */
+#define TIF_SECCOMP 5 /* secure computing */
+#define TIF_SYSCALL_AUDIT 6 /* syscall auditing active */
+#define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */
+
+#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling
+ TIF_NEED_RESCHED */
+
+#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
+#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
+#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+#define _TIF_SECCOMP (1 << TIF_SECCOMP)
+#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
+#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
+#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
+
+/* work to do on interrupt/exception return */
+#define _TIF_WORK_MASK 0x0000FFFE
+
+/* work to do on any return to u-space */
+# define _TIF_ALLWORK_MASK 0x0000FFFF
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_NIOS2_THREAD_INFO_H */
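
Editor's note: current_thread_info() above relies on the kernel stack being THREAD_SIZE-aligned, so masking the stack pointer yields the base of the stack, where struct thread_info lives. A standalone worked example of the masking (the sp value is made up):

	#include <stdio.h>

	int main(void)
	{
		unsigned long thread_size = 0x2000;	/* THREAD_SIZE = 8192 */
		unsigned long sp = 0xc7a13e58;		/* hypothetical kernel sp */

		/* prints "thread_info at 0xc7a12000" */
		printf("thread_info at 0x%lx\n", sp & ~(thread_size - 1));
		return 0;
	}
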
diff --git a/arch/nios2/include/asm/timex.h b/arch/nios2/include/asm/timex.h
new file mode 100644
index 000000000000..2f2abb28ec2f
--- /dev/null
+++ b/arch/nios2/include/asm/timex.h
@@ -0,0 +1,24 @@
+/* Copyright Altera Corporation (C) 2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#ifndef _ASM_NIOS2_TIMEX_H
+#define _ASM_NIOS2_TIMEX_H
+
+typedef unsigned long cycles_t;
+
+extern cycles_t get_cycles(void);
+
+#endif
diff --git a/arch/nios2/include/asm/tlb.h b/arch/nios2/include/asm/tlb.h
new file mode 100644
index 000000000000..d3bc648e08b5
--- /dev/null
+++ b/arch/nios2/include/asm/tlb.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2010 Tobias Klauser <tklauser@distanz.ch>
+ * Copyright (C) 2009 Wind River Systems Inc
+ * Copyright (C) 2004 Microtronix Datacom Ltd.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef _ASM_NIOS2_TLB_H
+#define _ASM_NIOS2_TLB_H
+
+#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
+
+extern void set_mmu_pid(unsigned long pid);
+
+/*
+ * NiosII doesn't need any special per-pte or per-vma handling, except
+ * we need to flush cache for the area to be unmapped.
+ */
+#define tlb_start_vma(tlb, vma) \
+ do { \
+ if (!tlb->fullmm) \
+ flush_cache_range(vma, vma->vm_start, vma->vm_end); \
+ } while (0)
+
+#define tlb_end_vma(tlb, vma) do { } while (0)
+#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
+
+#include <linux/pagemap.h>
+#include <asm-generic/tlb.h>
+
+#endif /* _ASM_NIOS2_TLB_H */
diff --git a/arch/nios2/include/asm/tlbflush.h b/arch/nios2/include/asm/tlbflush.h
new file mode 100644
index 000000000000..e19652fca1c6
--- /dev/null
+++ b/arch/nios2/include/asm/tlbflush.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2010 Tobias Klauser <tklauser@distanz.ch>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#ifndef _ASM_NIOS2_TLBFLUSH_H
+#define _ASM_NIOS2_TLBFLUSH_H
+
+struct mm_struct;
+
+/*
+ * TLB flushing:
+ *
+ * - flush_tlb_all() flushes all processes TLB entries
+ * - flush_tlb_mm(mm) flushes the specified mm context TLB entries
+ * - flush_tlb_page(vma, vmaddr) flushes one page
+ * - flush_tlb_range(vma, start, end) flushes a range of pages
+ * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
+ */
+extern void flush_tlb_all(void);
+extern void flush_tlb_mm(struct mm_struct *mm);
+extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+extern void flush_tlb_one(unsigned long vaddr);
+
+static inline void flush_tlb_page(struct vm_area_struct *vma,
+ unsigned long addr)
+{
+ flush_tlb_one(addr);
+}
+
+#endif /* _ASM_NIOS2_TLBFLUSH_H */
diff --git a/arch/nios2/include/asm/traps.h b/arch/nios2/include/asm/traps.h
new file mode 100644
index 000000000000..82a48473280d
--- /dev/null
+++ b/arch/nios2/include/asm/traps.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2011 Tobias Klauser <tklauser@distanz.ch>
+ * Copyright (C) 2004 Microtronix Datacom Ltd.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef _ASM_NIOS2_TRAPS_H
+#define _ASM_NIOS2_TRAPS_H
+
+#define TRAP_ID_SYSCALL 0
+
+#ifndef __ASSEMBLY__
+void _exception(int signo, struct pt_regs *regs, int code, unsigned long addr);
+#endif
+
+#endif /* _ASM_NIOS2_TRAPS_H */
diff --git a/arch/nios2/include/asm/uaccess.h b/arch/nios2/include/asm/uaccess.h
new file mode 100644
index 000000000000..acedc0a2860e
--- /dev/null
+++ b/arch/nios2/include/asm/uaccess.h
@@ -0,0 +1,231 @@
+/*
+ * User space memory access functions for Nios II
+ *
+ * Copyright (C) 2010-2011, Tobias Klauser <tklauser@distanz.ch>
+ * Copyright (C) 2009, Wind River Systems Inc
+ * Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef _ASM_NIOS2_UACCESS_H
+#define _ASM_NIOS2_UACCESS_H
+
+#include <linux/errno.h>
+#include <linux/thread_info.h>
+#include <linux/string.h>
+
+#include <asm/page.h>
+
+#define VERIFY_READ 0
+#define VERIFY_WRITE 1
+
+/*
+ * The exception table consists of pairs of addresses: the first is the
+ * address of an instruction that is allowed to fault, and the second is
+ * the address at which the program should continue. No registers are
+ * modified, so it is entirely up to the continuation code to figure out
+ * what to do.
+ *
+ * All the routines below use bits of fixup code that are out of line
+ * with the main instruction path. This means when everything is well,
+ * we don't even have to jump over them. Further, they do not intrude
+ * on our cache or tlb entries.
+ */
+struct exception_table_entry {
+ unsigned long insn;
+ unsigned long fixup;
+};
+
+extern int fixup_exception(struct pt_regs *regs);
+
+/*
+ * Segment stuff
+ */
+#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
+#define USER_DS MAKE_MM_SEG(0x80000000UL)
+#define KERNEL_DS MAKE_MM_SEG(0)
+
+#define get_ds() (KERNEL_DS)
+
+#define get_fs() (current_thread_info()->addr_limit)
+#define set_fs(seg) (current_thread_info()->addr_limit = (seg))
+
+#define segment_eq(a, b) ((a).seg == (b).seg)
+
+#define __access_ok(addr, len) \
+ (((signed long)(((long)get_fs().seg) & \
+ ((long)(addr) | (((long)(addr)) + (len)) | (len)))) == 0)
+
+#define access_ok(type, addr, len) \
+ likely(__access_ok((unsigned long)(addr), (unsigned long)(len)))
+
+# define __EX_TABLE_SECTION ".section __ex_table,\"a\"\n"
+
+/*
+ * Zero Userspace
+ */
+
+static inline unsigned long __must_check __clear_user(void __user *to,
+ unsigned long n)
+{
+ __asm__ __volatile__ (
+ "1: stb zero, 0(%1)\n"
+ " addi %0, %0, -1\n"
+ " addi %1, %1, 1\n"
+ " bne %0, zero, 1b\n"
+ "2:\n"
+ __EX_TABLE_SECTION
+ ".word 1b, 2b\n"
+ ".previous\n"
+ : "=r" (n), "=r" (to)
+ : "0" (n), "1" (to)
+ );
+
+ return n;
+}
+
+static inline unsigned long __must_check clear_user(void __user *to,
+ unsigned long n)
+{
+ if (!access_ok(VERIFY_WRITE, to, n))
+ return n;
+ return __clear_user(to, n);
+}
+
+extern long __copy_from_user(void *to, const void __user *from,
+ unsigned long n);
+extern long __copy_to_user(void __user *to, const void *from, unsigned long n);
+
+static inline long copy_from_user(void *to, const void __user *from,
+ unsigned long n)
+{
+ if (!access_ok(VERIFY_READ, from, n))
+ return n;
+ return __copy_from_user(to, from, n);
+}
+
+static inline long copy_to_user(void __user *to, const void *from,
+ unsigned long n)
+{
+ if (!access_ok(VERIFY_WRITE, to, n))
+ return n;
+ return __copy_to_user(to, from, n);
+}
+
+extern long strncpy_from_user(char *__to, const char __user *__from,
+ long __len);
+extern long strnlen_user(const char __user *s, long n);
+
+#define __copy_from_user_inatomic __copy_from_user
+#define __copy_to_user_inatomic __copy_to_user
+
+/* Optimized macros */
+#define __get_user_asm(val, insn, addr, err) \
+{ \
+ __asm__ __volatile__( \
+ " movi %0, %3\n" \
+ "1: " insn " %1, 0(%2)\n" \
+ " movi %0, 0\n" \
+ "2:\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .word 1b, 2b\n" \
+ " .previous" \
+ : "=&r" (err), "=r" (val) \
+ : "r" (addr), "i" (-EFAULT)); \
+}
+
+#define __get_user_unknown(val, size, ptr, err) do { \
+ err = 0; \
+ if (copy_from_user(&(val), ptr, size)) { \
+ err = -EFAULT; \
+ } \
+ } while (0)
+
+#define __get_user_common(val, size, ptr, err) \
+do { \
+ switch (size) { \
+ case 1: \
+ __get_user_asm(val, "ldbu", ptr, err); \
+ break; \
+ case 2: \
+ __get_user_asm(val, "ldhu", ptr, err); \
+ break; \
+ case 4: \
+ __get_user_asm(val, "ldw", ptr, err); \
+ break; \
+ default: \
+ __get_user_unknown(val, size, ptr, err); \
+ break; \
+ } \
+} while (0)
+
+#define __get_user(x, ptr) \
+ ({ \
+ long __gu_err = -EFAULT; \
+ const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \
+ unsigned long __gu_val; \
+ __get_user_common(__gu_val, sizeof(*(ptr)), __gu_ptr, __gu_err);\
+ (x) = (__typeof__(x))__gu_val; \
+ __gu_err; \
+ })
+
+#define get_user(x, ptr) \
+({ \
+ long __gu_err = -EFAULT; \
+ const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \
+ unsigned long __gu_val = 0; \
+ if (access_ok(VERIFY_READ, __gu_ptr, sizeof(*__gu_ptr))) \
+ __get_user_common(__gu_val, sizeof(*__gu_ptr), \
+ __gu_ptr, __gu_err); \
+ (x) = (__typeof__(x))__gu_val; \
+ __gu_err; \
+})
+
+#define __put_user_asm(val, insn, ptr, err) \
+{ \
+ __asm__ __volatile__( \
+ " movi %0, %3\n" \
+ "1: " insn " %1, 0(%2)\n" \
+ " movi %0, 0\n" \
+ "2:\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .word 1b, 2b\n" \
+ " .previous\n" \
+ : "=&r" (err) \
+ : "r" (val), "r" (ptr), "i" (-EFAULT)); \
+}
+
+#define put_user(x, ptr) \
+({ \
+ long __pu_err = -EFAULT; \
+ __typeof__(*(ptr)) __user *__pu_ptr = (ptr); \
+ __typeof__(*(ptr)) __pu_val = (__typeof(*ptr))(x); \
+ if (access_ok(VERIFY_WRITE, __pu_ptr, sizeof(*__pu_ptr))) { \
+ switch (sizeof(*__pu_ptr)) { \
+ case 1: \
+ __put_user_asm(__pu_val, "stb", __pu_ptr, __pu_err); \
+ break; \
+ case 2: \
+ __put_user_asm(__pu_val, "sth", __pu_ptr, __pu_err); \
+ break; \
+ case 4: \
+ __put_user_asm(__pu_val, "stw", __pu_ptr, __pu_err); \
+ break; \
+ default: \
+ /* XXX: This looks wrong... */ \
+ __pu_err = 0; \
+ if (copy_to_user(__pu_ptr, &(__pu_val), \
+ sizeof(*__pu_ptr))) \
+ __pu_err = -EFAULT; \
+ break; \
+ } \
+ } \
+ __pu_err; \
+})
+
+#define __put_user(x, ptr) put_user(x, ptr)
+
+#endif /* _ASM_NIOS2_UACCESS_H */
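
Editor's note: a hedged usage sketch of the accessors defined above, the way a typical driver path would use them; the function and its argument are hypothetical, not part of the patch:

	static long example_bump_user_counter(int __user *uptr)
	{
		int val;

		if (get_user(val, uptr))	/* -EFAULT on a bad pointer */
			return -EFAULT;

		return put_user(val + 1, uptr);	/* 0 on success */
	}
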
diff --git a/arch/nios2/include/asm/ucontext.h b/arch/nios2/include/asm/ucontext.h
new file mode 100644
index 000000000000..2c87614b0f6e
--- /dev/null
+++ b/arch/nios2/include/asm/ucontext.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2010 Tobias Klauser <tklauser@distanz.ch>
+ * Copyright (C) 2004 Microtronix Datacom Ltd
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef _ASM_NIOS2_UCONTEXT_H
+#define _ASM_NIOS2_UCONTEXT_H
+
+typedef int greg_t;
+#define NGREG 32
+typedef greg_t gregset_t[NGREG];
+
+struct mcontext {
+ int version;
+ gregset_t gregs;
+};
+
+#define MCONTEXT_VERSION 2
+
+struct ucontext {
+ unsigned long uc_flags;
+ struct ucontext *uc_link;
+ stack_t uc_stack;
+ struct mcontext uc_mcontext;
+ sigset_t uc_sigmask; /* mask last for extensibility */
+};
+
+#endif
diff --git a/arch/nios2/include/uapi/asm/Kbuild b/arch/nios2/include/uapi/asm/Kbuild
new file mode 100644
index 000000000000..4f07ca3f8d10
--- /dev/null
+++ b/arch/nios2/include/uapi/asm/Kbuild
@@ -0,0 +1,4 @@
+include include/uapi/asm-generic/Kbuild.asm
+
+header-y += elf.h
+header-y += ucontext.h
diff --git a/arch/nios2/include/uapi/asm/byteorder.h b/arch/nios2/include/uapi/asm/byteorder.h
new file mode 100644
index 000000000000..3ab5dc20d757
--- /dev/null
+++ b/arch/nios2/include/uapi/asm/byteorder.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2009 Thomas Chou <thomas@wytron.com.tw>
+ * Copyright (C) 2004 Microtronix Datacom Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _ASM_NIOS2_BYTEORDER_H
+#define _ASM_NIOS2_BYTEORDER_H
+
+#include <linux/byteorder/little_endian.h>
+
+#endif
diff --git a/arch/nios2/include/uapi/asm/elf.h b/arch/nios2/include/uapi/asm/elf.h
new file mode 100644
index 000000000000..a5b91ae5cf56
--- /dev/null
+++ b/arch/nios2/include/uapi/asm/elf.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2011 Tobias Klauser <tklauser@distanz.ch>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+
+#ifndef _UAPI_ASM_NIOS2_ELF_H
+#define _UAPI_ASM_NIOS2_ELF_H
+
+#include <linux/ptrace.h>
+
+/* Relocation types */
+#define R_NIOS2_NONE 0
+#define R_NIOS2_S16 1
+#define R_NIOS2_U16 2
+#define R_NIOS2_PCREL16 3
+#define R_NIOS2_CALL26 4
+#define R_NIOS2_IMM5 5
+#define R_NIOS2_CACHE_OPX 6
+#define R_NIOS2_IMM6 7
+#define R_NIOS2_IMM8 8
+#define R_NIOS2_HI16 9
+#define R_NIOS2_LO16 10
+#define R_NIOS2_HIADJ16 11
+#define R_NIOS2_BFD_RELOC_32 12
+#define R_NIOS2_BFD_RELOC_16 13
+#define R_NIOS2_BFD_RELOC_8 14
+#define R_NIOS2_GPREL 15
+#define R_NIOS2_GNU_VTINHERIT 16
+#define R_NIOS2_GNU_VTENTRY 17
+#define R_NIOS2_UJMP 18
+#define R_NIOS2_CJMP 19
+#define R_NIOS2_CALLR 20
+#define R_NIOS2_ALIGN 21
+/* Keep this the last entry. */
+#define R_NIOS2_NUM 22
+
+typedef unsigned long elf_greg_t;
+
+#define ELF_NGREG \
+ ((sizeof(struct pt_regs) + sizeof(struct switch_stack)) / \
+ sizeof(elf_greg_t))
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+
+typedef unsigned long elf_fpregset_t;
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#define ELF_CLASS ELFCLASS32
+#define ELF_DATA ELFDATA2LSB
+#define ELF_ARCH EM_ALTERA_NIOS2
+
+#endif /* _UAPI_ASM_NIOS2_ELF_H */
diff --git a/arch/nios2/include/uapi/asm/ptrace.h b/arch/nios2/include/uapi/asm/ptrace.h
new file mode 100644
index 000000000000..e83a7c9d1c36
--- /dev/null
+++ b/arch/nios2/include/uapi/asm/ptrace.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright (C) 2010 Tobias Klauser <tklauser@distanz.ch>
+ * Copyright (C) 2004 Microtronix Datacom Ltd
+ *
+ * based on m68k asm/processor.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef _UAPI_ASM_NIOS2_PTRACE_H
+#define _UAPI_ASM_NIOS2_PTRACE_H
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Register numbers used by 'ptrace' system call interface.
+ */
+
+/* GP registers */
+#define PTR_R0 0
+#define PTR_R1 1
+#define PTR_R2 2
+#define PTR_R3 3
+#define PTR_R4 4
+#define PTR_R5 5
+#define PTR_R6 6
+#define PTR_R7 7
+#define PTR_R8 8
+#define PTR_R9 9
+#define PTR_R10 10
+#define PTR_R11 11
+#define PTR_R12 12
+#define PTR_R13 13
+#define PTR_R14 14
+#define PTR_R15 15
+#define PTR_R16 16
+#define PTR_R17 17
+#define PTR_R18 18
+#define PTR_R19 19
+#define PTR_R20 20
+#define PTR_R21 21
+#define PTR_R22 22
+#define PTR_R23 23
+#define PTR_R24 24
+#define PTR_R25 25
+#define PTR_GP 26
+#define PTR_SP 27
+#define PTR_FP 28
+#define PTR_EA 29
+#define PTR_BA 30
+#define PTR_RA 31
+/* Control registers */
+#define PTR_PC 32
+#define PTR_STATUS 33
+#define PTR_ESTATUS 34
+#define PTR_BSTATUS 35
+#define PTR_IENABLE 36
+#define PTR_IPENDING 37
+#define PTR_CPUID 38
+#define PTR_CTL6 39
+#define PTR_CTL7 40
+#define PTR_PTEADDR 41
+#define PTR_TLBACC 42
+#define PTR_TLBMISC 43
+
+#define NUM_PTRACE_REG (PTR_TLBMISC + 1)
+
+/*
+ * This struct defines the way the registers are stored on the stack during
+ * a system call. There is a fake_regs in setup.c that has to match pt_regs.
+ */
+
+struct pt_regs {
+ unsigned long r8; /* r8-r15 Caller-saved GP registers */
+ unsigned long r9;
+ unsigned long r10;
+ unsigned long r11;
+ unsigned long r12;
+ unsigned long r13;
+ unsigned long r14;
+ unsigned long r15;
+ unsigned long r1; /* Assembler temporary */
+ unsigned long r2; /* Retval LS 32bits */
+ unsigned long r3; /* Retval MS 32bits */
+ unsigned long r4; /* r4-r7 Register arguments */
+ unsigned long r5;
+ unsigned long r6;
+ unsigned long r7;
+ unsigned long orig_r2; /* Copy of r2 ?? */
+ unsigned long ra; /* Return address */
+ unsigned long fp; /* Frame pointer */
+ unsigned long sp; /* Stack pointer */
+ unsigned long gp; /* Global pointer */
+ unsigned long estatus;
+ unsigned long ea; /* Exception return address (pc) */
+ unsigned long orig_r7;
+};
+
+/*
+ * This is the extended stack used by signal handlers and the context
+ * switcher: it's pushed after the normal "struct pt_regs".
+ */
+struct switch_stack {
+ unsigned long r16; /* r16-r23 Callee-saved GP registers */
+ unsigned long r17;
+ unsigned long r18;
+ unsigned long r19;
+ unsigned long r20;
+ unsigned long r21;
+ unsigned long r22;
+ unsigned long r23;
+ unsigned long fp;
+ unsigned long gp;
+ unsigned long ra;
+};
+
+#endif /* __ASSEMBLY__ */
+#endif /* _UAPI_ASM_NIOS2_PTRACE_H */
diff --git a/arch/nios2/include/uapi/asm/sigcontext.h b/arch/nios2/include/uapi/asm/sigcontext.h
new file mode 100644
index 000000000000..7b8bb41867d4
--- /dev/null
+++ b/arch/nios2/include/uapi/asm/sigcontext.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2004, Microtronix Datacom Ltd.
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#ifndef _ASM_NIOS2_SIGCONTEXT_H
+#define _ASM_NIOS2_SIGCONTEXT_H
+
+#include <asm/ptrace.h>
+
+struct sigcontext {
+ struct pt_regs regs;
+ unsigned long sc_mask; /* old sigmask */
+};
+
+#endif
diff --git a/arch/nios2/include/uapi/asm/signal.h b/arch/nios2/include/uapi/asm/signal.h
new file mode 100644
index 000000000000..f29ee6314481
--- /dev/null
+++ b/arch/nios2/include/uapi/asm/signal.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright Altera Corporation (C) 2013. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+#ifndef _ASM_NIOS2_SIGNAL_H
+#define _ASM_NIOS2_SIGNAL_H
+
+#define SA_RESTORER 0x04000000
+#include <asm-generic/signal.h>
+
+#endif /* _ASM_NIOS2_SIGNAL_H */
diff --git a/arch/nios2/include/uapi/asm/swab.h b/arch/nios2/include/uapi/asm/swab.h
new file mode 100644
index 000000000000..b4e22ebaeb17
--- /dev/null
+++ b/arch/nios2/include/uapi/asm/swab.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2012 Tobias Klauser <tklauser@distanz.ch>
+ * Copyright (C) 2011 Pyramid Technical Consultants, Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file COPYING in the main directory of this
+ * archive for more details.
+ */
+
+#ifndef _ASM_NIOS2_SWAB_H
+#define _ASM_NIOS2_SWAB_H
+
+#include <linux/types.h>
+#include <asm-generic/swab.h>
+
+#ifdef CONFIG_NIOS2_CI_SWAB_SUPPORT
+#ifdef __GNUC__
+
+#define __nios2_swab(x) \
+ __builtin_custom_ini(CONFIG_NIOS2_CI_SWAB_NO, (x))
+
+static inline __attribute__((const)) __u16 __arch_swab16(__u16 x)
+{
+ return (__u16) __nios2_swab(((__u32) x) << 16);
+}
+#define __arch_swab16 __arch_swab16
+
+static inline __attribute__((const)) __u32 __arch_swab32(__u32 x)
+{
+ return (__u32) __nios2_swab(x);
+}
+#define __arch_swab32 __arch_swab32
+
+#endif /* __GNUC__ */
+#endif /* CONFIG_NIOS2_CI_SWAB_SUPPORT */
+
+#endif /* _ASM_NIOS2_SWAB_H */
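
Editor's note: __arch_swab16() above shifts the halfword into the upper 16 bits before issuing the 32-bit custom byte-swap, so the two interesting bytes land in the low half of the result. A portable worked example, assuming the custom instruction performs a plain 32-bit byte reversal:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t swab32(uint32_t x)
	{
		return (x >> 24) | ((x >> 8) & 0x0000ff00) |
		       ((x << 8) & 0x00ff0000) | (x << 24);
	}

	int main(void)
	{
		uint16_t v = 0x1234;

		/* 0x1234 << 16 = 0x12340000; byte-reversed = 0x00003412 */
		printf("0x%04x\n", (uint16_t)swab32((uint32_t)v << 16));	/* 0x3412 */
		return 0;
	}
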
diff --git a/arch/nios2/include/uapi/asm/unistd.h b/arch/nios2/include/uapi/asm/unistd.h
new file mode 100644
index 000000000000..c4bf79510461
--- /dev/null
+++ b/arch/nios2/include/uapi/asm/unistd.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2013 Altera Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#define sys_mmap2 sys_mmap_pgoff
+
+/* Use the standard ABI for syscalls */
+#include <asm-generic/unistd.h>
+
+/* Additional Nios II specific syscalls. */
+#define __NR_cacheflush (__NR_arch_specific_syscall)
+__SYSCALL(__NR_cacheflush, sys_cacheflush)
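
Editor's note: __NR_cacheflush is allocated from the arch-specific range of the generic syscall ABI. A hypothetical user-space sketch of invoking it (for example after writing out generated code); the semantics of the op argument are not defined in this hunk, so it is simply passed through:

	#define _GNU_SOURCE
	#include <unistd.h>
	#include <sys/syscall.h>

	static int nios2_cacheflush(void *addr, unsigned long len, unsigned int op)
	{
		return syscall(__NR_cacheflush, (unsigned long)addr, len, op);
	}
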
diff --git a/arch/nios2/kernel/Makefile b/arch/nios2/kernel/Makefile
new file mode 100644
index 000000000000..8ae76823ff93
--- /dev/null
+++ b/arch/nios2/kernel/Makefile
@@ -0,0 +1,24 @@
+#
+# Makefile for the nios2 linux kernel.
+#
+
+extra-y += head.o
+extra-y += vmlinux.lds
+
+obj-y += cpuinfo.o
+obj-y += entry.o
+obj-y += insnemu.o
+obj-y += irq.o
+obj-y += nios2_ksyms.o
+obj-y += process.o
+obj-y += prom.o
+obj-y += ptrace.o
+obj-y += setup.o
+obj-y += signal.o
+obj-y += sys_nios2.o
+obj-y += syscall_table.o
+obj-y += time.o
+obj-y += traps.o
+
+obj-$(CONFIG_MODULES) += module.o
+obj-$(CONFIG_NIOS2_ALIGNMENT_TRAP) += misaligned.o
diff --git a/arch/nios2/kernel/asm-offsets.c b/arch/nios2/kernel/asm-offsets.c
new file mode 100644
index 000000000000..c3ee73c18b71
--- /dev/null
+++ b/arch/nios2/kernel/asm-offsets.c
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2011 Tobias Klauser <tklauser@distanz.ch>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include <linux/stddef.h>
+#include <linux/sched.h>
+#include <linux/kernel_stat.h>
+#include <linux/ptrace.h>
+#include <linux/hardirq.h>
+#include <linux/thread_info.h>
+#include <linux/kbuild.h>
+
+int main(void)
+{
+ /* struct task_struct */
+ OFFSET(TASK_THREAD, task_struct, thread);
+ BLANK();
+
+ /* struct thread_struct */
+ OFFSET(THREAD_KSP, thread_struct, ksp);
+ OFFSET(THREAD_KPSR, thread_struct, kpsr);
+ BLANK();
+
+ /* struct pt_regs */
+ OFFSET(PT_ORIG_R2, pt_regs, orig_r2);
+ OFFSET(PT_ORIG_R7, pt_regs, orig_r7);
+
+ OFFSET(PT_R1, pt_regs, r1);
+ OFFSET(PT_R2, pt_regs, r2);
+ OFFSET(PT_R3, pt_regs, r3);
+ OFFSET(PT_R4, pt_regs, r4);
+ OFFSET(PT_R5, pt_regs, r5);
+ OFFSET(PT_R6, pt_regs, r6);
+ OFFSET(PT_R7, pt_regs, r7);
+ OFFSET(PT_R8, pt_regs, r8);
+ OFFSET(PT_R9, pt_regs, r9);
+ OFFSET(PT_R10, pt_regs, r10);
+ OFFSET(PT_R11, pt_regs, r11);
+ OFFSET(PT_R12, pt_regs, r12);
+ OFFSET(PT_R13, pt_regs, r13);
+ OFFSET(PT_R14, pt_regs, r14);
+ OFFSET(PT_R15, pt_regs, r15);
+ OFFSET(PT_EA, pt_regs, ea);
+ OFFSET(PT_RA, pt_regs, ra);
+ OFFSET(PT_FP, pt_regs, fp);
+ OFFSET(PT_SP, pt_regs, sp);
+ OFFSET(PT_GP, pt_regs, gp);
+ OFFSET(PT_ESTATUS, pt_regs, estatus);
+ DEFINE(PT_REGS_SIZE, sizeof(struct pt_regs));
+ BLANK();
+
+ /* struct switch_stack */
+ OFFSET(SW_R16, switch_stack, r16);
+ OFFSET(SW_R17, switch_stack, r17);
+ OFFSET(SW_R18, switch_stack, r18);
+ OFFSET(SW_R19, switch_stack, r19);
+ OFFSET(SW_R20, switch_stack, r20);
+ OFFSET(SW_R21, switch_stack, r21);
+ OFFSET(SW_R22, switch_stack, r22);
+ OFFSET(SW_R23, switch_stack, r23);
+ OFFSET(SW_FP, switch_stack, fp);
+ OFFSET(SW_GP, switch_stack, gp);
+ OFFSET(SW_RA, switch_stack, ra);
+ DEFINE(SWITCH_STACK_SIZE, sizeof(struct switch_stack));
+ BLANK();
+
+ /* struct thread_info */
+ OFFSET(TI_FLAGS, thread_info, flags);
+ OFFSET(TI_PREEMPT_COUNT, thread_info, preempt_count);
+ BLANK();
+
+ return 0;
+}
diff --git a/arch/nios2/kernel/cpuinfo.c b/arch/nios2/kernel/cpuinfo.c
new file mode 100644
index 000000000000..51d5bb90d3e5
--- /dev/null
+++ b/arch/nios2/kernel/cpuinfo.c
@@ -0,0 +1,197 @@
+/*
+ * Copyright (C) 2013 Altera Corporation
+ * Copyright (C) 2011 Tobias Klauser <tklauser@distanz.ch>
+ *
+ * Based on cpuinfo.c from microblaze
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/seq_file.h>
+#include <linux/string.h>
+#include <linux/of.h>
+#include <asm/cpuinfo.h>
+
+struct cpuinfo cpuinfo;
+
+#define err_cpu(x) \
+ pr_err("ERROR: Nios II " x " different for kernel and DTS\n")
+
+static inline u32 fcpu(struct device_node *cpu, const char *n)
+{
+ u32 val = 0;
+
+ of_property_read_u32(cpu, n, &val);
+
+ return val;
+}
+
+static inline u32 fcpu_has(struct device_node *cpu, const char *n)
+{
+ return of_get_property(cpu, n, NULL) ? 1 : 0;
+}
+
+void __init setup_cpuinfo(void)
+{
+ struct device_node *cpu;
+ const char *str;
+ int len;
+
+ cpu = of_find_node_by_type(NULL, "cpu");
+ if (!cpu)
+ panic("%s: No CPU found in devicetree!\n", __func__);
+
+ if (!fcpu_has(cpu, "altr,has-initda"))
+		panic("initda instruction is unimplemented. Please update your "
+			"hardware system to use a data cache with a line size "
+			"larger than 4 bytes\n");
+
+ cpuinfo.cpu_clock_freq = fcpu(cpu, "clock-frequency");
+
+ str = of_get_property(cpu, "altr,implementation", &len);
+ if (str)
+ strlcpy(cpuinfo.cpu_impl, str, sizeof(cpuinfo.cpu_impl));
+ else
+ strcpy(cpuinfo.cpu_impl, "<unknown>");
+
+ cpuinfo.has_div = fcpu_has(cpu, "altr,has-div");
+ cpuinfo.has_mul = fcpu_has(cpu, "altr,has-mul");
+ cpuinfo.has_mulx = fcpu_has(cpu, "altr,has-mulx");
+
+ if (IS_ENABLED(CONFIG_NIOS2_HW_DIV_SUPPORT) && !cpuinfo.has_div)
+ err_cpu("DIV");
+
+ if (IS_ENABLED(CONFIG_NIOS2_HW_MUL_SUPPORT) && !cpuinfo.has_mul)
+ err_cpu("MUL");
+
+ if (IS_ENABLED(CONFIG_NIOS2_HW_MULX_SUPPORT) && !cpuinfo.has_mulx)
+ err_cpu("MULX");
+
+ cpuinfo.tlb_num_ways = fcpu(cpu, "altr,tlb-num-ways");
+ if (!cpuinfo.tlb_num_ways)
+ panic("altr,tlb-num-ways can't be 0. Please check your hardware "
+ "system\n");
+ cpuinfo.icache_line_size = fcpu(cpu, "icache-line-size");
+ cpuinfo.icache_size = fcpu(cpu, "icache-size");
+ if (CONFIG_NIOS2_ICACHE_SIZE != cpuinfo.icache_size)
+ pr_warn("Warning: icache size configuration mismatch "
+ "(0x%x vs 0x%x) of CONFIG_NIOS2_ICACHE_SIZE vs "
+ "device tree icache-size\n",
+ CONFIG_NIOS2_ICACHE_SIZE, cpuinfo.icache_size);
+
+ cpuinfo.dcache_line_size = fcpu(cpu, "dcache-line-size");
+ if (CONFIG_NIOS2_DCACHE_LINE_SIZE != cpuinfo.dcache_line_size)
+ pr_warn("Warning: dcache line size configuration mismatch "
+ "(0x%x vs 0x%x) of CONFIG_NIOS2_DCACHE_LINE_SIZE vs "
+ "device tree dcache-line-size\n",
+ CONFIG_NIOS2_DCACHE_LINE_SIZE, cpuinfo.dcache_line_size);
+ cpuinfo.dcache_size = fcpu(cpu, "dcache-size");
+ if (CONFIG_NIOS2_DCACHE_SIZE != cpuinfo.dcache_size)
+ pr_warn("Warning: dcache size configuration mismatch "
+ "(0x%x vs 0x%x) of CONFIG_NIOS2_DCACHE_SIZE vs "
+ "device tree dcache-size\n",
+ CONFIG_NIOS2_DCACHE_SIZE, cpuinfo.dcache_size);
+
+ cpuinfo.tlb_pid_num_bits = fcpu(cpu, "altr,pid-num-bits");
+ cpuinfo.tlb_num_ways_log2 = ilog2(cpuinfo.tlb_num_ways);
+ cpuinfo.tlb_num_entries = fcpu(cpu, "altr,tlb-num-entries");
+ cpuinfo.tlb_num_lines = cpuinfo.tlb_num_entries / cpuinfo.tlb_num_ways;
+ cpuinfo.tlb_ptr_sz = fcpu(cpu, "altr,tlb-ptr-sz");
+
+ cpuinfo.reset_addr = fcpu(cpu, "altr,reset-addr");
+ cpuinfo.exception_addr = fcpu(cpu, "altr,exception-addr");
+ cpuinfo.fast_tlb_miss_exc_addr = fcpu(cpu, "altr,fast-tlb-miss-addr");
+}
+
+#ifdef CONFIG_PROC_FS
+
+/*
+ * Get CPU information for use by the procfs.
+ */
+static int show_cpuinfo(struct seq_file *m, void *v)
+{
+ int count = 0;
+ const u32 clockfreq = cpuinfo.cpu_clock_freq;
+
+ count = seq_printf(m,
+ "CPU:\t\tNios II/%s\n"
+ "MMU:\t\t%s\n"
+ "FPU:\t\tnone\n"
+ "Clocking:\t%u.%02u MHz\n"
+ "BogoMips:\t%lu.%02lu\n"
+ "Calibration:\t%lu loops\n",
+ cpuinfo.cpu_impl,
+ cpuinfo.mmu ? "present" : "none",
+ clockfreq / 1000000, (clockfreq / 100000) % 10,
+ (loops_per_jiffy * HZ) / 500000,
+ ((loops_per_jiffy * HZ) / 5000) % 100,
+ (loops_per_jiffy * HZ));
+
+ count += seq_printf(m,
+ "HW:\n"
+ " MUL:\t\t%s\n"
+ " MULX:\t\t%s\n"
+ " DIV:\t\t%s\n",
+ cpuinfo.has_mul ? "yes" : "no",
+ cpuinfo.has_mulx ? "yes" : "no",
+ cpuinfo.has_div ? "yes" : "no");
+
+ count += seq_printf(m,
+ "Icache:\t\t%ukB, line length: %u\n",
+ cpuinfo.icache_size >> 10,
+ cpuinfo.icache_line_size);
+
+ count += seq_printf(m,
+ "Dcache:\t\t%ukB, line length: %u\n",
+ cpuinfo.dcache_size >> 10,
+ cpuinfo.dcache_line_size);
+
+ count += seq_printf(m,
+ "TLB:\t\t%u ways, %u entries, %u PID bits\n",
+ cpuinfo.tlb_num_ways,
+ cpuinfo.tlb_num_entries,
+ cpuinfo.tlb_pid_num_bits);
+
+ return 0;
+}
+
+static void *cpuinfo_start(struct seq_file *m, loff_t *pos)
+{
+ unsigned long i = *pos;
+
+ return i < num_possible_cpus() ? (void *) (i + 1) : NULL;
+}
+
+static void *cpuinfo_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ ++*pos;
+ return cpuinfo_start(m, pos);
+}
+
+static void cpuinfo_stop(struct seq_file *m, void *v)
+{
+}
+
+const struct seq_operations cpuinfo_op = {
+ .start = cpuinfo_start,
+ .next = cpuinfo_next,
+ .stop = cpuinfo_stop,
+ .show = show_cpuinfo
+};
+
+#endif /* CONFIG_PROC_FS */
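
Editor's note: for reference, the seq_printf() format strings above produce /proc/cpuinfo output of roughly the following shape (all values below are made up, not taken from any real system):

	CPU:		Nios II/fast
	MMU:		present
	FPU:		none
	Clocking:	50.00 MHz
	BogoMips:	25.00
	Calibration:	12500000 loops
	HW:
	 MUL:		yes
	 MULX:		no
	 DIV:		yes
	Icache:		32kB, line length: 32
	Dcache:		32kB, line length: 32
	TLB:		16 ways, 128 entries, 8 PID bits
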
diff --git a/arch/nios2/kernel/entry.S b/arch/nios2/kernel/entry.S
new file mode 100644
index 000000000000..83bca17d1008
--- /dev/null
+++ b/arch/nios2/kernel/entry.S
@@ -0,0 +1,555 @@
+/*
+ * linux/arch/nios2/kernel/entry.S
+ *
+ * Copyright (C) 2013-2014 Altera Corporation
+ * Copyright (C) 2009, Wind River Systems Inc
+ *
+ * Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
+ *
+ * Copyright (C) 1999-2002, Greg Ungerer (gerg@snapgear.com)
+ * Copyright (C) 1998 D. Jeff Dionne <jeff@lineo.ca>,
+ * Kenneth Albanowski <kjahds@kjahds.com>,
+ * Copyright (C) 2000 Lineo Inc. (www.lineo.com)
+ * Copyright (C) 2004 Microtronix Datacom Ltd.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Linux/m68k support by Hamish Macdonald
+ *
+ * 68060 fixes by Jesper Skov
+ * ColdFire support by Greg Ungerer (gerg@snapgear.com)
+ * 5307 fixes by David W. Miller
+ * linux 2.4 support David McCullough <davidm@snapgear.com>
+ */
+
+#include <linux/sys.h>
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/asm-macros.h>
+#include <asm/thread_info.h>
+#include <asm/errno.h>
+#include <asm/setup.h>
+#include <asm/entry.h>
+#include <asm/unistd.h>
+#include <asm/processor.h>
+
+.macro GET_THREAD_INFO reg
+.if THREAD_SIZE & 0xffff0000
+ andhi \reg, sp, %hi(~(THREAD_SIZE-1))
+.else
+ addi \reg, r0, %lo(~(THREAD_SIZE-1))
+ and \reg, \reg, sp
+.endif
+.endm
+
+.macro kuser_cmpxchg_check
+ /*
+ * Make sure our user space atomic helper is restarted if it was
+ * interrupted in a critical region.
+ * ea-4 = address of interrupted insn (ea must be preserved).
+ * sp = saved regs.
+ * cmpxchg_ldw = first critical insn, cmpxchg_stw = last critical insn.
+ * If ea <= cmpxchg_stw and ea > cmpxchg_ldw then saved EA is set to
+ * cmpxchg_ldw + 4.
+ */
+ /* et = cmpxchg_stw + 4 */
+ movui et, (KUSER_BASE + 4 + (cmpxchg_stw - __kuser_helper_start))
+ bgtu ea, et, 1f
+
+ subi et, et, (cmpxchg_stw - cmpxchg_ldw) /* et = cmpxchg_ldw + 4 */
+ bltu ea, et, 1f
+ stw et, PT_EA(sp) /* fix up EA */
+ mov ea, et
+1:
+.endm
+
+.section .rodata
+.align 4
+exception_table:
+ .word unhandled_exception /* 0 - Reset */
+ .word unhandled_exception /* 1 - Processor-only Reset */
+ .word external_interrupt /* 2 - Interrupt */
+ .word handle_trap /* 3 - Trap Instruction */
+
+ .word instruction_trap /* 4 - Unimplemented instruction */
+ .word handle_illegal /* 5 - Illegal instruction */
+ .word handle_unaligned /* 6 - Misaligned data access */
+ .word handle_unaligned /* 7 - Misaligned destination address */
+
+ .word handle_diverror /* 8 - Division error */
+ .word protection_exception_ba /* 9 - Supervisor-only instr. address */
+ .word protection_exception_instr /* 10 - Supervisor only instruction */
+ .word protection_exception_ba /* 11 - Supervisor only data address */
+
+ .word unhandled_exception /* 12 - Double TLB miss (data) */
+ .word protection_exception_pte /* 13 - TLB permission violation (x) */
+ .word protection_exception_pte /* 14 - TLB permission violation (r) */
+ .word protection_exception_pte /* 15 - TLB permission violation (w) */
+
+ .word unhandled_exception /* 16 - MPU region violation */
+
+trap_table:
+ .word handle_system_call /* 0 */
+ .word instruction_trap /* 1 */
+ .word instruction_trap /* 2 */
+ .word instruction_trap /* 3 */
+ .word instruction_trap /* 4 */
+ .word instruction_trap /* 5 */
+ .word instruction_trap /* 6 */
+ .word instruction_trap /* 7 */
+ .word instruction_trap /* 8 */
+ .word instruction_trap /* 9 */
+ .word instruction_trap /* 10 */
+ .word instruction_trap /* 11 */
+ .word instruction_trap /* 12 */
+ .word instruction_trap /* 13 */
+ .word instruction_trap /* 14 */
+ .word instruction_trap /* 15 */
+ .word instruction_trap /* 16 */
+ .word instruction_trap /* 17 */
+ .word instruction_trap /* 18 */
+ .word instruction_trap /* 19 */
+ .word instruction_trap /* 20 */
+ .word instruction_trap /* 21 */
+ .word instruction_trap /* 22 */
+ .word instruction_trap /* 23 */
+ .word instruction_trap /* 24 */
+ .word instruction_trap /* 25 */
+ .word instruction_trap /* 26 */
+ .word instruction_trap /* 27 */
+ .word instruction_trap /* 28 */
+ .word instruction_trap /* 29 */
+ .word instruction_trap /* 30 */
+ .word handle_breakpoint /* 31 */
+
+.text
+.set noat
+.set nobreak
+
+ENTRY(inthandler)
+ SAVE_ALL
+
+ kuser_cmpxchg_check
+
+	/* Clear the EH bit before we get a new exception in the kernel
+	 * and after we have saved it to the exception frame. This is done
+	 * whether it's a trap, a TLB miss or an interrupt. If we don't do
+	 * this, estatus is not updated on the next exception.
+	 */
+ rdctl r24, status
+ movi r9, %lo(~STATUS_EH)
+ and r24, r24, r9
+ wrctl status, r24
+
+ /* Read cause and vector and branch to the associated handler */
+ mov r4, sp
+ rdctl r5, exception
+ movia r9, exception_table
+ add r24, r9, r5
+ ldw r24, 0(r24)
+ jmp r24
+
+
+/***********************************************************************
+ * Handle traps
+ ***********************************************************************
+ */
+ENTRY(handle_trap)
+ ldw r24, -4(ea) /* instruction that caused the exception */
+ srli r24, r24, 4
+ andi r24, r24, 0x7c
+ movia r9,trap_table
+ add r24, r24, r9
+ ldw r24, 0(r24)
+ jmp r24
+
+
+/***********************************************************************
+ * Handle system calls
+ ***********************************************************************
+ */
+ENTRY(handle_system_call)
+ /* Enable interrupts */
+ rdctl r10, status
+ ori r10, r10, STATUS_PIE
+ wrctl status, r10
+
+ /* Reload registers destroyed by common code. */
+ ldw r4, PT_R4(sp)
+ ldw r5, PT_R5(sp)
+
+local_restart:
+ /* Check that the requested system call is within limits */
+ movui r1, __NR_syscalls
+ bgeu r2, r1, ret_invsyscall
+ slli r1, r2, 2
+ movhi r11, %hiadj(sys_call_table)
+ add r1, r1, r11
+ ldw r1, %lo(sys_call_table)(r1)
+ beq r1, r0, ret_invsyscall
+
+ /* Check if we are being traced */
+ GET_THREAD_INFO r11
+ ldw r11,TI_FLAGS(r11)
+ BTBNZ r11,r11,TIF_SYSCALL_TRACE,traced_system_call
+
+ /* Execute the system call */
+ callr r1
+
+	/* If the syscall returns a negative result:
+	 *   set r7 to 1 to indicate an error and
+	 *   negate r2 to get a positive error code.
+	 * If the syscall returns zero or a positive value:
+	 *   set r7 to 0.
+	 * To avoid destroying registers, the sigreturn system calls skip
+	 * the code below by adding an offset to register ra.
+	 */
+translate_rc_and_ret:
+ movi r1, 0
+ bge r2, zero, 3f
+ sub r2, zero, r2
+ movi r1, 1
+3:
+ stw r2, PT_R2(sp)
+ stw r1, PT_R7(sp)
+end_translate_rc_and_ret:
+
+ret_from_exception:
+ ldw r1, PT_ESTATUS(sp)
+ /* if so, skip resched, signals */
+ TSTBNZ r1, r1, ESTATUS_EU, Luser_return
+
+restore_all:
+ rdctl r10, status /* disable intrs */
+ andi r10, r10, %lo(~STATUS_PIE)
+ wrctl status, r10
+ RESTORE_ALL
+ eret
+
+ /* If the syscall number was invalid return ENOSYS */
+ret_invsyscall:
+ movi r2, -ENOSYS
+ br translate_rc_and_ret
+
+ /* This implements the same as above, except it calls
+ * do_syscall_trace_enter and do_syscall_trace_exit before and after the
+ * syscall in order for utilities like strace and gdb to work.
+ */
+traced_system_call:
+ SAVE_SWITCH_STACK
+ call do_syscall_trace_enter
+ RESTORE_SWITCH_STACK
+
+ /* Create system call register arguments. The 5th and 6th
+ arguments on stack are already in place at the beginning
+ of pt_regs. */
+ ldw r2, PT_R2(sp)
+ ldw r4, PT_R4(sp)
+ ldw r5, PT_R5(sp)
+ ldw r6, PT_R6(sp)
+ ldw r7, PT_R7(sp)
+
+ /* Fetch the syscall function, we don't need to check the boundaries
+ * since this is already done.
+ */
+ slli r1, r2, 2
+ movhi r11,%hiadj(sys_call_table)
+ add r1, r1, r11
+ ldw r1, %lo(sys_call_table)(r1)
+
+ callr r1
+
+	/* If the syscall returns a negative result:
+	 *   set r7 to 1 to indicate an error and
+	 *   negate r2 to get a positive error code.
+	 * If the syscall returns zero or a positive value:
+	 *   set r7 to 0.
+	 * To avoid destroying registers, the sigreturn system calls skip
+	 * the code below by adding an offset to register ra.
+	 */
+translate_rc_and_ret2:
+ movi r1, 0
+ bge r2, zero, 4f
+ sub r2, zero, r2
+ movi r1, 1
+4:
+ stw r2, PT_R2(sp)
+ stw r1, PT_R7(sp)
+end_translate_rc_and_ret2:
+ SAVE_SWITCH_STACK
+ call do_syscall_trace_exit
+ RESTORE_SWITCH_STACK
+ br ret_from_exception
+
+Luser_return:
+ GET_THREAD_INFO r11 /* get thread_info pointer */
+ ldw r10, TI_FLAGS(r11) /* get thread_info->flags */
+ ANDI32 r11, r10, _TIF_WORK_MASK
+ beq r11, r0, restore_all /* Nothing to do */
+ BTBZ r1, r10, TIF_NEED_RESCHED, Lsignal_return
+
+ /* Reschedule work */
+ call schedule
+ br ret_from_exception
+
+Lsignal_return:
+ ANDI32 r1, r10, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME
+ beq r1, r0, restore_all
+ mov r4, sp /* pt_regs */
+ SAVE_SWITCH_STACK
+ call do_notify_resume
+ beq r2, r0, no_work_pending
+ RESTORE_SWITCH_STACK
+ /* prepare restart syscall here without leaving kernel */
+ ldw r2, PT_R2(sp) /* reload syscall number in r2 */
+ ldw r4, PT_R4(sp) /* reload syscall arguments r4-r9 */
+ ldw r5, PT_R5(sp)
+ ldw r6, PT_R6(sp)
+ ldw r7, PT_R7(sp)
+ ldw r8, PT_R8(sp)
+ ldw r9, PT_R9(sp)
+ br local_restart /* restart syscall */
+
+no_work_pending:
+ RESTORE_SWITCH_STACK
+ br ret_from_exception
+
+/***********************************************************************
+ * Handle external interrupts.
+ ***********************************************************************
+ */
+/*
+ * This is the generic interrupt handler (for all hardware interrupt
+ * sources). It figures out the vector number and calls the appropriate
+ * interrupt service routine directly.
+ */
+external_interrupt:
+ rdctl r12, ipending
+ rdctl r9, ienable
+ and r12, r12, r9
+ /* skip if no interrupt is pending */
+ beq r12, r0, ret_from_interrupt
+
+ movi r24, -1
+ stw r24, PT_ORIG_R2(sp)
+
+ /*
+ * Process an external hardware interrupt.
+ */
+
+ addi ea, ea, -4 /* re-issue the interrupted instruction */
+ stw ea, PT_EA(sp)
+2: movi r4, %lo(-1) /* Start from bit position 0,
+ highest priority */
+ /* This is the IRQ # for handler call */
+1: andi r10, r12, 1 /* Isolate bit we are interested in */
+ srli r12, r12, 1 /* shift count is costly without hardware
+ multiplier */
+ addi r4, r4, 1
+ beq r10, r0, 1b
+ mov r5, sp /* Setup pt_regs pointer for handler call */
+ call do_IRQ
+ rdctl r12, ipending /* check again if irq still pending */
+ rdctl r9, ienable /* Isolate possible interrupts */
+ and r12, r12, r9
+ bne r12, r0, 2b
+ /* br ret_from_interrupt */ /* fall through to ret_from_interrupt */
+
+ENTRY(ret_from_interrupt)
+ ldw r1, PT_ESTATUS(sp) /* check if returning to kernel */
+ TSTBNZ r1, r1, ESTATUS_EU, Luser_return
+
+#ifdef CONFIG_PREEMPT
+ GET_THREAD_INFO r1
+ ldw r4, TI_PREEMPT_COUNT(r1)
+ bne r4, r0, restore_all
+
+need_resched:
+ ldw r4, TI_FLAGS(r1) /* ? Need resched set */
+ BTBZ r10, r4, TIF_NEED_RESCHED, restore_all
+ ldw r4, PT_ESTATUS(sp) /* ? Interrupts off */
+ andi r10, r4, ESTATUS_EPIE
+ beq r10, r0, restore_all
+ movia r4, PREEMPT_ACTIVE
+ stw r4, TI_PREEMPT_COUNT(r1)
+ rdctl r10, status /* enable intrs again */
+ ori r10, r10 ,STATUS_PIE
+ wrctl status, r10
+ PUSH r1
+ call schedule
+ POP r1
+ mov r4, r0
+ stw r4, TI_PREEMPT_COUNT(r1)
+ rdctl r10, status /* disable intrs */
+ andi r10, r10, %lo(~STATUS_PIE)
+ wrctl status, r10
+ br need_resched
+#else
+ br restore_all
+#endif
+
+/***********************************************************************
+ * A few syscall wrappers
+ ***********************************************************************
+ */
+/*
+ * int clone(unsigned long clone_flags, unsigned long newsp,
+ * int __user * parent_tidptr, int __user * child_tidptr,
+ * int tls_val)
+ */
+ENTRY(sys_clone)
+ SAVE_SWITCH_STACK
+ addi sp, sp, -4
+ stw r7, 0(sp) /* Pass 5th arg thru stack */
+ mov r7, r6 /* 4th arg is 3rd of clone() */
+ mov r6, zero /* 3rd arg always 0 */
+ call do_fork
+ addi sp, sp, 4
+ RESTORE_SWITCH_STACK
+ ret
+
+ENTRY(sys_rt_sigreturn)
+ SAVE_SWITCH_STACK
+ mov r4, sp
+ call do_rt_sigreturn
+ RESTORE_SWITCH_STACK
+ addi ra, ra, (end_translate_rc_and_ret - translate_rc_and_ret)
+ ret
+
+/***********************************************************************
+ * A few other wrappers and stubs
+ ***********************************************************************
+ */
+protection_exception_pte:
+ rdctl r6, pteaddr
+ slli r6, r6, 10
+ call do_page_fault
+ br ret_from_exception
+
+protection_exception_ba:
+ rdctl r6, badaddr
+ call do_page_fault
+ br ret_from_exception
+
+protection_exception_instr:
+ call handle_supervisor_instr
+ br ret_from_exception
+
+handle_breakpoint:
+ call breakpoint_c
+ br ret_from_exception
+
+#ifdef CONFIG_NIOS2_ALIGNMENT_TRAP
+handle_unaligned:
+ SAVE_SWITCH_STACK
+ call handle_unaligned_c
+ RESTORE_SWITCH_STACK
+ br ret_from_exception
+#else
+handle_unaligned:
+ call handle_unaligned_c
+ br ret_from_exception
+#endif
+
+handle_illegal:
+ call handle_illegal_c
+ br ret_from_exception
+
+handle_diverror:
+ call handle_diverror_c
+ br ret_from_exception
+
+/*
+ * Beware - when entering resume, prev (the current task) is
+ * in r4, next (the new task) is in r5, don't change these
+ * registers.
+ */
+ENTRY(resume)
+
+ rdctl r7, status /* save thread status reg */
+ stw r7, TASK_THREAD + THREAD_KPSR(r4)
+
+ andi r7, r7, %lo(~STATUS_PIE) /* disable interrupts */
+ wrctl status, r7
+
+ SAVE_SWITCH_STACK
+ stw sp, TASK_THREAD + THREAD_KSP(r4)/* save kernel stack pointer */
+ ldw sp, TASK_THREAD + THREAD_KSP(r5)/* restore new thread stack */
+ movia r24, _current_thread /* save thread */
+ GET_THREAD_INFO r1
+ stw r1, 0(r24)
+ RESTORE_SWITCH_STACK
+
+ ldw r7, TASK_THREAD + THREAD_KPSR(r5)/* restore thread status reg */
+ wrctl status, r7
+ ret
+
+ENTRY(ret_from_fork)
+ call schedule_tail
+ br ret_from_exception
+
+ENTRY(ret_from_kernel_thread)
+ call schedule_tail
+ mov r4,r17 /* arg */
+ callr r16 /* function */
+ br ret_from_exception
+
+/*
+ * Kernel user helpers.
+ *
+ * Each segment is 64-byte aligned and will be mapped into user space.
+ * New segments (if ever needed) must be added after the existing ones.
+ * This mechanism should be used only for things that are really small and
+ * justified, and not be abused freely.
+ *
+ */
+
+ /* Filling pads with undefined instructions. */
+.macro kuser_pad sym size
+ .if ((. - \sym) & 3)
+ .rept (4 - (. - \sym) & 3)
+ .byte 0
+ .endr
+ .endif
+ .rept ((\size - (. - \sym)) / 4)
+ .word 0xdeadbeef
+ .endr
+.endm
+
+ .align 6
+ .globl __kuser_helper_start
+__kuser_helper_start:
+
+__kuser_helper_version: /* @ 0x1000 */
+ .word ((__kuser_helper_end - __kuser_helper_start) >> 6)
+
+__kuser_cmpxchg: /* @ 0x1004 */
+ /*
+ * r4 pointer to exchange variable
+ * r5 old value
+ * r6 new value
+ */
+cmpxchg_ldw:
+ ldw r2, 0(r4) /* load current value */
+ sub r2, r2, r5 /* compare with old value */
+ bne r2, zero, cmpxchg_ret
+
+ /* We had a match, store the new value */
+cmpxchg_stw:
+ stw r6, 0(r4)
+cmpxchg_ret:
+ ret
+
+ kuser_pad __kuser_cmpxchg, 64
+
+ .globl __kuser_sigtramp
+__kuser_sigtramp:
+ movi r2, __NR_rt_sigreturn
+ trap
+
+ kuser_pad __kuser_sigtramp, 64
+
+ .globl __kuser_helper_end
+__kuser_helper_end:
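
Editor's note: the kuser helpers above are meant to be called from user space at fixed addresses (the comments place __kuser_cmpxchg at 0x1004). A hypothetical C-side sketch of using it, relying on the Nios II calling convention (arguments in r4-r6, result in r2) matching a three-argument function; the address macro is an assumption taken from the "@ 0x1004" comment:

	#define KUSER_CMPXCHG_ADDR	0x1004UL

	typedef int (*kuser_cmpxchg_fn)(volatile int *ptr, int oldval, int newval);

	static int user_cmpxchg(volatile int *ptr, int oldval, int newval)
	{
		kuser_cmpxchg_fn cmpxchg = (kuser_cmpxchg_fn)KUSER_CMPXCHG_ADDR;

		/* returns 0 if *ptr matched oldval and was replaced by newval */
		return cmpxchg(ptr, oldval, newval);
	}
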
diff --git a/arch/nios2/kernel/head.S b/arch/nios2/kernel/head.S
new file mode 100644
index 000000000000..372ce4a33018
--- /dev/null
+++ b/arch/nios2/kernel/head.S
@@ -0,0 +1,175 @@
+/*
+ * Copyright (C) 2009 Wind River Systems Inc
+ * Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
+ * Copyright (C) 2004 Microtronix Datacom Ltd
+ * Copyright (C) 2001 Vic Phillips, Microtronix Datacom Ltd.
+ *
+ * Based on head.S for Altera's Excalibur development board with nios processor
+ *
+ * Based on the following from the Excalibur sdk distribution:
+ * NA_MemoryMap.s, NR_JumpToStart.s, NR_Setup.s, NR_CWPManager.s
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <asm/thread_info.h>
+#include <asm/processor.h>
+#include <asm/cache.h>
+#include <asm/page.h>
+#include <asm/asm-offsets.h>
+#include <asm/asm-macros.h>
+
+/*
+ * ZERO_PAGE is a special page that is used for zero-initialized
+ * data and COW.
+ */
+.data
+.global empty_zero_page
+.align 12
+empty_zero_page:
+ .space PAGE_SIZE
+
+/*
+ * This global variable is used as an extension to the nios'
+ * STATUS register to emulate a user/supervisor mode.
+ */
+ .data
+ .align 2
+ .set noat
+
+ .global _current_thread
+_current_thread:
+ .long 0
+/*
+ * Input(s): passed from u-boot
+ * r4 - Optional pointer to a board information structure.
+ * r5 - Optional pointer to the physical starting address of the init RAM
+ * disk.
+ * r6 - Optional pointer to the physical ending address of the init RAM
+ * disk.
+ * r7 - Optional pointer to the physical starting address of any kernel
+ * command-line parameters.
+ */
+
+/*
+ * First executable code - detected and jumped to by the ROM bootstrap
+ * if the code resides in flash (looks for "Nios" at offset 0x0c from
+ * the potential executable image).
+ */
+ __HEAD
+ENTRY(_start)
+ wrctl status, r0 /* Disable interrupts */
+
+ /* Initialize all cache lines within the instruction cache */
+ movia r1, NIOS2_ICACHE_SIZE
+ movui r2, NIOS2_ICACHE_LINE_SIZE
+
+icache_init:
+ initi r1
+ sub r1, r1, r2
+ bgt r1, r0, icache_init
+ br 1f
+
+	/*
+	 * This is the default location for the exception handler. The code
+	 * here just jumps to our real handler.
+	 */
+ENTRY(exception_handler_hook)
+ movia r24, inthandler
+ jmp r24
+
+ENTRY(fast_handler)
+ nextpc et
+helper:
+ stw r3, r3save - helper(et)
+
+ rdctl r3 , pteaddr
+ srli r3, r3, 12
+ slli r3, r3, 2
+ movia et, pgd_current
+
+ ldw et, 0(et)
+ add r3, et, r3
+ ldw et, 0(r3)
+
+ rdctl r3, pteaddr
+ andi r3, r3, 0xfff
+ add et, r3, et
+ ldw et, 0(et)
+ wrctl tlbacc, et
+ nextpc et
+helper2:
+ ldw r3, r3save - helper2(et)
+ subi ea, ea, 4
+ eret
+r3save:
+ .word 0x0
+ENTRY(fast_handler_end)
+
+1:
+ /*
+ * After the instruction cache is initialized, the data cache must
+ * also be initialized.
+ */
+ movia r1, NIOS2_DCACHE_SIZE
+ movui r2, NIOS2_DCACHE_LINE_SIZE
+
+dcache_init:
+ initd 0(r1)
+ sub r1, r1, r2
+ bgt r1, r0, dcache_init
+
+ nextpc r1 /* Find out where we are */
+chkadr:
+ movia r2, chkadr
+	beq	r1, r2, finish_move	/* Already running from RAM; copy done */
+ addi r1, r1,(_start - chkadr) /* Source */
+ movia r2, _start /* Destination */
+ movia r3, __bss_start /* End of copy */
+
+loop_move: /* r1: src, r2: dest, r3: last dest */
+ ldw r8, 0(r1) /* load a word from [r1] */
+ stw r8, 0(r2) /* store a word to dest [r2] */
+ flushd 0(r2) /* Flush cache for safety */
+ addi r1, r1, 4 /* inc the src addr */
+ addi r2, r2, 4 /* inc the dest addr */
+ blt r2, r3, loop_move
+
+ movia r1, finish_move /* VMA(_start)->l1 */
+ jmp r1 /* jmp to _start */
+
+finish_move:
+
+ /* Mask off all possible interrupts */
+ wrctl ienable, r0
+
+ /* Clear .bss */
+ movia r2, __bss_start
+ movia r1, __bss_stop
+1:
+ stb r0, 0(r2)
+ addi r2, r2, 1
+ bne r1, r2, 1b
+
+ movia r1, init_thread_union /* set stack at top of the task union */
+ addi sp, r1, THREAD_SIZE
+ movia r2, _current_thread /* Remember current thread */
+ stw r1, 0(r2)
+
+ movia r1, nios2_boot_init /* save args r4-r7 passed from u-boot */
+ callr r1
+
+ movia r1, start_kernel /* call start_kernel as a subroutine */
+ callr r1
+
+ /* If we return from start_kernel, break to the oci debugger and
+ * buggered we are.
+ */
+ break
+
+ /* End of startup code */
+.set at
diff --git a/arch/nios2/kernel/insnemu.S b/arch/nios2/kernel/insnemu.S
new file mode 100644
index 000000000000..1c6b651e770d
--- /dev/null
+++ b/arch/nios2/kernel/insnemu.S
@@ -0,0 +1,592 @@
+/*
+ * Copyright (C) 2003-2013 Altera Corporation
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include <linux/linkage.h>
+#include <asm/entry.h>
+
+.set noat
+.set nobreak
+
+/*
+* Explicitly allow the use of r1 (the assembler temporary register)
+* within this code. This register is normally reserved for the use of
+* the compiler.
+*/
+
+ENTRY(instruction_trap)
+ ldw r1, PT_R1(sp) // Restore registers
+ ldw r2, PT_R2(sp)
+ ldw r3, PT_R3(sp)
+ ldw r4, PT_R4(sp)
+ ldw r5, PT_R5(sp)
+ ldw r6, PT_R6(sp)
+ ldw r7, PT_R7(sp)
+ ldw r8, PT_R8(sp)
+ ldw r9, PT_R9(sp)
+ ldw r10, PT_R10(sp)
+ ldw r11, PT_R11(sp)
+ ldw r12, PT_R12(sp)
+ ldw r13, PT_R13(sp)
+ ldw r14, PT_R14(sp)
+ ldw r15, PT_R15(sp)
+ ldw ra, PT_RA(sp)
+ ldw fp, PT_FP(sp)
+ ldw gp, PT_GP(sp)
+ ldw et, PT_ESTATUS(sp)
+ wrctl estatus, et
+ ldw ea, PT_EA(sp)
+ ldw et, PT_SP(sp) /* backup sp in et */
+
+ addi sp, sp, PT_REGS_SIZE
+
+ /* INSTRUCTION EMULATION
+ * ---------------------
+ *
+ * Nios II processors generate exceptions for unimplemented instructions.
+ * The routines below emulate these instructions. Depending on the
+ * processor core, the only instructions that might need to be emulated
+ * are div, divu, mul, muli, mulxss, mulxsu, and mulxuu.
+ *
+ * The emulations match the instructions, except for the following
+ * limitations:
+ *
+ * 1) The emulation routines do not emulate the use of the exception
+ * temporary register (et) as a source operand because the exception
+ * handler has already modified it.
+ *
+ * 2) The routines do not emulate the use of the stack pointer (sp) or
+ * the exception return address register (ea) as a destination because
+ * modifying these registers crashes the exception handler or the
+ * interrupted routine.
+ *
+ * Detailed Design
+ * ---------------
+ *
+ * The emulation routines expect the contents of integer registers r0-r31
+ * to be on the stack at addresses sp, 4(sp), 8(sp), ... 124(sp). The
+ * routines retrieve source operands from the stack and modify the
+ * destination register's value on the stack prior to the end of the
+ * exception handler. Then all registers except the destination register
+ * are restored to their previous values.
+ *
+ * The instruction that causes the exception is found at address -4(ea).
+ * The instruction's OP and OPX fields identify the operation to be
+ * performed.
+ *
+ * One instruction, muli, is an I-type instruction that is identified by
+ * an OP field of 0x24.
+ *
+ * muli AAAAA,BBBBB,IIIIIIIIIIIIIIII,-0x24-
+ * 27 22 6 0 <-- LSB of field
+ *
+ * The remaining emulated instructions are R-type and have an OP field
+ * of 0x3a. Their OPX fields identify them.
+ *
+ * R-type AAAAA,BBBBB,CCCCC,XXXXXX,NNNNN,-0x3a-
+ * 27 22 17 11 6 0 <-- LSB of field
+ *
+ *
+ * Opcode Encoding. muli is identified by its OP value. Then OPX & 0x02
+ * is used to differentiate between the division opcodes and the
+ * remaining multiplication opcodes.
+ *
+ * Instruction OP OPX OPX & 0x02
+ * ----------- ---- ---- ----------
+ * muli 0x24
+ * divu 0x3a 0x24 0
+ * div 0x3a 0x25 0
+ * mul 0x3a 0x27 != 0
+ * mulxuu 0x3a 0x07 != 0
+ * mulxsu 0x3a 0x17 != 0
+ * mulxss 0x3a 0x1f != 0
+ */
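+
+ /*
+ * An illustrative C sketch of the same field extraction (variable names
+ * are ours, not part of this file). The rotate/and sequence below pulls
+ * out the same fields, pre-scaled by 4 where they are used as stack
+ * offsets:
+ *
+ *     op    = insn & 0x3f;             bits  5.. 0
+ *     a     = (insn >> 27) & 0x1f;     bits 31..27, rA
+ *     b     = (insn >> 22) & 0x1f;     bits 26..22, rB
+ *     c     = (insn >> 17) & 0x1f;     bits 21..17, rC   (R-type only)
+ *     opx   = (insn >> 11) & 0x3f;     bits 16..11, OPX  (R-type only)
+ *     imm16 = (insn >>  6) & 0xffff;   bits 21.. 6, IMM16 (I-type only)
+ */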
+
+
+ /*
+ * Save everything on the stack to make it easy for the emulation
+ * routines to retrieve the source register operands.
+ */
+
+ addi sp, sp, -128
+ stw zero, 0(sp) /* Save zero on stack to avoid special case for r0. */
+ stw r1, 4(sp)
+ stw r2, 8(sp)
+ stw r3, 12(sp)
+ stw r4, 16(sp)
+ stw r5, 20(sp)
+ stw r6, 24(sp)
+ stw r7, 28(sp)
+ stw r8, 32(sp)
+ stw r9, 36(sp)
+ stw r10, 40(sp)
+ stw r11, 44(sp)
+ stw r12, 48(sp)
+ stw r13, 52(sp)
+ stw r14, 56(sp)
+ stw r15, 60(sp)
+ stw r16, 64(sp)
+ stw r17, 68(sp)
+ stw r18, 72(sp)
+ stw r19, 76(sp)
+ stw r20, 80(sp)
+ stw r21, 84(sp)
+ stw r22, 88(sp)
+ stw r23, 92(sp)
+ /* Don't bother to save et. It's already been changed. */
+ rdctl r5, estatus
+ stw r5, 100(sp)
+
+ stw gp, 104(sp)
+ stw et, 108(sp) /* et contains previous sp value. */
+ stw fp, 112(sp)
+ stw ea, 116(sp)
+ stw ra, 120(sp)
+
+
+ /*
+ * Split the instruction into its fields. We need 4*A, 4*B, and 4*C as
+ * offsets to the stack pointer for access to the stored register values.
+ */
+ ldw r2,-4(ea) /* r2 = AAAAA,BBBBB,IIIIIIIIIIIIIIII,PPPPPP */
+ roli r3, r2, 7 /* r3 = BBB,IIIIIIIIIIIIIIII,PPPPPP,AAAAA,BB */
+ roli r4, r3, 3 /* r4 = IIIIIIIIIIIIIIII,PPPPPP,AAAAA,BBBBB */
+ roli r5, r4, 2 /* r5 = IIIIIIIIIIIIII,PPPPPP,AAAAA,BBBBB,II */
+ srai r4, r4, 16 /* r4 = (sign-extended) IMM16 */
+ roli r6, r5, 5 /* r6 = XXXX,NNNNN,PPPPPP,AAAAA,BBBBB,CCCCC,XX */
+ andi r2, r2, 0x3f /* r2 = 00000000000000000000000000,PPPPPP */
+ andi r3, r3, 0x7c /* r3 = 0000000000000000000000000,AAAAA,00 */
+ andi r5, r5, 0x7c /* r5 = 0000000000000000000000000,BBBBB,00 */
+ andi r6, r6, 0x7c /* r6 = 0000000000000000000000000,CCCCC,00 */
+
+ /* Now
+ * r2 = OP
+ * r3 = 4*A
+ * r4 = IMM16 (sign extended)
+ * r5 = 4*B
+ * r6 = 4*C
+ */
+
+ /*
+ * Get the operands.
+ *
+ * It is necessary to check for muli because it uses an I-type
+ * instruction format, while the other instructions have an R-type
+ * format.
+ *
+ * Prepare for either multiplication or division loop.
+ * They both loop 32 times.
+ */
+ movi r14, 32
+
+ add r3, r3, sp /* r3 = address of A-operand. */
+ ldw r3, 0(r3) /* r3 = A-operand. */
+ movi r7, 0x24 /* muli opcode (I-type instruction format) */
+ beq r2, r7, mul_immed /* muli doesn't use the B register as a source */
+
+ add r5, r5, sp /* r5 = address of B-operand. */
+ ldw r5, 0(r5) /* r5 = B-operand. */
+ /* r4 = SSSSSSSSSSSSSSSS,-----IMM16------ */
+ /* IMM16 not needed, align OPX portion */
+ /* r4 = SSSSSSSSSSSSSSSS,CCCCC,-OPX--,00000 */
+ srli r4, r4, 5 /* r4 = 00000,SSSSSSSSSSSSSSSS,CCCCC,-OPX-- */
+ andi r4, r4, 0x3f /* r4 = 00000000000000000000000000,-OPX-- */
+
+ /* Now
+ * r2 = OP
+ * r3 = src1
+ * r5 = src2
+ * r4 = OPX (no longer can be muli)
+ * r6 = 4*C
+ */
+
+
+ /*
+ * Multiply or Divide?
+ */
+ andi r7, r4, 0x02 /* For R-type multiply instructions,
+ OPX & 0x02 != 0 */
+ bne r7, zero, multiply
+
+
+ /* DIVISION
+ *
+ * Divide an unsigned dividend by an unsigned divisor using
+ * a shift-and-subtract algorithm. The example below shows
+ * 43 div 7 = 6 for 8-bit integers. This classic algorithm uses a
+ * single register to store both the dividend and the quotient,
+ * allowing both values to be shifted with a single instruction.
+ *
+ * remainder dividend:quotient
+ * --------- -----------------
+ * initialize 00000000 00101011:
+ * shift 00000000 0101011:_
+ * remainder >= divisor? no 00000000 0101011:0
+ * shift 00000000 101011:0_
+ * remainder >= divisor? no 00000000 101011:00
+ * shift 00000001 01011:00_
+ * remainder >= divisor? no 00000001 01011:000
+ * shift 00000010 1011:000_
+ * remainder >= divisor? no 00000010 1011:0000
+ * shift 00000101 011:0000_
+ * remainder >= divisor? no 00000101 011:00000
+ * shift 00001010 11:00000_
+ * remainder >= divisor? yes 00001010 11:000001
+ * remainder -= divisor - 00000111
+ * ----------
+ * 00000011 11:000001
+ * shift 00000111 1:000001_
+ * remainder >= divisor? yes 00000111 1:0000011
+ * remainder -= divisor - 00000111
+ * ----------
+ * 00000000 1:0000011
+ * shift 00000001 :0000011_
+ * remainder >= divisor? no 00000001 :00000110
+ *
+ * The quotient is 00000110.
+ */
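+
+ /* The same restoring shift-and-subtract division, as a C sketch
+ * (illustrative only; the variable names are ours):
+ *
+ *     unsigned int rem = 0, q = dividend;
+ *     for (i = 0; i < 32; i++) {
+ *         rem = (rem << 1) | (q >> 31);    shift remainder:dividend left
+ *         q <<= 1;
+ *         if (rem >= divisor) {
+ *             rem -= divisor;
+ *             q |= 1;                       set the new quotient bit
+ *         }
+ *     }
+ *     q now holds the quotient, rem the remainder.
+ */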
+
+divide:
+ /*
+ * Prepare for division by assuming the result
+ * is unsigned, and storing its "sign" as 0.
+ */
+ movi r17, 0
+
+
+ /* Which division opcode? */
+ xori r7, r4, 0x25 /* OPX of div */
+ bne r7, zero, unsigned_division
+
+
+ /*
+ * OPX is div. Determine and store the sign of the quotient.
+ * Then take the absolute value of both operands.
+ */
+ xor r17, r3, r5 /* MSB contains sign of quotient */
+ bge r3,zero,dividend_is_nonnegative
+ sub r3, zero, r3 /* -r3 */
+dividend_is_nonnegative:
+ bge r5, zero, divisor_is_nonnegative
+ sub r5, zero, r5 /* -r5 */
+divisor_is_nonnegative:
+
+
+unsigned_division:
+ /* Initialize the unsigned-division loop. */
+ movi r13, 0 /* remainder = 0 */
+
+ /* Now
+ * r3 = dividend : quotient
+ * r4 = 0x25 for div, 0x24 for divu
+ * r5 = divisor
+ * r13 = remainder
+ * r14 = loop counter (already initialized to 32)
+ * r17 = MSB contains sign of quotient
+ */
+
+
+ /*
+ * for (count = 32; count > 0; --count)
+ * {
+ */
+divide_loop:
+
+ /*
+ * Division:
+ *
+ * (remainder:dividend:quotient) <<= 1;
+ */
+ slli r13, r13, 1
+ cmplt r7, r3, zero /* r7 = MSB of r3 */
+ or r13, r13, r7
+ slli r3, r3, 1
+
+
+ /*
+ * if (remainder >= divisor)
+ * {
+ * set LSB of quotient
+ * remainder -= divisor;
+ * }
+ */
+ bltu r13, r5, div_skip
+ ori r3, r3, 1
+ sub r13, r13, r5
+div_skip:
+
+ /*
+ * }
+ */
+ subi r14, r14, 1
+ bne r14, zero, divide_loop
+
+
+ /* Now
+ * r3 = quotient
+ * r4 = 0x25 for div, 0x24 for divu
+ * r6 = 4*C
+ * r17 = MSB contains sign of quotient
+ */
+
+
+ /*
+ * Conditionally negate signed quotient. If quotient is unsigned,
+ * the sign already is initialized to 0.
+ */
+ bge r17, zero, quotient_is_nonnegative
+ sub r3, zero, r3 /* -r3 */
+ quotient_is_nonnegative:
+
+
+ /*
+ * Final quotient is in r3.
+ */
+ add r6, r6, sp
+ stw r3, 0(r6) /* write quotient to stack */
+ br restore_registers
+
+
+
+
+ /* MULTIPLICATION
+ *
+ * A "product" is the number that one gets by summing a "multiplicand"
+ * several times. The "multiplier" specifies the number of copies of the
+ * multiplicand that are summed.
+ *
+ * Actual multiplication algorithms don't use repeated addition, however.
+ * Shift-and-add algorithms get the same answer as repeated addition, and
+ * they are faster. To compute the lower half of a product (pppp below)
+ * one shifts the product left before adding in each of the partial
+ * products (a * mmmm) through (d * mmmm).
+ *
+ * To compute the upper half of a product (PPPP below), one adds in the
+ * partial products (d * mmmm) through (a * mmmm), each time following
+ * the add by a right shift of the product.
+ *
+ * mmmm
+ * * abcd
+ * ------
+ * #### = d * mmmm
+ * #### = c * mmmm
+ * #### = b * mmmm
+ * #### = a * mmmm
+ * --------
+ * PPPPpppp
+ *
+ * The example above shows 4 partial products. Computing actual Nios II
+ * products requires 32 partials.
+ *
+ * It is possible to compute the result of mulxsu from the result of
+ * mulxuu because the only difference between the results of these two
+ * opcodes is the value of the partial product associated with the sign
+ * bit of rA.
+ *
+ * mulxsu = mulxuu - (rA < 0) ? rB : 0;
+ *
+ * It is possible to compute the result of mulxss from the result of
+ * mulxsu because the only difference between the results of these two
+ * opcodes is the value of the partial product associated with the sign
+ * bit of rB.
+ *
+ * mulxss = mulxsu - (rB < 0) ? rA : 0;
+ *
+ */
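+
+ /* A C sketch of the loop below (illustrative only; names are ours).
+ * A is the multiplicand, B the multiplier; "lo" accumulates the mul
+ * result and "hi" the mulxuu result:
+ *
+ *     u32 lo = 0, hi = 0, mhi = B, mlo = B;
+ *     for (i = 0; i < 32; i++) {
+ *         u32 carry = 0;
+ *         lo <<= 1;
+ *         if (mlo & 1) {
+ *             hi += A;
+ *             carry = (hi < A) ? 0x80000000 : 0;   unsigned overflow
+ *         }
+ *         if ((s32)mhi < 0)
+ *             lo += A;
+ *         hi = (hi >> 1) | carry;
+ *         mhi <<= 1;
+ *         mlo >>= 1;
+ *     }
+ *     mulxsu and mulxss are then derived from hi as described above.
+ */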
+
+mul_immed:
+ /* Opcode is muli. Change it into mul for remainder of algorithm. */
+ mov r6, r5 /* Field B is dest register, not field C. */
+ mov r5, r4 /* Field IMM16 is src2, not field B. */
+ movi r4, 0x27 /* OPX of mul is 0x27 */
+
+multiply:
+ /* Initialize the multiplication loop. */
+ movi r9, 0 /* mul_product = 0 */
+ movi r10, 0 /* mulxuu_product = 0 */
+ mov r11, r5 /* save original multiplier for mulxsu and mulxss */
+ mov r12, r5 /* mulxuu_multiplier (will be shifted) */
+ movi r16, 1 /* used to create "rori B,A,1" from "ror B,A,r16" */
+
+ /* Now
+ * r3 = multiplicand
+ * r5 = mul_multiplier
+ * r6 = 4 * dest_register (used later as offset to sp)
+ * r7 = temp
+ * r9 = mul_product
+ * r10 = mulxuu_product
+ * r11 = original multiplier
+ * r12 = mulxuu_multiplier
+ * r14 = loop counter (already initialized)
+ * r16 = 1
+ */
+
+
+ /*
+ * for (count = 32; count > 0; --count)
+ * {
+ */
+multiply_loop:
+
+ /*
+ * mul_product <<= 1;
+ * lsb = multiplier & 1;
+ */
+ slli r9, r9, 1
+ andi r7, r12, 1
+
+ /*
+ * if (lsb == 1)
+ * {
+ * mulxuu_product += multiplicand;
+ * }
+ */
+ beq r7, zero, mulx_skip
+ add r10, r10, r3
+ cmpltu r7, r10, r3 /* Save the carry from the MSB of mulxuu_product. */
+ ror r7, r7, r16 /* r7 = 0x80000000 on carry, or else 0x00000000 */
+mulx_skip:
+
+ /*
+ * if (MSB of mul_multiplier == 1)
+ * {
+ * mul_product += multiplicand;
+ * }
+ */
+ bge r5, zero, mul_skip
+ add r9, r9, r3
+mul_skip:
+
+ /*
+ * mulxuu_product >>= 1; logical shift
+ * mul_multiplier <<= 1; done with MSB
+ * mulx_multiplier >>= 1; done with LSB
+ */
+ srli r10, r10, 1
+ or r10, r10, r7 /* OR in the saved carry bit. */
+ slli r5, r5, 1
+ srli r12, r12, 1
+
+
+ /*
+ * }
+ */
+ subi r14, r14, 1
+ bne r14, zero, multiply_loop
+
+
+ /*
+ * Multiply emulation loop done.
+ */
+
+ /* Now
+ * r3 = multiplicand
+ * r4 = OPX
+ * r6 = 4 * dest_register (used later as offset to sp)
+ * r7 = temp
+ * r9 = mul_product
+ * r10 = mulxuu_product
+ * r11 = original multiplier
+ */
+
+
+ /* Calculate address for result from 4 * dest_register */
+ add r6, r6, sp
+
+
+ /*
+ * Select/compute the result based on OPX.
+ */
+
+
+ /* OPX == mul? Then store. */
+ xori r7, r4, 0x27
+ beq r7, zero, store_product
+
+ /* It's one of the mulx.. opcodes. Move over the result. */
+ mov r9, r10
+
+ /* OPX == mulxuu? Then store. */
+ xori r7, r4, 0x07
+ beq r7, zero, store_product
+
+ /* Compute mulxsu
+ *
+ * mulxsu = mulxuu - (rA < 0) ? rB : 0;
+ */
+ bge r3, zero, mulxsu_skip
+ sub r9, r9, r11
+mulxsu_skip:
+
+ /* OPX == mulxsu? Then store. */
+ xori r7, r4, 0x17
+ beq r7, zero, store_product
+
+ /* Compute mulxss
+ *
+ * mulxss = mulxsu - (rB < 0) ? rA : 0;
+ */
+ bge r11,zero,mulxss_skip
+ sub r9, r9, r3
+mulxss_skip:
+ /* At this point, assume that OPX is mulxss, so store */
+
+
+store_product:
+ stw r9, 0(r6)
+
+
+restore_registers:
+ /* No need to restore r0. */
+ ldw r5, 100(sp)
+ wrctl estatus, r5
+
+ ldw r1, 4(sp)
+ ldw r2, 8(sp)
+ ldw r3, 12(sp)
+ ldw r4, 16(sp)
+ ldw r5, 20(sp)
+ ldw r6, 24(sp)
+ ldw r7, 28(sp)
+ ldw r8, 32(sp)
+ ldw r9, 36(sp)
+ ldw r10, 40(sp)
+ ldw r11, 44(sp)
+ ldw r12, 48(sp)
+ ldw r13, 52(sp)
+ ldw r14, 56(sp)
+ ldw r15, 60(sp)
+ ldw r16, 64(sp)
+ ldw r17, 68(sp)
+ ldw r18, 72(sp)
+ ldw r19, 76(sp)
+ ldw r20, 80(sp)
+ ldw r21, 84(sp)
+ ldw r22, 88(sp)
+ ldw r23, 92(sp)
+ /* Does not need to restore et */
+ ldw gp, 104(sp)
+
+ ldw fp, 112(sp)
+ ldw ea, 116(sp)
+ ldw ra, 120(sp)
+ ldw sp, 108(sp) /* last restore sp */
+ eret
+
+.set at
+.set break
diff --git a/arch/nios2/kernel/irq.c b/arch/nios2/kernel/irq.c
new file mode 100644
index 000000000000..f5b74ae69b5b
--- /dev/null
+++ b/arch/nios2/kernel/irq.c
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2013 Altera Corporation
+ * Copyright (C) 2011 Tobias Klauser <tklauser@distanz.ch>
+ * Copyright (C) 2008 Thomas Chou <thomas@wytron.com.tw>
+ *
+ * based on irq.c from m68k which is:
+ *
+ * Copyright (C) 2007 Greg Ungerer <gerg@snapgear.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+
+static u32 ienable;
+
+asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs)
+{
+ struct pt_regs *oldregs = set_irq_regs(regs);
+ int irq;
+
+ irq_enter();
+ irq = irq_find_mapping(NULL, hwirq);
+ generic_handle_irq(irq);
+ irq_exit();
+
+ set_irq_regs(oldregs);
+}
+
+static void chip_unmask(struct irq_data *d)
+{
+ ienable |= (1 << d->hwirq);
+ WRCTL(CTL_IENABLE, ienable);
+}
+
+static void chip_mask(struct irq_data *d)
+{
+ ienable &= ~(1 << d->hwirq);
+ WRCTL(CTL_IENABLE, ienable);
+}
+
+static struct irq_chip m_irq_chip = {
+ .name = "NIOS2-INTC",
+ .irq_unmask = chip_unmask,
+ .irq_mask = chip_mask,
+};
+
+static int irq_map(struct irq_domain *h, unsigned int virq,
+ irq_hw_number_t hw_irq_num)
+{
+ irq_set_chip_and_handler(virq, &m_irq_chip, handle_level_irq);
+
+ return 0;
+}
+
+static struct irq_domain_ops irq_ops = {
+ .map = irq_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+void __init init_IRQ(void)
+{
+ struct irq_domain *domain;
+ struct device_node *node;
+
+ node = of_find_compatible_node(NULL, NULL, "altr,nios2-1.0");
+ if (!node)
+ node = of_find_compatible_node(NULL, NULL, "altr,nios2-1.1");
+
+ BUG_ON(!node);
+
+ domain = irq_domain_add_linear(node, NIOS2_CPU_NR_IRQS, &irq_ops, NULL);
+ BUG_ON(!domain);
+
+ irq_set_default_host(domain);
+ of_node_put(node);
+ /* Load the initial ienable value */
+ ienable = RDCTL(CTL_IENABLE);
+}
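+
+/*
+ * Illustrative device tree fragment (not part of this patch) for a
+ * peripheral using this one-cell interrupt specifier, assuming the
+ * Nios II CPU node is labelled "cpu" and acts as the interrupt parent:
+ *
+ *	uart0: serial@18001600 {
+ *		interrupt-parent = <&cpu>;
+ *		interrupts = <1>;
+ *	};
+ */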
diff --git a/arch/nios2/kernel/misaligned.c b/arch/nios2/kernel/misaligned.c
new file mode 100644
index 000000000000..4e5907a0cabe
--- /dev/null
+++ b/arch/nios2/kernel/misaligned.c
@@ -0,0 +1,256 @@
+/*
+ * linux/arch/nios2/kernel/misaligned.c
+ *
+ * basic emulation for mis-aligned accesses on the NIOS II cpu
+ * modelled after the version for arm in arm/alignment.c
+ *
+ * Brad Parker <brad@heeltoe.com>
+ * Copyright (C) 2010 Ambient Corporation
+ * Copyright (c) 2010 Altera Corporation, San Jose, California, USA.
+ * Copyright (c) 2010 Arrow Electronics, Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file COPYING in the main directory of
+ * this archive for more details.
+ */
+
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/proc_fs.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <linux/seq_file.h>
+
+#include <asm/traps.h>
+#include <asm/unaligned.h>
+
+/* instructions we emulate */
+#define INST_LDHU 0x0b
+#define INST_STH 0x0d
+#define INST_LDH 0x0f
+#define INST_STW 0x15
+#define INST_LDW 0x17
+
+static unsigned long ma_user, ma_kern, ma_skipped, ma_half, ma_word;
+
+static unsigned int ma_usermode;
+#define UM_WARN 0x01
+#define UM_FIXUP 0x02
+#define UM_SIGNAL 0x04
+#define KM_WARN 0x08
+
+/* see arch/nios2/include/asm/ptrace.h */
+static u8 sys_stack_frame_reg_offset[] = {
+ /* struct pt_regs */
+ 8, 9, 10, 11, 12, 13, 14, 15, 1, 2, 3, 4, 5, 6, 7, 0,
+ /* struct switch_stack */
+ 16, 17, 18, 19, 20, 21, 22, 23, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static int reg_offsets[32];
+
+static inline u32 get_reg_val(struct pt_regs *fp, int reg)
+{
+ u8 *p = ((u8 *)fp) + reg_offsets[reg];
+
+ return *(u32 *)p;
+}
+
+static inline void put_reg_val(struct pt_regs *fp, int reg, u32 val)
+{
+ u8 *p = ((u8 *)fp) + reg_offsets[reg];
+ *(u32 *)p = val;
+}
+
+/*
+ * (mis)alignment handler
+ */
+asmlinkage void handle_unaligned_c(struct pt_regs *fp, int cause)
+{
+ u32 isn, addr, val;
+ int in_kernel;
+ u8 a, b, d0, d1, d2, d3;
+ u16 imm16;
+ unsigned int fault;
+
+ /* back up one instruction */
+ fp->ea -= 4;
+
+ if (fixup_exception(fp)) {
+ ma_skipped++;
+ return;
+ }
+
+ in_kernel = !user_mode(fp);
+
+ isn = *(unsigned long *)(fp->ea);
+
+ fault = 0;
+
+ /* do fixup if in kernel mode or user fixup mode is turned on */
+ if (in_kernel || (ma_usermode & UM_FIXUP)) {
+ /* decompose instruction */
+ a = (isn >> 27) & 0x1f;
+ b = (isn >> 22) & 0x1f;
+ imm16 = (isn >> 6) & 0xffff;
+ addr = get_reg_val(fp, a) + imm16;
+
+ /* do fixup to saved registers */
+ switch (isn & 0x3f) {
+ case INST_LDHU:
+ fault |= __get_user(d0, (u8 *)(addr+0));
+ fault |= __get_user(d1, (u8 *)(addr+1));
+ val = (d1 << 8) | d0;
+ put_reg_val(fp, b, val);
+ ma_half++;
+ break;
+ case INST_STH:
+ val = get_reg_val(fp, b);
+ d1 = val >> 8;
+ d0 = val >> 0;
+
+ pr_debug("sth: ra=%d (%08x) rb=%d (%08x), imm16 %04x addr %08x val %08x\n",
+ a, get_reg_val(fp, a),
+ b, get_reg_val(fp, b),
+ imm16, addr, val);
+
+ if (in_kernel) {
+ *(u8 *)(addr+0) = d0;
+ *(u8 *)(addr+1) = d1;
+ } else {
+ fault |= __put_user(d0, (u8 *)(addr+0));
+ fault |= __put_user(d1, (u8 *)(addr+1));
+ }
+ ma_half++;
+ break;
+ case INST_LDH:
+ fault |= __get_user(d0, (u8 *)(addr+0));
+ fault |= __get_user(d1, (u8 *)(addr+1));
+ val = (short)((d1 << 8) | d0);
+ put_reg_val(fp, b, val);
+ ma_half++;
+ break;
+ case INST_STW:
+ val = get_reg_val(fp, b);
+ d3 = val >> 24;
+ d2 = val >> 16;
+ d1 = val >> 8;
+ d0 = val >> 0;
+ if (in_kernel) {
+ *(u8 *)(addr+0) = d0;
+ *(u8 *)(addr+1) = d1;
+ *(u8 *)(addr+2) = d2;
+ *(u8 *)(addr+3) = d3;
+ } else {
+ fault |= __put_user(d0, (u8 *)(addr+0));
+ fault |= __put_user(d1, (u8 *)(addr+1));
+ fault |= __put_user(d2, (u8 *)(addr+2));
+ fault |= __put_user(d3, (u8 *)(addr+3));
+ }
+ ma_word++;
+ break;
+ case INST_LDW:
+ fault |= __get_user(d0, (u8 *)(addr+0));
+ fault |= __get_user(d1, (u8 *)(addr+1));
+ fault |= __get_user(d2, (u8 *)(addr+2));
+ fault |= __get_user(d3, (u8 *)(addr+3));
+ val = (d3 << 24) | (d2 << 16) | (d1 << 8) | d0;
+ put_reg_val(fp, b, val);
+ ma_word++;
+ break;
+ }
+ }
+
+ addr = RDCTL(CTL_BADADDR);
+ cause >>= 2;
+
+ if (fault) {
+ if (in_kernel) {
+ pr_err("fault during kernel misaligned fixup @ %#lx; addr 0x%08x; isn=0x%08x\n",
+ fp->ea, (unsigned int)addr,
+ (unsigned int)isn);
+ } else {
+ pr_err("fault during user misaligned fixup @ %#lx; isn=%08x addr=0x%08x sp=0x%08lx pid=%d\n",
+ fp->ea,
+ (unsigned int)isn, addr, fp->sp,
+ current->pid);
+
+ _exception(SIGSEGV, fp, SEGV_MAPERR, fp->ea);
+ return;
+ }
+ }
+
+ /*
+ * kernel mode -
+ * note exception and skip bad instruction (return)
+ */
+ if (in_kernel) {
+ ma_kern++;
+ fp->ea += 4;
+
+ if (ma_usermode & KM_WARN) {
+ pr_err("kernel unaligned access @ %#lx; BADADDR 0x%08x; cause=%d, isn=0x%08x\n",
+ fp->ea,
+ (unsigned int)addr, cause,
+ (unsigned int)isn);
+ /* show_regs(fp); */
+ }
+
+ return;
+ }
+
+ ma_user++;
+
+ /*
+ * user mode -
+ * possibly warn,
+ * possibly send SIGBUS signal to process
+ */
+ if (ma_usermode & UM_WARN) {
+ pr_err("user unaligned access @ %#lx; isn=0x%08lx ea=0x%08lx ra=0x%08lx sp=0x%08lx\n",
+ (unsigned long)addr, (unsigned long)isn,
+ fp->ea, fp->ra, fp->sp);
+ }
+
+ if (ma_usermode & UM_SIGNAL)
+ _exception(SIGBUS, fp, BUS_ADRALN, fp->ea);
+ else
+ fp->ea += 4; /* else advance */
+}
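+
+/*
+ * Worked example (illustrative, not from the original): a misaligned
+ * "ldhu r3, 5(r2)" traps here with a = 2, b = 3, imm16 = 5. The two
+ * bytes at r2+5 and r2+6 are fetched individually, combined into a
+ * little-endian halfword, written to the saved r3 slot, and execution
+ * resumes after the faulting instruction.
+ */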
+
+static void __init misaligned_calc_reg_offsets(void)
+{
+ int i, r, offset;
+
+ /* pre-calc offsets of registers on sys call stack frame */
+ offset = 0;
+
+ /* struct pt_regs */
+ for (i = 0; i < 16; i++) {
+ r = sys_stack_frame_reg_offset[i];
+ reg_offsets[r] = offset;
+ offset += 4;
+ }
+
+ /* struct switch_stack */
+ offset = -sizeof(struct switch_stack);
+ for (i = 16; i < 32; i++) {
+ r = sys_stack_frame_reg_offset[i];
+ reg_offsets[r] = offset;
+ offset += 4;
+ }
+}
+
+
+static int __init misaligned_init(void)
+{
+ /* default mode - silent fix */
+ ma_usermode = UM_FIXUP | KM_WARN;
+
+ misaligned_calc_reg_offsets();
+
+ return 0;
+}
+
+fs_initcall(misaligned_init);
diff --git a/arch/nios2/kernel/module.c b/arch/nios2/kernel/module.c
new file mode 100644
index 000000000000..cc924a38f22a
--- /dev/null
+++ b/arch/nios2/kernel/module.c
@@ -0,0 +1,138 @@
+/*
+ * Kernel module support for Nios II.
+ *
+ * Copyright (C) 2004 Microtronix Datacom Ltd.
+ * Written by Wentao Xu <xuwentao@microtronix.com>
+ * Copyright (C) 2001, 2003 Rusty Russell
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file COPYING in the main directory of this
+ * archive for more details.
+ */
+
+#include <linux/moduleloader.h>
+#include <linux/elf.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+
+#include <asm/pgtable.h>
+#include <asm/cacheflush.h>
+
+/*
+ * Modules should NOT be allocated with kmalloc for (obvious) reasons.
+ * But we do it for now to avoid relocation issues. CALL26/PCREL26 cannot reach
+ * from 0x80000000 (vmalloc area) to 0xc0000000 (kernel) (kmalloc returns
+ * addresses in 0xc0000000)
+ */
+void *module_alloc(unsigned long size)
+{
+ if (size == 0)
+ return NULL;
+ return kmalloc(size, GFP_KERNEL);
+}
+
+/* Free memory returned from module_alloc */
+void module_free(struct module *mod, void *module_region)
+{
+ kfree(module_region);
+}
+
+int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
+ unsigned int symindex, unsigned int relsec,
+ struct module *mod)
+{
+ unsigned int i;
+ Elf32_Rela *rela = (void *)sechdrs[relsec].sh_addr;
+
+ pr_debug("Applying relocate section %u to %u\n", relsec,
+ sechdrs[relsec].sh_info);
+
+ for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) {
+ /* This is where to make the change */
+ uint32_t word;
+ uint32_t *loc
+ = ((void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ + rela[i].r_offset);
+ /* This is the symbol it is referring to. Note that all
+ undefined symbols have been resolved. */
+ Elf32_Sym *sym
+ = ((Elf32_Sym *)sechdrs[symindex].sh_addr
+ + ELF32_R_SYM(rela[i].r_info));
+ uint32_t v = sym->st_value + rela[i].r_addend;
+
+ pr_debug("reltype %d 0x%x name:<%s>\n",
+ ELF32_R_TYPE(rela[i].r_info),
+ rela[i].r_offset, strtab + sym->st_name);
+
+ switch (ELF32_R_TYPE(rela[i].r_info)) {
+ case R_NIOS2_NONE:
+ break;
+ case R_NIOS2_BFD_RELOC_32:
+ *loc += v;
+ break;
+ case R_NIOS2_PCREL16:
+ v -= (uint32_t)loc + 4;
+ if ((int32_t)v > 0x7fff ||
+ (int32_t)v < -(int32_t)0x8000) {
+ pr_err("module %s: relocation overflow\n",
+ mod->name);
+ return -ENOEXEC;
+ }
+ word = *loc;
+ *loc = ((((word >> 22) << 16) | (v & 0xffff)) << 6) |
+ (word & 0x3f);
+ break;
+ case R_NIOS2_CALL26:
+ if (v & 3) {
+ pr_err("module %s: dangerous relocation\n",
+ mod->name);
+ return -ENOEXEC;
+ }
+ if ((v >> 28) != ((uint32_t)loc >> 28)) {
+ pr_err("module %s: relocation overflow\n",
+ mod->name);
+ return -ENOEXEC;
+ }
+ *loc = (*loc & 0x3f) | ((v >> 2) << 6);
+ break;
+ case R_NIOS2_HI16:
+ word = *loc;
+ *loc = ((((word >> 22) << 16) |
+ ((v >> 16) & 0xffff)) << 6) | (word & 0x3f);
+ break;
+ case R_NIOS2_LO16:
+ word = *loc;
+ *loc = ((((word >> 22) << 16) | (v & 0xffff)) << 6) |
+ (word & 0x3f);
+ break;
+ case R_NIOS2_HIADJ16:
+ {
+ Elf32_Addr word2;
+
+ word = *loc;
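+ /*
+ * %hiadj: round the high half up when bit 15 of the value is
+ * set, because the matching low-half relocation is applied
+ * with a sign-extending addi.
+ */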
+ word2 = ((v >> 16) + ((v >> 15) & 1)) & 0xffff;
+ *loc = ((((word >> 22) << 16) | word2) << 6) |
+ (word & 0x3f);
+ }
+ break;
+
+ default:
+ pr_err("module %s: Unknown reloc: %u\n",
+ mod->name, ELF32_R_TYPE(rela[i].r_info));
+ return -ENOEXEC;
+ }
+ }
+
+ return 0;
+}
+
+int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
+ struct module *me)
+{
+ flush_cache_all();
+ return 0;
+}
diff --git a/arch/nios2/kernel/nios2_ksyms.c b/arch/nios2/kernel/nios2_ksyms.c
new file mode 100644
index 000000000000..bf2f55d10a4d
--- /dev/null
+++ b/arch/nios2/kernel/nios2_ksyms.c
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2004 Microtronix Datacom Ltd
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file COPYING in the main directory of this
+ * archive for more details.
+ */
+
+#include <linux/export.h>
+#include <linux/string.h>
+
+/* string functions */
+
+EXPORT_SYMBOL(memcpy);
+EXPORT_SYMBOL(memset);
+EXPORT_SYMBOL(memmove);
+
+/*
+ * libgcc functions - functions that are used internally by the
+ * compiler... (prototypes are not correct, but that
+ * doesn't really matter since they're not versioned).
+ */
+#define DECLARE_EXPORT(name) extern void name(void); EXPORT_SYMBOL(name)
+
+DECLARE_EXPORT(__gcc_bcmp);
+DECLARE_EXPORT(__divsi3);
+DECLARE_EXPORT(__moddi3);
+DECLARE_EXPORT(__modsi3);
+DECLARE_EXPORT(__udivmoddi4);
+DECLARE_EXPORT(__udivsi3);
+DECLARE_EXPORT(__umoddi3);
+DECLARE_EXPORT(__umodsi3);
+DECLARE_EXPORT(__muldi3);
diff --git a/arch/nios2/kernel/process.c b/arch/nios2/kernel/process.c
new file mode 100644
index 000000000000..0e075b5ad2a5
--- /dev/null
+++ b/arch/nios2/kernel/process.c
@@ -0,0 +1,258 @@
+/*
+ * Architecture-dependent parts of process handling.
+ *
+ * Copyright (C) 2013 Altera Corporation
+ * Copyright (C) 2010 Tobias Klauser <tklauser@distanz.ch>
+ * Copyright (C) 2009 Wind River Systems Inc
+ * Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
+ * Copyright (C) 2004 Microtronix Datacom Ltd
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/export.h>
+#include <linux/sched.h>
+#include <linux/tick.h>
+#include <linux/uaccess.h>
+
+#include <asm/unistd.h>
+#include <asm/traps.h>
+#include <asm/cpuinfo.h>
+
+asmlinkage void ret_from_fork(void);
+asmlinkage void ret_from_kernel_thread(void);
+
+void (*pm_power_off)(void) = NULL;
+EXPORT_SYMBOL(pm_power_off);
+
+void arch_cpu_idle(void)
+{
+ local_irq_enable();
+}
+
+/*
+ * The development boards have no way to pull a board reset. Just jump to the
+ * cpu reset address and let the boot loader or the code in head.S take care of
+ * resetting peripherals.
+ */
+void machine_restart(char *__unused)
+{
+ pr_notice("Machine restart (%08x)...\n", cpuinfo.reset_addr);
+ local_irq_disable();
+ __asm__ __volatile__ (
+ "jmp %0\n\t"
+ :
+ : "r" (cpuinfo.reset_addr)
+ : "r4");
+}
+
+void machine_halt(void)
+{
+ pr_notice("Machine halt...\n");
+ local_irq_disable();
+ for (;;)
+ ;
+}
+
+/*
+ * There is no way to power off the development boards, so just spin for now.
+ * If we ever get a way to power off a board via a GPIO, we should add it here.
+ */
+void machine_power_off(void)
+{
+ pr_notice("Machine power off...\n");
+ local_irq_disable();
+ for (;;)
+ ;
+}
+
+void show_regs(struct pt_regs *regs)
+{
+ pr_notice("\n");
+ show_regs_print_info(KERN_DEFAULT);
+
+ pr_notice("r1: %08lx r2: %08lx r3: %08lx r4: %08lx\n",
+ regs->r1, regs->r2, regs->r3, regs->r4);
+
+ pr_notice("r5: %08lx r6: %08lx r7: %08lx r8: %08lx\n",
+ regs->r5, regs->r6, regs->r7, regs->r8);
+
+ pr_notice("r9: %08lx r10: %08lx r11: %08lx r12: %08lx\n",
+ regs->r9, regs->r10, regs->r11, regs->r12);
+
+ pr_notice("r13: %08lx r14: %08lx r15: %08lx\n",
+ regs->r13, regs->r14, regs->r15);
+
+ pr_notice("ra: %08lx fp: %08lx sp: %08lx gp: %08lx\n",
+ regs->ra, regs->fp, regs->sp, regs->gp);
+
+ pr_notice("ea: %08lx estatus: %08lx\n",
+ regs->ea, regs->estatus);
+}
+
+void flush_thread(void)
+{
+ set_fs(USER_DS);
+}
+
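+/*
+ * Build the initial switch_stack/pt_regs pair for a new task. For kernel
+ * threads, usp carries the thread function and arg its argument; they are
+ * parked in callee-saved r16/r17 and the thread is entered through
+ * ret_from_kernel_thread.
+ */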
+int copy_thread(unsigned long clone_flags,
+ unsigned long usp, unsigned long arg, struct task_struct *p)
+{
+ struct pt_regs *childregs = task_pt_regs(p);
+ struct pt_regs *regs;
+ struct switch_stack *stack;
+ struct switch_stack *childstack =
+ ((struct switch_stack *)childregs) - 1;
+
+ if (unlikely(p->flags & PF_KTHREAD)) {
+ memset(childstack, 0,
+ sizeof(struct switch_stack) + sizeof(struct pt_regs));
+
+ childstack->r16 = usp; /* fn */
+ childstack->r17 = arg;
+ childstack->ra = (unsigned long) ret_from_kernel_thread;
+ childregs->estatus = STATUS_PIE;
+ childregs->sp = (unsigned long) childstack;
+
+ p->thread.ksp = (unsigned long) childstack;
+ p->thread.kregs = childregs;
+ return 0;
+ }
+
+ regs = current_pt_regs();
+ *childregs = *regs;
+ childregs->r2 = 0; /* Set the return value for the child. */
+ childregs->r7 = 0;
+
+ stack = ((struct switch_stack *) regs) - 1;
+ *childstack = *stack;
+ childstack->ra = (unsigned long)ret_from_fork;
+ p->thread.kregs = childregs;
+ p->thread.ksp = (unsigned long) childstack;
+
+ if (usp)
+ childregs->sp = usp;
+
+ /* Initialize tls register. */
+ if (clone_flags & CLONE_SETTLS)
+ childstack->r23 = regs->r8;
+
+ return 0;
+}
+
+/*
+ * Generic dumping code. Used for panic and debug.
+ */
+void dump(struct pt_regs *fp)
+{
+ unsigned long *sp;
+ unsigned char *tp;
+ int i;
+
+ pr_emerg("\nCURRENT PROCESS:\n\n");
+ pr_emerg("COMM=%s PID=%d\n", current->comm, current->pid);
+
+ if (current->mm) {
+ pr_emerg("TEXT=%08x-%08x DATA=%08x-%08x BSS=%08x-%08x\n",
+ (int) current->mm->start_code,
+ (int) current->mm->end_code,
+ (int) current->mm->start_data,
+ (int) current->mm->end_data,
+ (int) current->mm->end_data,
+ (int) current->mm->brk);
+ pr_emerg("USER-STACK=%08x KERNEL-STACK=%08x\n\n",
+ (int) current->mm->start_stack,
+ (int)(((unsigned long) current) + THREAD_SIZE));
+ }
+
+ pr_emerg("PC: %08lx\n", fp->ea);
+ pr_emerg("SR: %08lx SP: %08lx\n",
+ (long) fp->estatus, (long) fp);
+
+ pr_emerg("r1: %08lx r2: %08lx r3: %08lx\n",
+ fp->r1, fp->r2, fp->r3);
+
+ pr_emerg("r4: %08lx r5: %08lx r6: %08lx r7: %08lx\n",
+ fp->r4, fp->r5, fp->r6, fp->r7);
+ pr_emerg("r8: %08lx r9: %08lx r10: %08lx r11: %08lx\n",
+ fp->r8, fp->r9, fp->r10, fp->r11);
+ pr_emerg("r12: %08lx r13: %08lx r14: %08lx r15: %08lx\n",
+ fp->r12, fp->r13, fp->r14, fp->r15);
+ pr_emerg("or2: %08lx ra: %08lx fp: %08lx sp: %08lx\n",
+ fp->orig_r2, fp->ra, fp->fp, fp->sp);
+ pr_emerg("\nUSP: %08x TRAPFRAME: %08x\n",
+ (unsigned int) fp->sp, (unsigned int) fp);
+
+ pr_emerg("\nCODE:");
+ tp = ((unsigned char *) fp->ea) - 0x20;
+ for (sp = (unsigned long *) tp, i = 0; (i < 0x40); i += 4) {
+ if ((i % 0x10) == 0)
+ pr_emerg("\n%08x: ", (int) (tp + i));
+ pr_emerg("%08x ", (int) *sp++);
+ }
+ pr_emerg("\n");
+
+ pr_emerg("\nKERNEL STACK:");
+ tp = ((unsigned char *) fp) - 0x40;
+ for (sp = (unsigned long *) tp, i = 0; (i < 0xc0); i += 4) {
+ if ((i % 0x10) == 0)
+ pr_emerg("\n%08x: ", (int) (tp + i));
+ pr_emerg("%08x ", (int) *sp++);
+ }
+ pr_emerg("\n");
+ pr_emerg("\n");
+
+ pr_emerg("\nUSER STACK:");
+ tp = (unsigned char *) (fp->sp - 0x10);
+ for (sp = (unsigned long *) tp, i = 0; (i < 0x80); i += 4) {
+ if ((i % 0x10) == 0)
+ pr_emerg("\n%08x: ", (int) (tp + i));
+ pr_emerg("%08x ", (int) *sp++);
+ }
+ pr_emerg("\n\n");
+}
+
+unsigned long get_wchan(struct task_struct *p)
+{
+ unsigned long fp, pc;
+ unsigned long stack_page;
+ int count = 0;
+
+ if (!p || p == current || p->state == TASK_RUNNING)
+ return 0;
+
+ stack_page = (unsigned long)p;
+ fp = ((struct switch_stack *)p->thread.ksp)->fp; /* ;dgt2 */
+ do {
+ if (fp < stack_page+sizeof(struct task_struct) ||
+ fp >= 8184+stack_page) /* ;dgt2;tmp */
+ return 0;
+ pc = ((unsigned long *)fp)[1];
+ if (!in_sched_functions(pc))
+ return pc;
+ fp = *(unsigned long *) fp;
+ } while (count++ < 16); /* ;dgt2;tmp */
+ return 0;
+}
+
+/*
+ * Do necessary setup to start up a newly executed thread.
+ * Will startup in user mode (status_extension = 0).
+ */
+void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
+{
+ memset((void *) regs, 0, sizeof(struct pt_regs));
+ regs->estatus = ESTATUS_EPIE | ESTATUS_EU;
+ regs->ea = pc;
+ regs->sp = sp;
+}
+
+#include <linux/elfcore.h>
+
+/* Fill in the FPU structure for a core dump. */
+int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r)
+{
+ return 0; /* Nios2 has no FPU and thus no FPU registers */
+}
diff --git a/arch/nios2/kernel/prom.c b/arch/nios2/kernel/prom.c
new file mode 100644
index 000000000000..0522d3378e3f
--- /dev/null
+++ b/arch/nios2/kernel/prom.c
@@ -0,0 +1,65 @@
+/*
+ * Device tree support
+ *
+ * Copyright (C) 2013 Altera Corporation
+ * Copyright (C) 2010 Thomas Chou <thomas@wytron.com.tw>
+ *
+ * Based on MIPS support for CONFIG_OF device tree support
+ *
+ * Copyright (C) 2010 Cisco Systems Inc. <dediao@cisco.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/bootmem.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/io.h>
+
+#include <asm/sections.h>
+
+void __init early_init_dt_add_memory_arch(u64 base, u64 size)
+{
+ u64 kernel_start = (u64)virt_to_phys(_text);
+
+ if (!memory_size &&
+ (kernel_start >= base) && (kernel_start < (base + size)))
+ memory_size = size;
+}
+
+void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
+{
+ return alloc_bootmem_align(size, align);
+}
+
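+/*
+ * Device tree selection order (a summary of the logic below): a valid DTB
+ * at CONFIG_NIOS2_DTB_PHYS_ADDR wins, then a DTB linked in at __dtb_start,
+ * otherwise the params pointer passed down from head.S is scanned as-is.
+ */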
+void __init early_init_devtree(void *params)
+{
+ __be32 *dtb = (__be32 *)__dtb_start;
+#if defined(CONFIG_NIOS2_DTB_AT_PHYS_ADDR)
+ if (be32_to_cpup((__be32 *)CONFIG_NIOS2_DTB_PHYS_ADDR) ==
+ OF_DT_HEADER) {
+ params = (void *)CONFIG_NIOS2_DTB_PHYS_ADDR;
+ early_init_dt_scan(params);
+ return;
+ }
+#endif
+ if (be32_to_cpu((__be32) *dtb) == OF_DT_HEADER)
+ params = (void *)__dtb_start;
+
+ early_init_dt_scan(params);
+}
diff --git a/arch/nios2/kernel/ptrace.c b/arch/nios2/kernel/ptrace.c
new file mode 100644
index 000000000000..681dda92eff1
--- /dev/null
+++ b/arch/nios2/kernel/ptrace.c
@@ -0,0 +1,166 @@
+/*
+ * Copyright (C) 2014 Altera Corporation
+ * Copyright (C) 2010 Tobias Klauser <tklauser@distanz.ch>
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file COPYING in the main directory of this
+ * archive for more details.
+ */
+
+#include <linux/elf.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/ptrace.h>
+#include <linux/regset.h>
+#include <linux/sched.h>
+#include <linux/tracehook.h>
+#include <linux/uaccess.h>
+#include <linux/user.h>
+
+static int genregs_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ const struct pt_regs *regs = task_pt_regs(target);
+ const struct switch_stack *sw = (struct switch_stack *)regs - 1;
+ int ret = 0;
+
+#define REG_O_ZERO_RANGE(START, END) \
+ if (!ret) \
+ ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, \
+ START * 4, (END * 4) + 4);
+
+#define REG_O_ONE(PTR, LOC) \
+ if (!ret) \
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, PTR, \
+ LOC * 4, (LOC * 4) + 4);
+
+#define REG_O_RANGE(PTR, START, END) \
+ if (!ret) \
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, PTR, \
+ START * 4, (END * 4) + 4);
+
+ REG_O_ZERO_RANGE(PTR_R0, PTR_R0);
+ REG_O_RANGE(&regs->r1, PTR_R1, PTR_R7);
+ REG_O_RANGE(&regs->r8, PTR_R8, PTR_R15);
+ REG_O_RANGE(sw, PTR_R16, PTR_R23);
+ REG_O_ZERO_RANGE(PTR_R24, PTR_R25); /* et and bt */
+ REG_O_ONE(&regs->gp, PTR_GP);
+ REG_O_ONE(&regs->sp, PTR_SP);
+ REG_O_ONE(&regs->fp, PTR_FP);
+ REG_O_ONE(&regs->ea, PTR_EA);
+ REG_O_ZERO_RANGE(PTR_BA, PTR_BA);
+ REG_O_ONE(&regs->ra, PTR_RA);
+ REG_O_ONE(&regs->ea, PTR_PC); /* use ea for PC */
+ if (!ret)
+ ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
+ PTR_STATUS * 4, -1);
+
+ return ret;
+}
+
+/*
+ * Set the thread state from a regset passed in via ptrace
+ */
+static int genregs_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ struct pt_regs *regs = task_pt_regs(target);
+ const struct switch_stack *sw = (struct switch_stack *)regs - 1;
+ int ret = 0;
+
+#define REG_IGNORE_RANGE(START, END) \
+ if (!ret) \
+ ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, \
+ START * 4, (END * 4) + 4);
+
+#define REG_IN_ONE(PTR, LOC) \
+ if (!ret) \
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, \
+ (void *)(PTR), LOC * 4, (LOC * 4) + 4);
+
+#define REG_IN_RANGE(PTR, START, END) \
+ if (!ret) \
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, \
+ (void *)(PTR), START * 4, (END * 4) + 4);
+
+ REG_IGNORE_RANGE(PTR_R0, PTR_R0);
+ REG_IN_RANGE(&regs->r1, PTR_R1, PTR_R7);
+ REG_IN_RANGE(&regs->r8, PTR_R8, PTR_R15);
+ REG_IN_RANGE(sw, PTR_R16, PTR_R23);
+ REG_IGNORE_RANGE(PTR_R24, PTR_R25); /* et and bt */
+ REG_IN_ONE(&regs->gp, PTR_GP);
+ REG_IN_ONE(&regs->sp, PTR_SP);
+ REG_IN_ONE(&regs->fp, PTR_FP);
+ REG_IN_ONE(&regs->ea, PTR_EA);
+ REG_IGNORE_RANGE(PTR_BA, PTR_BA);
+ REG_IN_ONE(&regs->ra, PTR_RA);
+ REG_IN_ONE(&regs->ea, PTR_PC); /* use ea for PC */
+ if (!ret)
+ ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+ PTR_STATUS * 4, -1);
+
+ return ret;
+}
+
+/*
+ * Define the register sets available on Nios2 under Linux
+ */
+enum nios2_regset {
+ REGSET_GENERAL,
+};
+
+static const struct user_regset nios2_regsets[] = {
+ [REGSET_GENERAL] = {
+ .core_note_type = NT_PRSTATUS,
+ .n = NUM_PTRACE_REG,
+ .size = sizeof(unsigned long),
+ .align = sizeof(unsigned long),
+ .get = genregs_get,
+ .set = genregs_set,
+ }
+};
+
+static const struct user_regset_view nios2_user_view = {
+ .name = "nios2",
+ .e_machine = ELF_ARCH,
+ .ei_osabi = ELF_OSABI,
+ .regsets = nios2_regsets,
+ .n = ARRAY_SIZE(nios2_regsets)
+};
+
+const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+{
+ return &nios2_user_view;
+}
+
+void ptrace_disable(struct task_struct *child)
+{
+
+}
+
+long arch_ptrace(struct task_struct *child, long request, unsigned long addr,
+ unsigned long data)
+{
+ return ptrace_request(child, request, addr, data);
+}
+
+asmlinkage int do_syscall_trace_enter(void)
+{
+ int ret = 0;
+
+ if (test_thread_flag(TIF_SYSCALL_TRACE))
+ ret = tracehook_report_syscall_entry(task_pt_regs(current));
+
+ return ret;
+}
+
+asmlinkage void do_syscall_trace_exit(void)
+{
+ if (test_thread_flag(TIF_SYSCALL_TRACE))
+ tracehook_report_syscall_exit(task_pt_regs(current), 0);
+}
diff --git a/arch/nios2/kernel/setup.c b/arch/nios2/kernel/setup.c
new file mode 100644
index 000000000000..cb3121f975d4
--- /dev/null
+++ b/arch/nios2/kernel/setup.c
@@ -0,0 +1,218 @@
+/*
+ * Nios2-specific parts of system setup
+ *
+ * Copyright (C) 2010 Tobias Klauser <tklauser@distanz.ch>
+ * Copyright (C) 2004 Microtronix Datacom Ltd.
+ * Copyright (C) 2001 Vic Phillips <vic@microtronix.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/console.h>
+#include <linux/bootmem.h>
+#include <linux/initrd.h>
+#include <linux/of_fdt.h>
+
+#include <asm/mmu_context.h>
+#include <asm/sections.h>
+#include <asm/setup.h>
+#include <asm/cpuinfo.h>
+
+unsigned long memory_start;
+EXPORT_SYMBOL(memory_start);
+
+unsigned long memory_end;
+EXPORT_SYMBOL(memory_end);
+
+unsigned long memory_size;
+
+static struct pt_regs fake_regs = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0};
+
+/* Copy a short hook instruction sequence to the exception address */
+static inline void copy_exception_handler(unsigned int addr)
+{
+ unsigned int start = (unsigned int) exception_handler_hook;
+ volatile unsigned int tmp = 0;
+
+ if (start == addr) {
+ /* The CPU exception address already points to the handler. */
+ return;
+ }
+
+ __asm__ __volatile__ (
+ "ldw %2,0(%0)\n"
+ "stw %2,0(%1)\n"
+ "ldw %2,4(%0)\n"
+ "stw %2,4(%1)\n"
+ "ldw %2,8(%0)\n"
+ "stw %2,8(%1)\n"
+ "flushd 0(%1)\n"
+ "flushd 4(%1)\n"
+ "flushd 8(%1)\n"
+ "flushi %1\n"
+ "addi %1,%1,4\n"
+ "flushi %1\n"
+ "addi %1,%1,4\n"
+ "flushi %1\n"
+ "flushp\n"
+ : /* no output registers */
+ : "r" (start), "r" (addr), "r" (tmp)
+ : "memory"
+ );
+}
+
+/* Copy the fast TLB miss handler */
+static inline void copy_fast_tlb_miss_handler(unsigned int addr)
+{
+ unsigned int start = (unsigned int) fast_handler;
+ unsigned int end = (unsigned int) fast_handler_end;
+ volatile unsigned int tmp = 0;
+
+ __asm__ __volatile__ (
+ "1:\n"
+ " ldw %3,0(%0)\n"
+ " stw %3,0(%1)\n"
+ " flushd 0(%1)\n"
+ " flushi %1\n"
+ " flushp\n"
+ " addi %0,%0,4\n"
+ " addi %1,%1,4\n"
+ " bne %0,%2,1b\n"
+ : /* no output registers */
+ : "r" (start), "r" (addr), "r" (end), "r" (tmp)
+ : "memory"
+ );
+}
+
+/*
+ * save args passed from u-boot, called from head.S
+ *
+ * @r4: NIOS magic
+ * @r5: initrd start
+ * @r6: initrd end or fdt
+ * @r7: kernel command line
+ */
+asmlinkage void __init nios2_boot_init(unsigned r4, unsigned r5, unsigned r6,
+ unsigned r7)
+{
+ unsigned dtb_passed = 0;
+ char cmdline_passed[COMMAND_LINE_SIZE] = { 0, };
+
+#if defined(CONFIG_NIOS2_PASS_CMDLINE)
+ if (r4 == 0x534f494e) { /* r4 is magic NIOS */
+#if defined(CONFIG_BLK_DEV_INITRD)
+ if (r5) { /* initramfs */
+ initrd_start = r5;
+ initrd_end = r6;
+ }
+#endif /* CONFIG_BLK_DEV_INITRD */
+ dtb_passed = r6;
+
+ if (r7)
+ strncpy(cmdline_passed, (char *)r7, COMMAND_LINE_SIZE);
+ }
+#endif
+
+ early_init_devtree((void *)dtb_passed);
+
+#ifndef CONFIG_CMDLINE_FORCE
+ if (cmdline_passed[0])
+ strncpy(boot_command_line, cmdline_passed, COMMAND_LINE_SIZE);
+#ifdef CONFIG_NIOS2_CMDLINE_IGNORE_DTB
+ else
+ strncpy(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
+#endif
+#endif
+}
+
+void __init setup_arch(char **cmdline_p)
+{
+ int bootmap_size;
+
+ console_verbose();
+
+ memory_start = PAGE_ALIGN((unsigned long)__pa(_end));
+ memory_end = (unsigned long) CONFIG_NIOS2_MEM_BASE + memory_size;
+
+ init_mm.start_code = (unsigned long) _stext;
+ init_mm.end_code = (unsigned long) _etext;
+ init_mm.end_data = (unsigned long) _edata;
+ init_mm.brk = (unsigned long) _end;
+ init_task.thread.kregs = &fake_regs;
+
+ /* Keep a copy of command line */
+ *cmdline_p = boot_command_line;
+
+ min_low_pfn = PFN_UP(memory_start);
+ max_low_pfn = PFN_DOWN(memory_end);
+ max_mapnr = max_low_pfn;
+
+ /*
+ * give all the memory to the bootmap allocator, tell it to put the
+ * boot mem_map at the start of memory
+ */
+ pr_debug("init_bootmem_node(?,%#lx, %#x, %#lx)\n",
+ min_low_pfn, PFN_DOWN(PHYS_OFFSET), max_low_pfn);
+ bootmap_size = init_bootmem_node(NODE_DATA(0),
+ min_low_pfn, PFN_DOWN(PHYS_OFFSET),
+ max_low_pfn);
+
+ /*
+ * free the usable memory, we have to make sure we do not free
+ * the bootmem bitmap so we then reserve it after freeing it :-)
+ */
+ pr_debug("free_bootmem(%#lx, %#lx)\n",
+ memory_start, memory_end - memory_start);
+ free_bootmem(memory_start, memory_end - memory_start);
+
+ /*
+ * Reserve the bootmem bitmap itself as well. We do this in two
+ * steps (first step was init_bootmem()) because this catches
+ * the (very unlikely) case of us accidentally initializing the
+ * bootmem allocator with an invalid RAM area.
+ *
+ * Arguments are start, size
+ */
+ pr_debug("reserve_bootmem(%#lx, %#x)\n", memory_start, bootmap_size);
+ reserve_bootmem(memory_start, bootmap_size, BOOTMEM_DEFAULT);
+
+#ifdef CONFIG_BLK_DEV_INITRD
+ if (initrd_start) {
+ reserve_bootmem(virt_to_phys((void *)initrd_start),
+ initrd_end - initrd_start, BOOTMEM_DEFAULT);
+ }
+#endif /* CONFIG_BLK_DEV_INITRD */
+
+ unflatten_and_copy_device_tree();
+
+ setup_cpuinfo();
+
+ copy_exception_handler(cpuinfo.exception_addr);
+
+ mmu_init();
+
+ copy_fast_tlb_miss_handler(cpuinfo.fast_tlb_miss_exc_addr);
+
+ /*
+ * Initialize MMU context handling here because data from cpuinfo is
+ * needed for this.
+ */
+ mmu_context_init();
+
+ /*
+ * get kmalloc into gear
+ */
+ paging_init();
+
+#if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE)
+ conswitchp = &dummy_con;
+#endif
+}
diff --git a/arch/nios2/kernel/signal.c b/arch/nios2/kernel/signal.c
new file mode 100644
index 000000000000..f9d27883a714
--- /dev/null
+++ b/arch/nios2/kernel/signal.c
@@ -0,0 +1,323 @@
+/*
+ * Copyright (C) 2013-2014 Altera Corporation
+ * Copyright (C) 2011-2012 Tobias Klauser <tklauser@distanz.ch>
+ * Copyright (C) 2004 Microtronix Datacom Ltd
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/uaccess.h>
+#include <linux/unistd.h>
+#include <linux/personality.h>
+#include <linux/tracehook.h>
+
+#include <asm/ucontext.h>
+#include <asm/cacheflush.h>
+
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+
+/*
+ * Do a signal return; undo the signal stack.
+ *
+ * Keep the return code on the stack quadword aligned!
+ * That makes the cache flush below easier.
+ */
+
+struct rt_sigframe {
+ struct siginfo info;
+ struct ucontext uc;
+};
+
+static inline int rt_restore_ucontext(struct pt_regs *regs,
+ struct switch_stack *sw,
+ struct ucontext *uc, int *pr2)
+{
+ int temp;
+ greg_t *gregs = uc->uc_mcontext.gregs;
+ int err;
+
+ /* Always make any pending restarted system calls return -EINTR */
+ current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
+ err = __get_user(temp, &uc->uc_mcontext.version);
+ if (temp != MCONTEXT_VERSION)
+ goto badframe;
+ /* restore passed registers */
+ err |= __get_user(regs->r1, &gregs[0]);
+ err |= __get_user(regs->r2, &gregs[1]);
+ err |= __get_user(regs->r3, &gregs[2]);
+ err |= __get_user(regs->r4, &gregs[3]);
+ err |= __get_user(regs->r5, &gregs[4]);
+ err |= __get_user(regs->r6, &gregs[5]);
+ err |= __get_user(regs->r7, &gregs[6]);
+ err |= __get_user(regs->r8, &gregs[7]);
+ err |= __get_user(regs->r9, &gregs[8]);
+ err |= __get_user(regs->r10, &gregs[9]);
+ err |= __get_user(regs->r11, &gregs[10]);
+ err |= __get_user(regs->r12, &gregs[11]);
+ err |= __get_user(regs->r13, &gregs[12]);
+ err |= __get_user(regs->r14, &gregs[13]);
+ err |= __get_user(regs->r15, &gregs[14]);
+ err |= __get_user(sw->r16, &gregs[15]);
+ err |= __get_user(sw->r17, &gregs[16]);
+ err |= __get_user(sw->r18, &gregs[17]);
+ err |= __get_user(sw->r19, &gregs[18]);
+ err |= __get_user(sw->r20, &gregs[19]);
+ err |= __get_user(sw->r21, &gregs[20]);
+ err |= __get_user(sw->r22, &gregs[21]);
+ err |= __get_user(sw->r23, &gregs[22]);
+ /* gregs[23] is handled below */
+ err |= __get_user(sw->fp, &gregs[24]); /* Verify, should this be
+ settable */
+ err |= __get_user(sw->gp, &gregs[25]); /* Verify, should this be
+ settable */
+
+ err |= __get_user(temp, &gregs[26]); /* Not really necessary no user
+ settable bits */
+ err |= __get_user(regs->ea, &gregs[27]);
+
+ err |= __get_user(regs->ra, &gregs[23]);
+ err |= __get_user(regs->sp, &gregs[28]);
+
+ regs->orig_r2 = -1; /* disable syscall checks */
+
+ err |= restore_altstack(&uc->uc_stack);
+ if (err)
+ goto badframe;
+
+ *pr2 = regs->r2;
+ return err;
+
+badframe:
+ return 1;
+}
+
+asmlinkage int do_rt_sigreturn(struct switch_stack *sw)
+{
+ struct pt_regs *regs = (struct pt_regs *)(sw + 1);
+ /* Verify, can we follow the stack back */
+ struct rt_sigframe *frame = (struct rt_sigframe *) regs->sp;
+ sigset_t set;
+ int rval;
+
+ if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+ goto badframe;
+
+ if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+ goto badframe;
+
+ set_current_blocked(&set);
+
+ if (rt_restore_ucontext(regs, sw, &frame->uc, &rval))
+ goto badframe;
+
+ return rval;
+
+badframe:
+ force_sig(SIGSEGV, current);
+ return 0;
+}
+
+static inline int rt_setup_ucontext(struct ucontext *uc, struct pt_regs *regs)
+{
+ struct switch_stack *sw = (struct switch_stack *)regs - 1;
+ greg_t *gregs = uc->uc_mcontext.gregs;
+ int err = 0;
+
+ err |= __put_user(MCONTEXT_VERSION, &uc->uc_mcontext.version);
+ err |= __put_user(regs->r1, &gregs[0]);
+ err |= __put_user(regs->r2, &gregs[1]);
+ err |= __put_user(regs->r3, &gregs[2]);
+ err |= __put_user(regs->r4, &gregs[3]);
+ err |= __put_user(regs->r5, &gregs[4]);
+ err |= __put_user(regs->r6, &gregs[5]);
+ err |= __put_user(regs->r7, &gregs[6]);
+ err |= __put_user(regs->r8, &gregs[7]);
+ err |= __put_user(regs->r9, &gregs[8]);
+ err |= __put_user(regs->r10, &gregs[9]);
+ err |= __put_user(regs->r11, &gregs[10]);
+ err |= __put_user(regs->r12, &gregs[11]);
+ err |= __put_user(regs->r13, &gregs[12]);
+ err |= __put_user(regs->r14, &gregs[13]);
+ err |= __put_user(regs->r15, &gregs[14]);
+ err |= __put_user(sw->r16, &gregs[15]);
+ err |= __put_user(sw->r17, &gregs[16]);
+ err |= __put_user(sw->r18, &gregs[17]);
+ err |= __put_user(sw->r19, &gregs[18]);
+ err |= __put_user(sw->r20, &gregs[19]);
+ err |= __put_user(sw->r21, &gregs[20]);
+ err |= __put_user(sw->r22, &gregs[21]);
+ err |= __put_user(sw->r23, &gregs[22]);
+ err |= __put_user(regs->ra, &gregs[23]);
+ err |= __put_user(sw->fp, &gregs[24]);
+ err |= __put_user(sw->gp, &gregs[25]);
+ err |= __put_user(regs->ea, &gregs[27]);
+ err |= __put_user(regs->sp, &gregs[28]);
+ return err;
+}
+
+static inline void *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
+ size_t frame_size)
+{
+ unsigned long usp;
+
+ /* Default to using normal stack. */
+ usp = regs->sp;
+
+ /* This is the X/Open sanctioned signal stack switching. */
+ usp = sigsp(usp, ksig);
+
+ /* Verify, is it 32 or 64 bit aligned */
+ return (void *)((usp - frame_size) & -8UL);
+}
+
+static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
+ struct pt_regs *regs)
+{
+ struct rt_sigframe *frame;
+ int err = 0;
+
+ frame = get_sigframe(ksig, regs, sizeof(*frame));
+
+ if (ksig->ka.sa.sa_flags & SA_SIGINFO)
+ err |= copy_siginfo_to_user(&frame->info, &ksig->info);
+
+ /* Create the ucontext. */
+ err |= __put_user(0, &frame->uc.uc_flags);
+ err |= __put_user(0, &frame->uc.uc_link);
+ err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
+ err |= rt_setup_ucontext(&frame->uc, regs);
+ err |= copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+
+ if (err)
+ goto give_sigsegv;
+
+ /* Set up to return from userspace; jump to fixed address sigreturn
+ trampoline on kuser page. */
+ regs->ra = (unsigned long) (0x1040);
+
+ /* Set up registers for signal handler */
+ regs->sp = (unsigned long) frame;
+ regs->r4 = (unsigned long) ksig->sig;
+ regs->r5 = (unsigned long) &frame->info;
+ regs->r6 = (unsigned long) &frame->uc;
+ regs->ea = (unsigned long) ksig->ka.sa.sa_handler;
+ return 0;
+
+give_sigsegv:
+ force_sigsegv(ksig->sig, current);
+ return -EFAULT;
+}
+
+/*
+ * OK, we're invoking a handler
+ */
+static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
+{
+ int ret;
+ sigset_t *oldset = sigmask_to_save();
+
+ /* set up the stack frame */
+ ret = setup_rt_frame(ksig, oldset, regs);
+
+ signal_setup_done(ret, ksig, 0);
+}
+
+static int do_signal(struct pt_regs *regs)
+{
+ unsigned int retval = 0, continue_addr = 0, restart_addr = 0;
+ int restart = 0;
+ struct ksignal ksig;
+
+ current->thread.kregs = regs;
+
+ /*
+ * If we were from a system call, check for system call restarting...
+ */
+ if (regs->orig_r2 >= 0) {
+ continue_addr = regs->ea;
+ restart_addr = continue_addr - 4;
+ retval = regs->r2;
+
+ /*
+ * Prepare for system call restart. We do this here so that a
+ * debugger will see the already changed PC.
+ */
+ switch (retval) {
+ case ERESTART_RESTARTBLOCK:
+ restart = -2;
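+ /* fall through: restart ends up -1 here, +1 for the others */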
+ case ERESTARTNOHAND:
+ case ERESTARTSYS:
+ case ERESTARTNOINTR:
+ restart++;
+ regs->r2 = regs->orig_r2;
+ regs->r7 = regs->orig_r7;
+ regs->ea = restart_addr;
+ break;
+ }
+ }
+
+ if (get_signal(&ksig)) {
+ /* handler */
+ if (unlikely(restart && regs->ea == restart_addr)) {
+ if (retval == ERESTARTNOHAND ||
+ retval == ERESTART_RESTARTBLOCK ||
+ (retval == ERESTARTSYS
+ && !(ksig.ka.sa.sa_flags & SA_RESTART))) {
+ regs->r2 = EINTR;
+ regs->r7 = 1;
+ regs->ea = continue_addr;
+ }
+ }
+ handle_signal(&ksig, regs);
+ return 0;
+ }
+
+ /*
+ * No handler present
+ */
+ if (unlikely(restart) && regs->ea == restart_addr) {
+ regs->ea = continue_addr;
+ regs->r2 = __NR_restart_syscall;
+ }
+
+ /*
+ * If there's no signal to deliver, we just put the saved sigmask back.
+ */
+ restore_saved_sigmask();
+
+ return restart;
+}
+
+asmlinkage int do_notify_resume(struct pt_regs *regs)
+{
+ /*
+ * We want the common case to go fast, which is why we may in certain
+ * cases get here from kernel mode. Just return without doing anything
+ * if so.
+ */
+ if (!user_mode(regs))
+ return 0;
+
+ if (test_thread_flag(TIF_SIGPENDING)) {
+ int restart = do_signal(regs);
+
+ if (unlikely(restart)) {
+ /*
+ * Restart without handlers.
+ * Deal with it without leaving
+ * the kernel space.
+ */
+ return restart;
+ }
+ } else if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
+ tracehook_notify_resume(regs);
+
+ return 0;
+}
diff --git a/arch/nios2/kernel/sys_nios2.c b/arch/nios2/kernel/sys_nios2.c
new file mode 100644
index 000000000000..cd390ec4f88b
--- /dev/null
+++ b/arch/nios2/kernel/sys_nios2.c
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2013 Altera Corporation
+ * Copyright (C) 2011-2012 Tobias Klauser <tklauser@distanz.ch>
+ * Copyright (C) 2004 Microtronix Datacom Ltd.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/export.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/syscalls.h>
+
+#include <asm/cacheflush.h>
+#include <asm/traps.h>
+
+/* sys_cacheflush -- flush the processor cache. */
+asmlinkage int sys_cacheflush(unsigned long addr, unsigned long len,
+ unsigned int op)
+{
+ struct vm_area_struct *vma;
+
+ if (len == 0)
+ return 0;
+
+ /* We only support op 0 now, return error if op is non-zero.*/
+ if (op)
+ return -EINVAL;
+
+ /* Check for overflow */
+ if (addr + len < addr)
+ return -EFAULT;
+
+ /*
+ * Verify that the specified address region actually belongs
+ * to this process.
+ */
+ vma = find_vma(current->mm, addr);
+ if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
+ return -EFAULT;
+
+ flush_cache_range(vma, addr, addr + len);
+
+ return 0;
+}
+
+asmlinkage int sys_getpagesize(void)
+{
+ return PAGE_SIZE;
+}
diff --git a/arch/nios2/kernel/syscall_table.c b/arch/nios2/kernel/syscall_table.c
new file mode 100644
index 000000000000..06e6ac1835b2
--- /dev/null
+++ b/arch/nios2/kernel/syscall_table.c
@@ -0,0 +1,29 @@
+/*
+ * Copyright Altera Corporation (C) 2013. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include <linux/syscalls.h>
+#include <linux/signal.h>
+#include <linux/unistd.h>
+
+#include <asm/syscalls.h>
+
+#undef __SYSCALL
+#define __SYSCALL(nr, call) [nr] = (call),
+
+void *sys_call_table[__NR_syscalls] = {
+#include <asm/unistd.h>
+};
diff --git a/arch/nios2/kernel/time.c b/arch/nios2/kernel/time.c
new file mode 100644
index 000000000000..7f4547418ee1
--- /dev/null
+++ b/arch/nios2/kernel/time.c
@@ -0,0 +1,308 @@
+/*
+ * Copyright (C) 2013-2014 Altera Corporation
+ * Copyright (C) 2010 Tobias Klauser <tklauser@distanz.ch>
+ * Copyright (C) 2004 Microtronix Datacom Ltd.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+
+#define ALTERA_TIMER_STATUS_REG 0
+#define ALTERA_TIMER_CONTROL_REG 4
+#define ALTERA_TIMER_PERIODL_REG 8
+#define ALTERA_TIMER_PERIODH_REG 12
+#define ALTERA_TIMER_SNAPL_REG 16
+#define ALTERA_TIMER_SNAPH_REG 20
+
+#define ALTERA_TIMER_CONTROL_ITO_MSK (0x1)
+#define ALTERA_TIMER_CONTROL_CONT_MSK (0x2)
+#define ALTERA_TIMER_CONTROL_START_MSK (0x4)
+#define ALTERA_TIMER_CONTROL_STOP_MSK (0x8)
+
+struct nios2_timer {
+ void __iomem *base;
+ unsigned long freq;
+};
+
+struct nios2_clockevent_dev {
+ struct nios2_timer timer;
+ struct clock_event_device ced;
+};
+
+struct nios2_clocksource {
+ struct nios2_timer timer;
+ struct clocksource cs;
+};
+
+static inline struct nios2_clockevent_dev *
+ to_nios2_clkevent(struct clock_event_device *evt)
+{
+ return container_of(evt, struct nios2_clockevent_dev, ced);
+}
+
+static inline struct nios2_clocksource *
+ to_nios2_clksource(struct clocksource *cs)
+{
+ return container_of(cs, struct nios2_clocksource, cs);
+}
+
+static u16 timer_readw(struct nios2_timer *timer, u32 offs)
+{
+ return readw(timer->base + offs);
+}
+
+static void timer_writew(struct nios2_timer *timer, u16 val, u32 offs)
+{
+ writew(val, timer->base + offs);
+}
+
+static inline unsigned long read_timersnapshot(struct nios2_timer *timer)
+{
+ unsigned long count;
+
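+	/* A write to either snapshot register latches the current count */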
+ timer_writew(timer, 0, ALTERA_TIMER_SNAPL_REG);
+ count = timer_readw(timer, ALTERA_TIMER_SNAPH_REG) << 16 |
+ timer_readw(timer, ALTERA_TIMER_SNAPL_REG);
+
+ return count;
+}
+
+static cycle_t nios2_timer_read(struct clocksource *cs)
+{
+ struct nios2_clocksource *nios2_cs = to_nios2_clksource(cs);
+ unsigned long flags;
+ u32 count;
+
+ local_irq_save(flags);
+ count = read_timersnapshot(&nios2_cs->timer);
+ local_irq_restore(flags);
+
+ /* Counter is counting down */
+ return ~count;
+}
+
+static struct nios2_clocksource nios2_cs = {
+ .cs = {
+ .name = "nios2-clksrc",
+ .rating = 250,
+ .read = nios2_timer_read,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+ },
+};
+
+cycles_t get_cycles(void)
+{
+ return nios2_timer_read(&nios2_cs.cs);
+}
+
+static void nios2_timer_start(struct nios2_timer *timer)
+{
+ u16 ctrl;
+
+ ctrl = timer_readw(timer, ALTERA_TIMER_CONTROL_REG);
+ ctrl |= ALTERA_TIMER_CONTROL_START_MSK;
+ timer_writew(timer, ctrl, ALTERA_TIMER_CONTROL_REG);
+}
+
+static void nios2_timer_stop(struct nios2_timer *timer)
+{
+ u16 ctrl;
+
+ ctrl = timer_readw(timer, ALTERA_TIMER_CONTROL_REG);
+ ctrl |= ALTERA_TIMER_CONTROL_STOP_MSK;
+ timer_writew(timer, ctrl, ALTERA_TIMER_CONTROL_REG);
+}
+
+static void nios2_timer_config(struct nios2_timer *timer, unsigned long period,
+ enum clock_event_mode mode)
+{
+ u16 ctrl;
+
+ /* The timer's actual period is one cycle greater than the value
+ * stored in the period register. */
+ period--;
+
+ ctrl = timer_readw(timer, ALTERA_TIMER_CONTROL_REG);
+ /* stop counter */
+ timer_writew(timer, ctrl | ALTERA_TIMER_CONTROL_STOP_MSK,
+ ALTERA_TIMER_CONTROL_REG);
+
+ /* write new count */
+ timer_writew(timer, period, ALTERA_TIMER_PERIODL_REG);
+ timer_writew(timer, period >> 16, ALTERA_TIMER_PERIODH_REG);
+
+ ctrl |= ALTERA_TIMER_CONTROL_START_MSK | ALTERA_TIMER_CONTROL_ITO_MSK;
+ if (mode == CLOCK_EVT_MODE_PERIODIC)
+ ctrl |= ALTERA_TIMER_CONTROL_CONT_MSK;
+ else
+ ctrl &= ~ALTERA_TIMER_CONTROL_CONT_MSK;
+ timer_writew(timer, ctrl, ALTERA_TIMER_CONTROL_REG);
+}
+
+static int nios2_timer_set_next_event(unsigned long delta,
+ struct clock_event_device *evt)
+{
+ struct nios2_clockevent_dev *nios2_ced = to_nios2_clkevent(evt);
+
+ nios2_timer_config(&nios2_ced->timer, delta, evt->mode);
+
+ return 0;
+}
+
+static void nios2_timer_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ unsigned long period;
+ struct nios2_clockevent_dev *nios2_ced = to_nios2_clkevent(evt);
+ struct nios2_timer *timer = &nios2_ced->timer;
+
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ period = DIV_ROUND_UP(timer->freq, HZ);
+ nios2_timer_config(timer, period, CLOCK_EVT_MODE_PERIODIC);
+ break;
+ case CLOCK_EVT_MODE_ONESHOT:
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ nios2_timer_stop(timer);
+ break;
+ case CLOCK_EVT_MODE_RESUME:
+ nios2_timer_start(timer);
+ break;
+ }
+}
+
+irqreturn_t timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = (struct clock_event_device *) dev_id;
+ struct nios2_clockevent_dev *nios2_ced = to_nios2_clkevent(evt);
+
+ /* Clear the interrupt condition */
+ timer_writew(&nios2_ced->timer, 0, ALTERA_TIMER_STATUS_REG);
+ evt->event_handler(evt);
+
+ return IRQ_HANDLED;
+}
+
+static void __init nios2_timer_get_base_and_freq(struct device_node *np,
+ void __iomem **base, u32 *freq)
+{
+ *base = of_iomap(np, 0);
+ if (!*base)
+ panic("Unable to map reg for %s\n", np->name);
+
+ if (of_property_read_u32(np, "clock-frequency", freq))
+ panic("Unable to get %s clock frequency\n", np->name);
+}
+
+static struct nios2_clockevent_dev nios2_ce = {
+ .ced = {
+ .name = "nios2-clkevent",
+ .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+ .rating = 250,
+ .shift = 32,
+ .set_next_event = nios2_timer_set_next_event,
+ .set_mode = nios2_timer_set_mode,
+ },
+};
+
+static __init void nios2_clockevent_init(struct device_node *timer)
+{
+ void __iomem *iobase;
+ u32 freq;
+ int irq;
+
+ nios2_timer_get_base_and_freq(timer, &iobase, &freq);
+
+ irq = irq_of_parse_and_map(timer, 0);
+ if (!irq)
+ panic("Unable to parse timer irq\n");
+
+ nios2_ce.timer.base = iobase;
+ nios2_ce.timer.freq = freq;
+
+ nios2_ce.ced.cpumask = cpumask_of(0);
+ nios2_ce.ced.irq = irq;
+
+ nios2_timer_stop(&nios2_ce.timer);
+ /* clear pending interrupt */
+ timer_writew(&nios2_ce.timer, 0, ALTERA_TIMER_STATUS_REG);
+
+ if (request_irq(irq, timer_interrupt, IRQF_TIMER, timer->name,
+ &nios2_ce.ced))
+ panic("Unable to setup timer irq\n");
+
+ clockevents_config_and_register(&nios2_ce.ced, freq, 1, ULONG_MAX);
+}
+
+static __init void nios2_clocksource_init(struct device_node *timer)
+{
+ unsigned int ctrl;
+ void __iomem *iobase;
+ u32 freq;
+
+ nios2_timer_get_base_and_freq(timer, &iobase, &freq);
+
+ nios2_cs.timer.base = iobase;
+ nios2_cs.timer.freq = freq;
+
+ clocksource_register_hz(&nios2_cs.cs, freq);
+
+ timer_writew(&nios2_cs.timer, USHRT_MAX, ALTERA_TIMER_PERIODL_REG);
+ timer_writew(&nios2_cs.timer, USHRT_MAX, ALTERA_TIMER_PERIODH_REG);
+
+ /* interrupt disable + continuous + start */
+ ctrl = ALTERA_TIMER_CONTROL_CONT_MSK | ALTERA_TIMER_CONTROL_START_MSK;
+ timer_writew(&nios2_cs.timer, ctrl, ALTERA_TIMER_CONTROL_REG);
+
+ /* Calibrate the delay loop directly */
+ lpj_fine = freq / HZ;
+}
+
+/*
+ * The first timer instance will be used as a clockevent. If there are two
+ * or more instances, the second one is used as a clocksource and all
+ * others are unused.
+ */
+static void __init nios2_time_init(struct device_node *timer)
+{
+ static int num_called;
+
+ switch (num_called) {
+ case 0:
+ nios2_clockevent_init(timer);
+ break;
+ case 1:
+ nios2_clocksource_init(timer);
+ break;
+ default:
+ break;
+ }
+
+ num_called++;
+}
+
+void read_persistent_clock(struct timespec *ts)
+{
+ ts->tv_sec = mktime(2007, 1, 1, 0, 0, 0);
+ ts->tv_nsec = 0;
+}
+
+void __init time_init(void)
+{
+ clocksource_of_init();
+}
+
+CLOCKSOURCE_OF_DECLARE(nios2_timer, "altr,timer-1.0", nios2_time_init);
diff --git a/arch/nios2/kernel/traps.c b/arch/nios2/kernel/traps.c
new file mode 100644
index 000000000000..b7b97641a9a6
--- /dev/null
+++ b/arch/nios2/kernel/traps.c
@@ -0,0 +1,185 @@
+/*
+ * Hardware exception handling
+ *
+ * Copyright (C) 2010 Tobias Klauser <tklauser@distanz.ch>
+ * Copyright (C) 2004 Microtronix Datacom Ltd.
+ * Copyright (C) 2001 Vic Phillips
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file COPYING in the main directory of this
+ * archive for more details.
+ */
+
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/ptrace.h>
+
+#include <asm/traps.h>
+#include <asm/sections.h>
+#include <asm/uaccess.h>
+
+static DEFINE_SPINLOCK(die_lock);
+
+void die(const char *str, struct pt_regs *regs, long err)
+{
+ console_verbose();
+ spin_lock_irq(&die_lock);
+ pr_warn("Oops: %s, sig: %ld\n", str, err);
+ show_regs(regs);
+ spin_unlock_irq(&die_lock);
+ /*
+ * do_exit() should take care of panic'ing from an interrupt
+ * context so we don't handle it here
+ */
+ do_exit(err);
+}
+
+void _exception(int signo, struct pt_regs *regs, int code, unsigned long addr)
+{
+ siginfo_t info;
+
+ if (!user_mode(regs))
+ die("Exception in kernel mode", regs, signo);
+
+ info.si_signo = signo;
+ info.si_errno = 0;
+ info.si_code = code;
+ info.si_addr = (void __user *) addr;
+ force_sig_info(signo, &info, current);
+}
+
+/*
+ * show_stack() is an external API which we do not use ourselves.
+ */
+
+int kstack_depth_to_print = 48;
+
+void show_stack(struct task_struct *task, unsigned long *stack)
+{
+ unsigned long *endstack, addr;
+ int i;
+
+ if (!stack) {
+ if (task)
+ stack = (unsigned long *)task->thread.ksp;
+ else
+ stack = (unsigned long *)&stack;
+ }
+
+ addr = (unsigned long) stack;
+ endstack = (unsigned long *) PAGE_ALIGN(addr);
+
+ pr_emerg("Stack from %08lx:", (unsigned long)stack);
+ for (i = 0; i < kstack_depth_to_print; i++) {
+ if (stack + 1 > endstack)
+ break;
+ if (i % 8 == 0)
+ pr_emerg("\n ");
+ pr_emerg(" %08lx", *stack++);
+ }
+
+ pr_emerg("\nCall Trace:");
+ i = 0;
+ while (stack + 1 <= endstack) {
+ addr = *stack++;
+ /*
+ * If the address is either in the text segment of the
+ * kernel, or in the region which contains vmalloc'ed
+ * memory, it *may* be the address of a calling
+ * routine; if so, print it so that someone tracing
+ * down the cause of the crash will be able to figure
+ * out the call path that was taken.
+ */
+ if (((addr >= (unsigned long) _stext) &&
+ (addr <= (unsigned long) _etext))) {
+ if (i % 4 == 0)
+ pr_emerg("\n ");
+ pr_emerg(" [<%08lx>]", addr);
+ i++;
+ }
+ }
+ pr_emerg("\n");
+}
+
+void __init trap_init(void)
+{
+ /* Nothing to do here */
+}
+
+/* Breakpoint handler */
+asmlinkage void breakpoint_c(struct pt_regs *fp)
+{
+ /*
+ * The breakpoint entry code has moved the PC on by 4 bytes, so we must
+ * move it back. This could be done on the host but we do it here
+ * because monitor.S of JTAG gdbserver does it too.
+ */
+ fp->ea -= 4;
+ _exception(SIGTRAP, fp, TRAP_BRKPT, fp->ea);
+}
+
+#ifndef CONFIG_NIOS2_ALIGNMENT_TRAP
+/* Alignment exception handler */
+asmlinkage void handle_unaligned_c(struct pt_regs *fp, int cause)
+{
+ unsigned long addr = RDCTL(CTL_BADADDR);
+
+ cause >>= 2;
+ fp->ea -= 4;
+
+ if (fixup_exception(fp))
+ return;
+
+ if (!user_mode(fp)) {
+ pr_alert("Unaligned access from kernel mode, this might be a hardware\n");
+ pr_alert("problem, dump registers and restart the instruction\n");
+ pr_alert(" BADADDR 0x%08lx\n", addr);
+ pr_alert(" cause %d\n", cause);
+ pr_alert(" op-code 0x%08lx\n", *(unsigned long *)(fp->ea));
+ show_regs(fp);
+ return;
+ }
+
+ _exception(SIGBUS, fp, BUS_ADRALN, addr);
+}
+#endif /* CONFIG_NIOS2_ALIGNMENT_TRAP */
+
+/* Illegal instruction handler */
+asmlinkage void handle_illegal_c(struct pt_regs *fp)
+{
+ fp->ea -= 4;
+ _exception(SIGILL, fp, ILL_ILLOPC, fp->ea);
+}
+
+/* Supervisor instruction handler */
+asmlinkage void handle_supervisor_instr(struct pt_regs *fp)
+{
+ fp->ea -= 4;
+ _exception(SIGILL, fp, ILL_PRVOPC, fp->ea);
+}
+
+/* Division error handler */
+asmlinkage void handle_diverror_c(struct pt_regs *fp)
+{
+ fp->ea -= 4;
+ _exception(SIGFPE, fp, FPE_INTDIV, fp->ea);
+}
+
+/* Unhandled exception handler */
+asmlinkage void unhandled_exception(struct pt_regs *regs, int cause)
+{
+ unsigned long addr = RDCTL(CTL_BADADDR);
+
+ cause /= 4;
+
+ pr_emerg("Unhandled exception #%d in %s mode (badaddr=0x%08lx)\n",
+ cause, user_mode(regs) ? "user" : "kernel", addr);
+
+ regs->ea -= 4;
+ show_regs(regs);
+
+ pr_emerg("opcode: 0x%08lx\n", *(unsigned long *)(regs->ea));
+}
diff --git a/arch/nios2/kernel/vmlinux.lds.S b/arch/nios2/kernel/vmlinux.lds.S
new file mode 100644
index 000000000000..326fab40a9de
--- /dev/null
+++ b/arch/nios2/kernel/vmlinux.lds.S
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2009 Thomas Chou <thomas@wytron.com.tw>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+#include <asm/page.h>
+#include <asm-generic/vmlinux.lds.h>
+#include <asm/cache.h>
+#include <asm/thread_info.h>
+
+OUTPUT_FORMAT("elf32-littlenios2", "elf32-littlenios2", "elf32-littlenios2")
+
+OUTPUT_ARCH(nios)
+ENTRY(_start) /* Defined in head.S */
+
+jiffies = jiffies_64;
+
+SECTIONS
+{
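+	/* Kernel link address: physical RAM base OR'd into the kernel virtual region */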
+ . = CONFIG_NIOS2_MEM_BASE | CONFIG_NIOS2_KERNEL_REGION_BASE;
+
+ _text = .;
+ _stext = .;
+ HEAD_TEXT_SECTION
+ .text : {
+ TEXT_TEXT
+ SCHED_TEXT
+ LOCK_TEXT
+ IRQENTRY_TEXT
+ KPROBES_TEXT
+ } =0
+ _etext = .;
+
+ .got : {
+ *(.got.plt)
+ *(.igot.plt)
+ *(.got)
+ *(.igot)
+ }
+
+ EXCEPTION_TABLE(L1_CACHE_BYTES)
+
+ . = ALIGN(PAGE_SIZE);
+ __init_begin = .;
+ INIT_TEXT_SECTION(PAGE_SIZE)
+ INIT_DATA_SECTION(PAGE_SIZE)
+ PERCPU_SECTION(L1_CACHE_BYTES)
+ __init_end = .;
+
+ _sdata = .;
+ RO_DATA_SECTION(PAGE_SIZE)
+ RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
+ _edata = .;
+
+ BSS_SECTION(0, 0, 0)
+ _end = .;
+
+ STABS_DEBUG
+ DWARF_DEBUG
+ NOTES
+
+ DISCARDS
+}
diff --git a/arch/nios2/lib/Makefile b/arch/nios2/lib/Makefile
new file mode 100644
index 000000000000..557256628ecd
--- /dev/null
+++ b/arch/nios2/lib/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for Nios2-specific library files.
+#
+
+lib-y += delay.o
+lib-y += memcpy.o
+lib-y += memmove.o
+lib-y += memset.o
diff --git a/arch/nios2/lib/delay.c b/arch/nios2/lib/delay.c
new file mode 100644
index 000000000000..088119cd0cc5
--- /dev/null
+++ b/arch/nios2/lib/delay.c
@@ -0,0 +1,52 @@
+/* Copyright Altera Corporation (C) 2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include <linux/module.h>
+#include <asm/delay.h>
+#include <asm/param.h>
+#include <asm/processor.h>
+#include <asm/timex.h>
+
+void __delay(unsigned long cycles)
+{
+ cycles_t start = get_cycles();
+
+ while ((get_cycles() - start) < cycles)
+ cpu_relax();
+}
+EXPORT_SYMBOL(__delay);
+
+void __const_udelay(unsigned long xloops)
+{
+ u64 loops;
+
+ loops = (u64)xloops * loops_per_jiffy * HZ;
+
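+	/*
+	 * xloops is the delay in seconds scaled by 2^32; loops_per_jiffy * HZ
+	 * is timer cycles per second, so shifting the 64-bit product down by
+	 * 32 yields the delay in cycles for __delay().
+	 */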
+ __delay(loops >> 32);
+}
+EXPORT_SYMBOL(__const_udelay);
+
+void __udelay(unsigned long usecs)
+{
+ __const_udelay(usecs * 0x10C7UL); /* 2**32 / 1000000 (rounded up) */
+}
+EXPORT_SYMBOL(__udelay);
+
+void __ndelay(unsigned long nsecs)
+{
+ __const_udelay(nsecs * 0x5UL); /* 2**32 / 1000000000 (rounded up) */
+}
+EXPORT_SYMBOL(__ndelay);
diff --git a/arch/nios2/lib/memcpy.c b/arch/nios2/lib/memcpy.c
new file mode 100644
index 000000000000..1715f5d28b11
--- /dev/null
+++ b/arch/nios2/lib/memcpy.c
@@ -0,0 +1,202 @@
+/* Extracted from GLIBC memcpy.c and memcopy.h, which is:
+ Copyright (C) 1991, 1992, 1993, 1997, 2004 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Torbjorn Granlund (tege@sics.se).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <linux/types.h>
+
+/* Type to use for aligned memory operations.
+ This should normally be the biggest type supported by a single load
+ and store. */
+#define op_t unsigned long int
+#define OPSIZ (sizeof(op_t))
+
+/* Optimal type for storing bytes in registers. */
+#define reg_char char
+
+#define MERGE(w0, sh_1, w1, sh_2) (((w0) >> (sh_1)) | ((w1) << (sh_2)))
+
+/* Copy exactly NBYTES bytes from SRC_BP to DST_BP,
+ without any assumptions about alignment of the pointers. */
+#define BYTE_COPY_FWD(dst_bp, src_bp, nbytes) \
+do { \
+ size_t __nbytes = (nbytes); \
+ while (__nbytes > 0) { \
+ unsigned char __x = ((unsigned char *) src_bp)[0]; \
+ src_bp += 1; \
+ __nbytes -= 1; \
+ ((unsigned char *) dst_bp)[0] = __x; \
+ dst_bp += 1; \
+ } \
+} while (0)
+
+/* Copy *up to* NBYTES bytes from SRC_BP to DST_BP, with
+ the assumption that DST_BP is aligned on an OPSIZ multiple. If
+ not all bytes could be easily copied, store remaining number of bytes
+ in NBYTES_LEFT, otherwise store 0. */
+/* extern void _wordcopy_fwd_aligned __P ((long int, long int, size_t)); */
+/* extern void _wordcopy_fwd_dest_aligned __P ((long int, long int, size_t)); */
+#define WORD_COPY_FWD(dst_bp, src_bp, nbytes_left, nbytes) \
+do { \
+ if (src_bp % OPSIZ == 0) \
+ _wordcopy_fwd_aligned(dst_bp, src_bp, (nbytes) / OPSIZ);\
+ else \
+ _wordcopy_fwd_dest_aligned(dst_bp, src_bp, (nbytes) / OPSIZ);\
+ src_bp += (nbytes) & -OPSIZ; \
+ dst_bp += (nbytes) & -OPSIZ; \
+ (nbytes_left) = (nbytes) % OPSIZ; \
+} while (0)
+
+
+/* Threshold value for when to enter the unrolled loops. */
+#define OP_T_THRES 16
+
+/* _wordcopy_fwd_aligned -- Copy block beginning at SRCP to
+ block beginning at DSTP with LEN `op_t' words (not LEN bytes!).
+ Both SRCP and DSTP should be aligned for memory operations on `op_t's. */
+/* stream-lined (read x8 + write x8) */
+static void _wordcopy_fwd_aligned(long int dstp, long int srcp, size_t len)
+{
+ while (len > 7) {
+ register op_t a0, a1, a2, a3, a4, a5, a6, a7;
+
+ a0 = ((op_t *) srcp)[0];
+ a1 = ((op_t *) srcp)[1];
+ a2 = ((op_t *) srcp)[2];
+ a3 = ((op_t *) srcp)[3];
+ a4 = ((op_t *) srcp)[4];
+ a5 = ((op_t *) srcp)[5];
+ a6 = ((op_t *) srcp)[6];
+ a7 = ((op_t *) srcp)[7];
+ ((op_t *) dstp)[0] = a0;
+ ((op_t *) dstp)[1] = a1;
+ ((op_t *) dstp)[2] = a2;
+ ((op_t *) dstp)[3] = a3;
+ ((op_t *) dstp)[4] = a4;
+ ((op_t *) dstp)[5] = a5;
+ ((op_t *) dstp)[6] = a6;
+ ((op_t *) dstp)[7] = a7;
+
+ srcp += 8 * OPSIZ;
+ dstp += 8 * OPSIZ;
+ len -= 8;
+ }
+ while (len > 0) {
+ *(op_t *)dstp = *(op_t *)srcp;
+
+ srcp += OPSIZ;
+ dstp += OPSIZ;
+ len -= 1;
+ }
+}
+
+/* _wordcopy_fwd_dest_aligned -- Copy block beginning at SRCP to
+ block beginning at DSTP with LEN `op_t' words (not LEN bytes!).
+ DSTP should be aligned for memory operations on `op_t's, but SRCP must
+ *not* be aligned. */
+/* stream-lined (read x4 + write x4) */
+static void _wordcopy_fwd_dest_aligned(long int dstp, long int srcp,
+ size_t len)
+{
+ op_t ap;
+ int sh_1, sh_2;
+
+ /* Calculate how to shift a word read at the memory operation
+ aligned srcp to make it aligned for copy. */
+
+ sh_1 = 8 * (srcp % OPSIZ);
+ sh_2 = 8 * OPSIZ - sh_1;
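+	/* For example, with OPSIZ == 4 and srcp % 4 == 1: sh_1 = 8 and
+	   sh_2 = 24, so each destination word merges the upper three bytes of
+	   one source word with the lowest byte of the next (little-endian). */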
+
+ /* Make SRCP aligned by rounding it down to the beginning of the `op_t'
+ it points in the middle of. */
+ srcp &= -OPSIZ;
+ ap = ((op_t *) srcp)[0];
+ srcp += OPSIZ;
+
+ while (len > 3) {
+ op_t a0, a1, a2, a3;
+
+ a0 = ((op_t *) srcp)[0];
+ a1 = ((op_t *) srcp)[1];
+ a2 = ((op_t *) srcp)[2];
+ a3 = ((op_t *) srcp)[3];
+ ((op_t *) dstp)[0] = MERGE(ap, sh_1, a0, sh_2);
+ ((op_t *) dstp)[1] = MERGE(a0, sh_1, a1, sh_2);
+ ((op_t *) dstp)[2] = MERGE(a1, sh_1, a2, sh_2);
+ ((op_t *) dstp)[3] = MERGE(a2, sh_1, a3, sh_2);
+
+ ap = a3;
+ srcp += 4 * OPSIZ;
+ dstp += 4 * OPSIZ;
+ len -= 4;
+ }
+ while (len > 0) {
+ register op_t a0;
+
+ a0 = ((op_t *) srcp)[0];
+ ((op_t *) dstp)[0] = MERGE(ap, sh_1, a0, sh_2);
+
+ ap = a0;
+ srcp += OPSIZ;
+ dstp += OPSIZ;
+ len -= 1;
+ }
+}
+
+void *memcpy(void *dstpp, const void *srcpp, size_t len)
+{
+ unsigned long int dstp = (long int) dstpp;
+ unsigned long int srcp = (long int) srcpp;
+
+ /* Copy from the beginning to the end. */
+
+	/* If there are enough bytes to copy, use word copies. */
+ if (len >= OP_T_THRES) {
+ /* Copy just a few bytes to make DSTP aligned. */
+ len -= (-dstp) % OPSIZ;
+ BYTE_COPY_FWD(dstp, srcp, (-dstp) % OPSIZ);
+
+ /* Copy whole pages from SRCP to DSTP by virtual address
+ manipulation, as much as possible. */
+
+ /* PAGE_COPY_FWD_MAYBE (dstp, srcp, len, len); */
+
+ /* Copy from SRCP to DSTP taking advantage of the known
+ alignment of DSTP. Number of bytes remaining is put in the
+ third argument, i.e. in LEN. This number may vary from
+ machine to machine. */
+
+ WORD_COPY_FWD(dstp, srcp, len, len);
+
+ /* Fall out and copy the tail. */
+ }
+
+ /* There are just a few bytes to copy. Use byte memory operations. */
+ BYTE_COPY_FWD(dstp, srcp, len);
+
+ return dstpp;
+}
+
+void *memcpyb(void *dstpp, const void *srcpp, unsigned len)
+{
+ unsigned long int dstp = (long int) dstpp;
+ unsigned long int srcp = (long int) srcpp;
+
+ BYTE_COPY_FWD(dstp, srcp, len);
+
+ return dstpp;
+}
diff --git a/arch/nios2/lib/memmove.c b/arch/nios2/lib/memmove.c
new file mode 100644
index 000000000000..c65ef517eb80
--- /dev/null
+++ b/arch/nios2/lib/memmove.c
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2011 Tobias Klauser <tklauser@distanz.ch>
+ * Copyright (C) 2004 Microtronix Datacom Ltd
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/types.h>
+#include <linux/string.h>
+
+#ifdef __HAVE_ARCH_MEMMOVE
+void *memmove(void *d, const void *s, size_t count)
+{
+ unsigned long dst, src;
+
+ if (!count)
+ return d;
+
+ if (d < s) {
+ dst = (unsigned long) d;
+ src = (unsigned long) s;
+
+ if ((count < 8) || ((dst ^ src) & 3))
+ goto restup;
+
+ if (dst & 1) {
+ *(char *)dst++ = *(char *)src++;
+ count--;
+ }
+ if (dst & 2) {
+ *(short *)dst = *(short *)src;
+ src += 2;
+ dst += 2;
+ count -= 2;
+ }
+ while (count > 3) {
+ *(long *)dst = *(long *)src;
+ src += 4;
+ dst += 4;
+ count -= 4;
+ }
+restup:
+ while (count--)
+ *(char *)dst++ = *(char *)src++;
+ } else {
+ dst = (unsigned long) d + count;
+ src = (unsigned long) s + count;
+
+ if ((count < 8) || ((dst ^ src) & 3))
+ goto restdown;
+
+ if (dst & 1) {
+ src--;
+ dst--;
+ count--;
+ *(char *)dst = *(char *)src;
+ }
+ if (dst & 2) {
+ src -= 2;
+ dst -= 2;
+ count -= 2;
+ *(short *)dst = *(short *)src;
+ }
+ while (count > 3) {
+ src -= 4;
+ dst -= 4;
+ count -= 4;
+ *(long *)dst = *(long *)src;
+ }
+restdown:
+ while (count--) {
+ src--;
+ dst--;
+ *(char *)dst = *(char *)src;
+ }
+ }
+
+ return d;
+}
+#endif /* __HAVE_ARCH_MEMMOVE */
diff --git a/arch/nios2/lib/memset.c b/arch/nios2/lib/memset.c
new file mode 100644
index 000000000000..65e97802f5cc
--- /dev/null
+++ b/arch/nios2/lib/memset.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2011 Tobias Klauser <tklauser@distanz.ch>
+ * Copyright (C) 2004 Microtronix Datacom Ltd
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/types.h>
+#include <linux/string.h>
+
+#ifdef __HAVE_ARCH_MEMSET
+void *memset(void *s, int c, size_t count)
+{
+ int destptr, charcnt, dwordcnt, fill8reg, wrkrega;
+
+ if (!count)
+ return s;
+
+ c &= 0xFF;
+
+ if (count <= 8) {
+ char *xs = (char *) s;
+
+ while (count--)
+ *xs++ = c;
+ return s;
+ }
+
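+	/*
+	 * Replicate the fill byte across a 32-bit word, align the destination
+	 * to a 4-byte boundary, store whole words, then write any residual
+	 * halfword and byte.
+	 */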
+ __asm__ __volatile__ (
+ /* fill8 %3, %5 (c & 0xff) */
+ " slli %4, %5, 8\n"
+ " or %4, %4, %5\n"
+ " slli %3, %4, 16\n"
+ " or %3, %3, %4\n"
+ /* Word-align %0 (s) if necessary */
+ " andi %4, %0, 0x01\n"
+ " beq %4, zero, 1f\n"
+ " addi %1, %1, -1\n"
+ " stb %3, 0(%0)\n"
+ " addi %0, %0, 1\n"
+ "1: mov %2, %1\n"
+ /* Dword-align %0 (s) if necessary */
+ " andi %4, %0, 0x02\n"
+ " beq %4, zero, 2f\n"
+ " addi %1, %1, -2\n"
+ " sth %3, 0(%0)\n"
+ " addi %0, %0, 2\n"
+ " mov %2, %1\n"
+ /* %1 and %2 are how many more bytes to set */
+ "2: srli %2, %2, 2\n"
+ /* %2 is how many dwords to set */
+ "3: stw %3, 0(%0)\n"
+ " addi %0, %0, 4\n"
+ " addi %2, %2, -1\n"
+ " bne %2, zero, 3b\n"
+ /* store residual word and/or byte if necessary */
+ " andi %4, %1, 0x02\n"
+ " beq %4, zero, 4f\n"
+ " sth %3, 0(%0)\n"
+ " addi %0, %0, 2\n"
+ /* store residual byte if necessary */
+ "4: andi %4, %1, 0x01\n"
+ " beq %4, zero, 5f\n"
+ " stb %3, 0(%0)\n"
+ "5:\n"
+ : "=r" (destptr), /* %0 Output */
+ "=r" (charcnt), /* %1 Output */
+ "=r" (dwordcnt), /* %2 Output */
+ "=r" (fill8reg), /* %3 Output */
+ "=r" (wrkrega) /* %4 Output */
+ : "r" (c), /* %5 Input */
+ "0" (s), /* %0 Input/Output */
+ "1" (count) /* %1 Input/Output */
+ : "memory" /* clobbered */
+ );
+
+ return s;
+}
+#endif /* __HAVE_ARCH_MEMSET */
diff --git a/arch/nios2/mm/Makefile b/arch/nios2/mm/Makefile
new file mode 100644
index 000000000000..3cbd0840873c
--- /dev/null
+++ b/arch/nios2/mm/Makefile
@@ -0,0 +1,14 @@
+#
+# Makefile for the Nios2-specific parts of the memory manager.
+#
+
+obj-y += cacheflush.o
+obj-y += dma-mapping.o
+obj-y += extable.o
+obj-y += fault.o
+obj-y += init.o
+obj-y += ioremap.o
+obj-y += mmu_context.o
+obj-y += pgtable.o
+obj-y += tlb.o
+obj-y += uaccess.o
diff --git a/arch/nios2/mm/cacheflush.c b/arch/nios2/mm/cacheflush.c
new file mode 100644
index 000000000000..2ae482b42669
--- /dev/null
+++ b/arch/nios2/mm/cacheflush.c
@@ -0,0 +1,271 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2009, Wind River Systems Inc
+ * Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
+ */
+
+#include <linux/export.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+
+#include <asm/cacheflush.h>
+#include <asm/cpuinfo.h>
+
+static void __flush_dcache(unsigned long start, unsigned long end)
+{
+ unsigned long addr;
+
+ start &= ~(cpuinfo.dcache_line_size - 1);
+ end += (cpuinfo.dcache_line_size - 1);
+ end &= ~(cpuinfo.dcache_line_size - 1);
+
+ if (end > start + cpuinfo.dcache_size)
+ end = start + cpuinfo.dcache_size;
+
+ for (addr = start; addr < end; addr += cpuinfo.dcache_line_size) {
+ __asm__ __volatile__ (" flushda 0(%0)\n"
+ : /* Outputs */
+ : /* Inputs */ "r"(addr)
+ /* : No clobber */);
+ }
+}
+
+static void __flush_dcache_all(unsigned long start, unsigned long end)
+{
+ unsigned long addr;
+
+ start &= ~(cpuinfo.dcache_line_size - 1);
+ end += (cpuinfo.dcache_line_size - 1);
+ end &= ~(cpuinfo.dcache_line_size - 1);
+
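+	/* flushd works by cache index, so at most one cache's worth of lines
+	 * ever needs to be walked */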
+ if (end > start + cpuinfo.dcache_size)
+ end = start + cpuinfo.dcache_size;
+
+ for (addr = start; addr < end; addr += cpuinfo.dcache_line_size) {
+ __asm__ __volatile__ (" flushd 0(%0)\n"
+ : /* Outputs */
+ : /* Inputs */ "r"(addr)
+ /* : No clobber */);
+ }
+}
+
+static void __invalidate_dcache(unsigned long start, unsigned long end)
+{
+ unsigned long addr;
+
+ start &= ~(cpuinfo.dcache_line_size - 1);
+ end += (cpuinfo.dcache_line_size - 1);
+ end &= ~(cpuinfo.dcache_line_size - 1);
+
+ if (end > start + cpuinfo.dcache_size)
+ end = start + cpuinfo.dcache_size;
+
+ for (addr = start; addr < end; addr += cpuinfo.dcache_line_size) {
+ __asm__ __volatile__ (" initda 0(%0)\n"
+ : /* Outputs */
+ : /* Inputs */ "r"(addr)
+ /* : No clobber */);
+ }
+}
+
+static void __flush_icache(unsigned long start, unsigned long end)
+{
+ unsigned long addr;
+
+ start &= ~(cpuinfo.icache_line_size - 1);
+ end += (cpuinfo.icache_line_size - 1);
+ end &= ~(cpuinfo.icache_line_size - 1);
+
+ if (end > start + cpuinfo.icache_size)
+ end = start + cpuinfo.icache_size;
+
+ for (addr = start; addr < end; addr += cpuinfo.icache_line_size) {
+ __asm__ __volatile__ (" flushi %0\n"
+ : /* Outputs */
+ : /* Inputs */ "r"(addr)
+ /* : No clobber */);
+ }
+ __asm__ __volatile(" flushp\n");
+}
+
+static void flush_aliases(struct address_space *mapping, struct page *page)
+{
+ struct mm_struct *mm = current->active_mm;
+ struct vm_area_struct *mpnt;
+ pgoff_t pgoff;
+
+ pgoff = page->index;
+
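+	/* Flush every shared user-space alias of this page in the current mm */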
+ flush_dcache_mmap_lock(mapping);
+ vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
+ unsigned long offset;
+
+ if (mpnt->vm_mm != mm)
+ continue;
+ if (!(mpnt->vm_flags & VM_MAYSHARE))
+ continue;
+
+ offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
+ flush_cache_page(mpnt, mpnt->vm_start + offset,
+ page_to_pfn(page));
+ }
+ flush_dcache_mmap_unlock(mapping);
+}
+
+void flush_cache_all(void)
+{
+ __flush_dcache_all(0, cpuinfo.dcache_size);
+ __flush_icache(0, cpuinfo.icache_size);
+}
+
+void flush_cache_mm(struct mm_struct *mm)
+{
+ flush_cache_all();
+}
+
+void flush_cache_dup_mm(struct mm_struct *mm)
+{
+ flush_cache_all();
+}
+
+void flush_icache_range(unsigned long start, unsigned long end)
+{
+ __flush_icache(start, end);
+}
+
+void flush_dcache_range(unsigned long start, unsigned long end)
+{
+ __flush_dcache(start, end);
+}
+EXPORT_SYMBOL(flush_dcache_range);
+
+void invalidate_dcache_range(unsigned long start, unsigned long end)
+{
+ __invalidate_dcache(start, end);
+}
+EXPORT_SYMBOL(invalidate_dcache_range);
+
+void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+{
+ __flush_dcache(start, end);
+ if (vma == NULL || (vma->vm_flags & VM_EXEC))
+ __flush_icache(start, end);
+}
+
+void flush_icache_page(struct vm_area_struct *vma, struct page *page)
+{
+ unsigned long start = (unsigned long) page_address(page);
+ unsigned long end = start + PAGE_SIZE;
+
+ __flush_icache(start, end);
+}
+
+void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
+ unsigned long pfn)
+{
+ unsigned long start = vmaddr;
+ unsigned long end = start + PAGE_SIZE;
+
+ __flush_dcache(start, end);
+ if (vma->vm_flags & VM_EXEC)
+ __flush_icache(start, end);
+}
+
+void flush_dcache_page(struct page *page)
+{
+ struct address_space *mapping;
+
+ /*
+ * The zero page is never written to, so never has any dirty
+ * cache lines, and therefore never needs to be flushed.
+ */
+ if (page == ZERO_PAGE(0))
+ return;
+
+ mapping = page_mapping(page);
+
+ /* Flush this page if there are aliases. */
+ if (mapping && !mapping_mapped(mapping)) {
+ clear_bit(PG_dcache_clean, &page->flags);
+ } else {
+ unsigned long start = (unsigned long)page_address(page);
+
+ __flush_dcache_all(start, start + PAGE_SIZE);
+ if (mapping)
+ flush_aliases(mapping, page);
+ set_bit(PG_dcache_clean, &page->flags);
+ }
+}
+EXPORT_SYMBOL(flush_dcache_page);
+
+void update_mmu_cache(struct vm_area_struct *vma,
+ unsigned long address, pte_t *pte)
+{
+ unsigned long pfn = pte_pfn(*pte);
+ struct page *page;
+
+ if (!pfn_valid(pfn))
+ return;
+
+ /*
+ * The zero page is never written to, so never has any dirty
+ * cache lines, and therefore never needs to be flushed.
+ */
+ page = pfn_to_page(pfn);
+ if (page == ZERO_PAGE(0))
+ return;
+
+ if (!PageReserved(page) &&
+ !test_and_set_bit(PG_dcache_clean, &page->flags)) {
+ unsigned long start = page_to_virt(page);
+ struct address_space *mapping;
+
+ __flush_dcache(start, start + PAGE_SIZE);
+
+ mapping = page_mapping(page);
+ if (mapping)
+ flush_aliases(mapping, page);
+ }
+}
+
+void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
+ struct page *to)
+{
+ __flush_dcache(vaddr, vaddr + PAGE_SIZE);
+ copy_page(vto, vfrom);
+ __flush_dcache((unsigned long)vto, (unsigned long)vto + PAGE_SIZE);
+}
+
+void clear_user_page(void *addr, unsigned long vaddr, struct page *page)
+{
+ __flush_dcache(vaddr, vaddr + PAGE_SIZE);
+ clear_page(addr);
+ __flush_dcache((unsigned long)addr, (unsigned long)addr + PAGE_SIZE);
+}
+
+void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
+ unsigned long user_vaddr,
+ void *dst, void *src, int len)
+{
+ flush_cache_page(vma, user_vaddr, page_to_pfn(page));
+ memcpy(dst, src, len);
+ __flush_dcache((unsigned long)src, (unsigned long)src + len);
+ if (vma->vm_flags & VM_EXEC)
+ __flush_icache((unsigned long)src, (unsigned long)src + len);
+}
+
+void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+ unsigned long user_vaddr,
+ void *dst, void *src, int len)
+{
+ flush_cache_page(vma, user_vaddr, page_to_pfn(page));
+ memcpy(dst, src, len);
+ __flush_dcache((unsigned long)dst, (unsigned long)dst + len);
+ if (vma->vm_flags & VM_EXEC)
+ __flush_icache((unsigned long)dst, (unsigned long)dst + len);
+}
diff --git a/arch/nios2/mm/dma-mapping.c b/arch/nios2/mm/dma-mapping.c
new file mode 100644
index 000000000000..ac5da7594f0b
--- /dev/null
+++ b/arch/nios2/mm/dma-mapping.c
@@ -0,0 +1,186 @@
+/*
+ * Copyright (C) 2011 Tobias Klauser <tklauser@distanz.ch>
+ * Copyright (C) 2009 Wind River Systems Inc
+ * Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
+ *
+ * Based on DMA code from MIPS.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/export.h>
+#include <linux/string.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/cache.h>
+#include <asm/cacheflush.h>
+
+
+void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp)
+{
+ void *ret;
+
+ /* ignore region specifiers */
+ gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
+
+ /* optimized page clearing */
+ gfp |= __GFP_ZERO;
+
+ if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
+ gfp |= GFP_DMA;
+
+ ret = (void *) __get_free_pages(gfp, get_order(size));
+ if (ret != NULL) {
+ *dma_handle = virt_to_phys(ret);
+ flush_dcache_range((unsigned long) ret,
+ (unsigned long) ret + size);
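+		/* Hand back an uncached alias so the CPU and device views stay
+		 * coherent */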
+ ret = UNCAC_ADDR(ret);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(dma_alloc_coherent);
+
+void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle)
+{
+ unsigned long addr = (unsigned long) CAC_ADDR((unsigned long) vaddr);
+
+ free_pages(addr, get_order(size));
+}
+EXPORT_SYMBOL(dma_free_coherent);
+
+int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction direction)
+{
+ int i;
+
+ BUG_ON(!valid_dma_direction(direction));
+
+ for_each_sg(sg, sg, nents, i) {
+ void *addr;
+
+ addr = sg_virt(sg);
+ if (addr) {
+ __dma_sync_for_device(addr, sg->length, direction);
+ sg->dma_address = sg_phys(sg);
+ }
+ }
+
+ return nents;
+}
+EXPORT_SYMBOL(dma_map_sg);
+
+dma_addr_t dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+ void *addr;
+
+ BUG_ON(!valid_dma_direction(direction));
+
+ addr = page_address(page) + offset;
+ __dma_sync_for_device(addr, size, direction);
+
+ return page_to_phys(page) + offset;
+}
+EXPORT_SYMBOL(dma_map_page);
+
+void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
+ enum dma_data_direction direction)
+{
+ BUG_ON(!valid_dma_direction(direction));
+
+ __dma_sync_for_cpu(phys_to_virt(dma_address), size, direction);
+}
+EXPORT_SYMBOL(dma_unmap_page);
+
+void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
+ enum dma_data_direction direction)
+{
+ void *addr;
+ int i;
+
+ BUG_ON(!valid_dma_direction(direction));
+
+ if (direction == DMA_TO_DEVICE)
+ return;
+
+ for_each_sg(sg, sg, nhwentries, i) {
+ addr = sg_virt(sg);
+ if (addr)
+ __dma_sync_for_cpu(addr, sg->length, direction);
+ }
+}
+EXPORT_SYMBOL(dma_unmap_sg);
+
+void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction direction)
+{
+ BUG_ON(!valid_dma_direction(direction));
+
+ __dma_sync_for_cpu(phys_to_virt(dma_handle), size, direction);
+}
+EXPORT_SYMBOL(dma_sync_single_for_cpu);
+
+void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction direction)
+{
+ BUG_ON(!valid_dma_direction(direction));
+
+ __dma_sync_for_device(phys_to_virt(dma_handle), size, direction);
+}
+EXPORT_SYMBOL(dma_sync_single_for_device);
+
+void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+ BUG_ON(!valid_dma_direction(direction));
+
+ __dma_sync_for_cpu(phys_to_virt(dma_handle), size, direction);
+}
+EXPORT_SYMBOL(dma_sync_single_range_for_cpu);
+
+void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+ BUG_ON(!valid_dma_direction(direction));
+
+ __dma_sync_for_device(phys_to_virt(dma_handle), size, direction);
+}
+EXPORT_SYMBOL(dma_sync_single_range_for_device);
+
+void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
+ enum dma_data_direction direction)
+{
+ int i;
+
+ BUG_ON(!valid_dma_direction(direction));
+
+ /* Make sure that gcc doesn't leave the empty loop body. */
+ for_each_sg(sg, sg, nelems, i)
+ __dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
+}
+EXPORT_SYMBOL(dma_sync_sg_for_cpu);
+
+void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+ int nelems, enum dma_data_direction direction)
+{
+ int i;
+
+ BUG_ON(!valid_dma_direction(direction));
+
+ /* Make sure that gcc doesn't leave the empty loop body. */
+ for_each_sg(sg, sg, nelems, i)
+ __dma_sync_for_device(sg_virt(sg), sg->length, direction);
+
+}
+EXPORT_SYMBOL(dma_sync_sg_for_device);
diff --git a/arch/nios2/mm/extable.c b/arch/nios2/mm/extable.c
new file mode 100644
index 000000000000..4d2fc5a589d0
--- /dev/null
+++ b/arch/nios2/mm/extable.c
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2010, Tobias Klauser <tklauser@distanz.ch>
+ * Copyright (C) 2009, Wind River Systems Inc
+ * Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/uaccess.h>
+
+int fixup_exception(struct pt_regs *regs)
+{
+ const struct exception_table_entry *fixup;
+
+ fixup = search_exception_tables(regs->ea);
+ if (fixup) {
+ regs->ea = fixup->fixup;
+ return 1;
+ }
+
+ return 0;
+}
diff --git a/arch/nios2/mm/fault.c b/arch/nios2/mm/fault.c
new file mode 100644
index 000000000000..15a0bb5fc06d
--- /dev/null
+++ b/arch/nios2/mm/fault.c
@@ -0,0 +1,251 @@
+/*
+ * Copyright (C) 2009 Wind River Systems Inc
+ * Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
+ *
+ * based on arch/mips/mm/fault.c which is:
+ *
+ * Copyright (C) 1995-2000 Ralf Baechle
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/ptrace.h>
+
+#include <asm/mmu_context.h>
+#include <asm/traps.h>
+
+#define EXC_SUPERV_INSN_ACCESS 9 /* Supervisor only instruction address */
+#define EXC_SUPERV_DATA_ACCESS 11 /* Supervisor only data address */
+#define EXC_X_PROTECTION_FAULT 13 /* TLB permission violation (x) */
+#define EXC_R_PROTECTION_FAULT 14 /* TLB permission violation (r) */
+#define EXC_W_PROTECTION_FAULT 15 /* TLB permission violation (w) */
+
+/*
+ * This routine handles page faults. It determines the address,
+ * and the problem, and then passes it off to one of the appropriate
+ * routines.
+ */
+asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long cause,
+ unsigned long address)
+{
+ struct vm_area_struct *vma = NULL;
+ struct task_struct *tsk = current;
+ struct mm_struct *mm = tsk->mm;
+ int code = SEGV_MAPERR;
+ int fault;
+ unsigned int flags = 0;
+
+ cause >>= 2;
+
+ /* Restart the instruction */
+ regs->ea -= 4;
+
+ /*
+ * We fault-in kernel-space virtual memory on-demand. The
+ * 'reference' page table is init_mm.pgd.
+ *
+ * NOTE! We MUST NOT take any locks for this case. We may
+ * be in an interrupt or a critical region, and should
+ * only copy the information from the master page table,
+ * nothing more.
+ */
+ if (unlikely(address >= VMALLOC_START && address <= VMALLOC_END)) {
+ if (user_mode(regs))
+ goto bad_area_nosemaphore;
+ else
+ goto vmalloc_fault;
+ }
+
+ if (unlikely(address >= TASK_SIZE))
+ goto bad_area_nosemaphore;
+
+ /*
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+ if (in_atomic() || !mm)
+ goto bad_area_nosemaphore;
+
+ if (user_mode(regs))
+ flags |= FAULT_FLAG_USER;
+
+ if (!down_read_trylock(&mm->mmap_sem)) {
+ if (!user_mode(regs) && !search_exception_tables(regs->ea))
+ goto bad_area_nosemaphore;
+ down_read(&mm->mmap_sem);
+ }
+
+ vma = find_vma(mm, address);
+ if (!vma)
+ goto bad_area;
+ if (vma->vm_start <= address)
+ goto good_area;
+ if (!(vma->vm_flags & VM_GROWSDOWN))
+ goto bad_area;
+ if (expand_stack(vma, address))
+ goto bad_area;
+/*
+ * Ok, we have a good vm_area for this memory access, so
+ * we can handle it..
+ */
+good_area:
+ code = SEGV_ACCERR;
+
+ switch (cause) {
+ case EXC_SUPERV_INSN_ACCESS:
+ goto bad_area;
+ case EXC_SUPERV_DATA_ACCESS:
+ goto bad_area;
+ case EXC_X_PROTECTION_FAULT:
+ if (!(vma->vm_flags & VM_EXEC))
+ goto bad_area;
+ break;
+ case EXC_R_PROTECTION_FAULT:
+ if (!(vma->vm_flags & VM_READ))
+ goto bad_area;
+ break;
+ case EXC_W_PROTECTION_FAULT:
+ if (!(vma->vm_flags & VM_WRITE))
+ goto bad_area;
+ flags = FAULT_FLAG_WRITE;
+ break;
+ }
+
+survive:
+ /*
+ * If for any reason at all we couldn't handle the fault,
+ * make sure we exit gracefully rather than endlessly redo
+ * the fault.
+ */
+ fault = handle_mm_fault(mm, vma, address, flags);
+ if (unlikely(fault & VM_FAULT_ERROR)) {
+ if (fault & VM_FAULT_OOM)
+ goto out_of_memory;
+ else if (fault & VM_FAULT_SIGBUS)
+ goto do_sigbus;
+ BUG();
+ }
+ if (fault & VM_FAULT_MAJOR)
+ tsk->maj_flt++;
+ else
+ tsk->min_flt++;
+
+ up_read(&mm->mmap_sem);
+ return;
+
+/*
+ * Something tried to access memory that isn't in our memory map..
+ * Fix it, but check if it's kernel or user first..
+ */
+bad_area:
+ up_read(&mm->mmap_sem);
+
+bad_area_nosemaphore:
+ /* User mode accesses just cause a SIGSEGV */
+ if (user_mode(regs)) {
+ pr_alert("%s: unhandled page fault (%d) at 0x%08lx, "
+ "cause %ld\n", current->comm, SIGSEGV, address, cause);
+ show_regs(regs);
+ _exception(SIGSEGV, regs, code, address);
+ return;
+ }
+
+no_context:
+ /* Are we prepared to handle this kernel fault? */
+ if (fixup_exception(regs))
+ return;
+
+ /*
+ * Oops. The kernel tried to access some bad page. We'll have to
+ * terminate things with extreme prejudice.
+ */
+ bust_spinlocks(1);
+
+ pr_alert("Unable to handle kernel %s at virtual address %08lx",
+ address < PAGE_SIZE ? "NULL pointer dereference" :
+ "paging request", address);
+ pr_alert("ea = %08lx, ra = %08lx, cause = %ld\n", regs->ea, regs->ra,
+ cause);
+ panic("Oops");
+ return;
+
+/*
+ * We ran out of memory, or some other thing happened to us that made
+ * us unable to handle the page fault gracefully.
+ */
+out_of_memory:
+ up_read(&mm->mmap_sem);
+ if (is_global_init(tsk)) {
+ yield();
+ down_read(&mm->mmap_sem);
+ goto survive;
+ }
+ if (!user_mode(regs))
+ goto no_context;
+ pagefault_out_of_memory();
+ return;
+
+do_sigbus:
+ up_read(&mm->mmap_sem);
+
+ /* Kernel mode? Handle exceptions or die */
+ if (!user_mode(regs))
+ goto no_context;
+
+ _exception(SIGBUS, regs, BUS_ADRERR, address);
+ return;
+
+vmalloc_fault:
+ {
+ /*
+ * Synchronize this task's top level page-table
+ * with the 'reference' page table.
+ *
+ * Do _not_ use "tsk" here. We might be inside
+ * an interrupt in the middle of a task switch..
+ */
+ int offset = pgd_index(address);
+ pgd_t *pgd, *pgd_k;
+ pud_t *pud, *pud_k;
+ pmd_t *pmd, *pmd_k;
+ pte_t *pte_k;
+
+ pgd = pgd_current + offset;
+ pgd_k = init_mm.pgd + offset;
+
+ if (!pgd_present(*pgd_k))
+ goto no_context;
+ set_pgd(pgd, *pgd_k);
+
+ pud = pud_offset(pgd, address);
+ pud_k = pud_offset(pgd_k, address);
+ if (!pud_present(*pud_k))
+ goto no_context;
+ pmd = pmd_offset(pud, address);
+ pmd_k = pmd_offset(pud_k, address);
+ if (!pmd_present(*pmd_k))
+ goto no_context;
+ set_pmd(pmd, *pmd_k);
+
+ pte_k = pte_offset_kernel(pmd_k, address);
+ if (!pte_present(*pte_k))
+ goto no_context;
+
+ flush_tlb_one(address);
+ return;
+ }
+}
diff --git a/arch/nios2/mm/init.c b/arch/nios2/mm/init.c
new file mode 100644
index 000000000000..e75c75d249d6
--- /dev/null
+++ b/arch/nios2/mm/init.c
@@ -0,0 +1,142 @@
+/*
+ * Copyright (C) 2013 Altera Corporation
+ * Copyright (C) 2010 Tobias Klauser <tklauser@distanz.ch>
+ * Copyright (C) 2009 Wind River Systems Inc
+ * Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
+ * Copyright (C) 2004 Microtronix Datacom Ltd
+ *
+ * based on arch/m68k/mm/init.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/pagemap.h>
+#include <linux/bootmem.h>
+#include <linux/slab.h>
+#include <linux/binfmts.h>
+
+#include <asm/setup.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/sections.h>
+#include <asm/tlb.h>
+#include <asm/mmu_context.h>
+#include <asm/cpuinfo.h>
+#include <asm/processor.h>
+
+pgd_t *pgd_current;
+
+/*
+ * paging_init() continues the virtual memory environment setup which
+ * was begun by the code in arch/head.S.
+ * The parameters are pointers to where to stick the starting and ending
+ * addresses of available kernel virtual memory.
+ */
+void __init paging_init(void)
+{
+ unsigned long zones_size[MAX_NR_ZONES];
+
+ memset(zones_size, 0, sizeof(zones_size));
+
+ pagetable_init();
+ pgd_current = swapper_pg_dir;
+
+ zones_size[ZONE_NORMAL] = max_mapnr;
+
+ /* pass the memory from the bootmem allocator to the main allocator */
+ free_area_init(zones_size);
+
+ flush_dcache_range((unsigned long)empty_zero_page,
+ (unsigned long)empty_zero_page + PAGE_SIZE);
+}
+
+void __init mem_init(void)
+{
+ unsigned long end_mem = memory_end; /* this must not include
+ kernel stack at top */
+
+ pr_debug("mem_init: start=%lx, end=%lx\n", memory_start, memory_end);
+
+ end_mem &= PAGE_MASK;
+ high_memory = __va(end_mem);
+
+ /* this will put all memory onto the freelists */
+ free_all_bootmem();
+ mem_init_print_info(NULL);
+}
+
+void __init mmu_init(void)
+{
+ flush_tlb_all();
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void __init free_initrd_mem(unsigned long start, unsigned long end)
+{
+ free_reserved_area((void *)start, (void *)end, -1, "initrd");
+}
+#endif
+
+void __init_refok free_initmem(void)
+{
+ free_initmem_default(-1);
+}
+
+#define __page_aligned(order) __aligned(PAGE_SIZE << (order))
+pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned(PGD_ORDER);
+pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned(PTE_ORDER);
+static struct page *kuser_page[1];
+
+static int alloc_kuser_page(void)
+{
+ extern char __kuser_helper_start[], __kuser_helper_end[];
+ int kuser_sz = __kuser_helper_end - __kuser_helper_start;
+ unsigned long vpage;
+
+ vpage = get_zeroed_page(GFP_ATOMIC);
+ if (!vpage)
+ return -ENOMEM;
+
+ /* Copy kuser helpers */
+ memcpy((void *)vpage, __kuser_helper_start, kuser_sz);
+
+ flush_icache_range(vpage, vpage + KUSER_SIZE);
+ kuser_page[0] = virt_to_page(vpage);
+
+ return 0;
+}
+arch_initcall(alloc_kuser_page);
+
+int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+{
+ struct mm_struct *mm = current->mm;
+ int ret;
+
+ down_write(&mm->mmap_sem);
+
+ /* Map kuser helpers to user space address */
+ ret = install_special_mapping(mm, KUSER_BASE, KUSER_SIZE,
+ VM_READ | VM_EXEC | VM_MAYREAD |
+ VM_MAYEXEC, kuser_page);
+
+ up_write(&mm->mmap_sem);
+
+ return ret;
+}
+
+const char *arch_vma_name(struct vm_area_struct *vma)
+{
+ return (vma->vm_start == KUSER_BASE) ? "[kuser]" : NULL;
+}
diff --git a/arch/nios2/mm/ioremap.c b/arch/nios2/mm/ioremap.c
new file mode 100644
index 000000000000..3a28177a01eb
--- /dev/null
+++ b/arch/nios2/mm/ioremap.c
@@ -0,0 +1,187 @@
+/*
+ * Copyright (C) 2010 Tobias Klauser <tklauser@distanz.ch>
+ * Copyright (C) 2009 Wind River Systems Inc
+ * Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
+ * Copyright (C) 2004 Microtronix Datacom Ltd.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/export.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/io.h>
+
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+
+static inline void remap_area_pte(pte_t *pte, unsigned long address,
+ unsigned long size, unsigned long phys_addr,
+ unsigned long flags)
+{
+ unsigned long end;
+ unsigned long pfn;
+ pgprot_t pgprot = __pgprot(_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_READ
+ | _PAGE_WRITE | flags);
+
+ address &= ~PMD_MASK;
+ end = address + size;
+ if (end > PMD_SIZE)
+ end = PMD_SIZE;
+ if (address >= end)
+ BUG();
+ pfn = PFN_DOWN(phys_addr);
+ do {
+ if (!pte_none(*pte)) {
+ pr_err("remap_area_pte: page already exists\n");
+ BUG();
+ }
+ set_pte(pte, pfn_pte(pfn, pgprot));
+ address += PAGE_SIZE;
+ pfn++;
+ pte++;
+ } while (address && (address < end));
+}
+
+static inline int remap_area_pmd(pmd_t *pmd, unsigned long address,
+ unsigned long size, unsigned long phys_addr,
+ unsigned long flags)
+{
+ unsigned long end;
+
+ address &= ~PGDIR_MASK;
+ end = address + size;
+ if (end > PGDIR_SIZE)
+ end = PGDIR_SIZE;
+ phys_addr -= address;
+ if (address >= end)
+ BUG();
+ do {
+ pte_t *pte = pte_alloc_kernel(pmd, address);
+
+ if (!pte)
+ return -ENOMEM;
+ remap_area_pte(pte, address, end - address, address + phys_addr,
+ flags);
+ address = (address + PMD_SIZE) & PMD_MASK;
+ pmd++;
+ } while (address && (address < end));
+ return 0;
+}
+
+static int remap_area_pages(unsigned long address, unsigned long phys_addr,
+ unsigned long size, unsigned long flags)
+{
+ int error;
+ pgd_t *dir;
+ unsigned long end = address + size;
+
+ phys_addr -= address;
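+	/* Bias phys_addr so that (phys_addr + address) is always the physical
+	 * address backing the current virtual address in the loop below */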
+ dir = pgd_offset(&init_mm, address);
+ flush_cache_all();
+ if (address >= end)
+ BUG();
+ do {
+ pud_t *pud;
+ pmd_t *pmd;
+
+ error = -ENOMEM;
+ pud = pud_alloc(&init_mm, dir, address);
+ if (!pud)
+ break;
+ pmd = pmd_alloc(&init_mm, pud, address);
+ if (!pmd)
+ break;
+ if (remap_area_pmd(pmd, address, end - address,
+ phys_addr + address, flags))
+ break;
+ error = 0;
+ address = (address + PGDIR_SIZE) & PGDIR_MASK;
+ dir++;
+ } while (address && (address < end));
+ flush_tlb_all();
+ return error;
+}
+
+#define IS_MAPPABLE_UNCACHEABLE(addr) (addr < 0x20000000UL)
+
+/*
+ * Map some physical address range into the kernel address space.
+ */
+void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
+ unsigned long cacheflag)
+{
+ struct vm_struct *area;
+ unsigned long offset;
+ unsigned long last_addr;
+ void *addr;
+
+ /* Don't allow wraparound or zero size */
+ last_addr = phys_addr + size - 1;
+
+ if (!size || last_addr < phys_addr)
+ return NULL;
+
+ /* Don't allow anybody to remap normal RAM that we're using */
+ if (phys_addr > PHYS_OFFSET && phys_addr < virt_to_phys(high_memory)) {
+ char *t_addr, *t_end;
+ struct page *page;
+
+ t_addr = __va(phys_addr);
+ t_end = t_addr + (size - 1);
+ for (page = virt_to_page(t_addr);
+ page <= virt_to_page(t_end); page++)
+ if (!PageReserved(page))
+ return NULL;
+ }
+
+ /*
+ * Map uncached objects in the low part of address space to
+ * CONFIG_NIOS2_IO_REGION_BASE
+ */
+ if (IS_MAPPABLE_UNCACHEABLE(phys_addr) &&
+ IS_MAPPABLE_UNCACHEABLE(last_addr) &&
+ !(cacheflag & _PAGE_CACHED))
+ return (void __iomem *)(CONFIG_NIOS2_IO_REGION_BASE + phys_addr);
+
+ /* Mappings have to be page-aligned */
+ offset = phys_addr & ~PAGE_MASK;
+ phys_addr &= PAGE_MASK;
+ size = PAGE_ALIGN(last_addr + 1) - phys_addr;
+
+ /* Ok, go for it */
+ area = get_vm_area(size, VM_IOREMAP);
+ if (!area)
+ return NULL;
+ addr = area->addr;
+ if (remap_area_pages((unsigned long) addr, phys_addr, size,
+ cacheflag)) {
+ vunmap(addr);
+ return NULL;
+ }
+ return (void __iomem *) (offset + (char *)addr);
+}
+EXPORT_SYMBOL(__ioremap);
+
+/*
+ * __iounmap unmaps nearly everything, so be careful.
+ * It currently does not free the page tables behind the mapping; that was
+ * never done before either and may be added later.
+ */
+void __iounmap(void __iomem *addr)
+{
+ struct vm_struct *p;
+
+ if ((unsigned long) addr > CONFIG_NIOS2_IO_REGION_BASE)
+ return;
+
+ p = remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr));
+ if (!p)
+ pr_err("iounmap: bad address %p\n", addr);
+ kfree(p);
+}
+EXPORT_SYMBOL(__iounmap);
diff --git a/arch/nios2/mm/mmu_context.c b/arch/nios2/mm/mmu_context.c
new file mode 100644
index 000000000000..45d6b9c58d67
--- /dev/null
+++ b/arch/nios2/mm/mmu_context.c
@@ -0,0 +1,116 @@
+/*
+ * MMU context handling.
+ *
+ * Copyright (C) 2011 Tobias Klauser <tklauser@distanz.ch>
+ * Copyright (C) 2009 Wind River Systems Inc
+ * Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/mm.h>
+
+#include <asm/cpuinfo.h>
+#include <asm/mmu_context.h>
+#include <asm/tlb.h>
+
+/* The pids position and mask in context */
+#define PID_SHIFT 0
+#define PID_BITS (cpuinfo.tlb_pid_num_bits)
+#define PID_MASK ((1UL << PID_BITS) - 1)
+
+/* The versions position and mask in context */
+#define VERSION_BITS (32 - PID_BITS)
+#define VERSION_SHIFT (PID_SHIFT + PID_BITS)
+#define VERSION_MASK ((1UL << VERSION_BITS) - 1)
+
+/* Return the version part of a context */
+#define CTX_VERSION(c) (((c) >> VERSION_SHIFT) & VERSION_MASK)
+
+/* Return the pid part of a context */
+#define CTX_PID(c) (((c) >> PID_SHIFT) & PID_MASK)
+
+/* Value of the first context (version 1, pid 0) */
+#define FIRST_CTX ((1UL << VERSION_SHIFT) | (0 << PID_SHIFT))
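+
+/*
+ * For example (illustrative): with cpuinfo.tlb_pid_num_bits == 8, PID_BITS
+ * is 8 and VERSION_SHIFT is 8, so FIRST_CTX is 0x100 and a context value
+ * of 0x105 encodes version 1, pid 5.
+ */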
+
+static mm_context_t next_mmu_context;
+
+/*
+ * Initialize MMU context management stuff.
+ */
+void __init mmu_context_init(void)
+{
+ /* We need to set this here because the value depends on runtime data
+ * from cpuinfo */
+ next_mmu_context = FIRST_CTX;
+}
+
+/*
+ * Set new context (pid), keep way
+ */
+static void set_context(mm_context_t context)
+{
+ set_mmu_pid(CTX_PID(context));
+}
+
+static mm_context_t get_new_context(void)
+{
+ /* Return the next pid */
+ next_mmu_context += (1UL << PID_SHIFT);
+
+ /* If the pid field wraps around we increase the version and
+ * flush the tlb */
+ if (unlikely(CTX_PID(next_mmu_context) == 0)) {
+ /* The version is incremented since the pid increment above
+ * overflows into the version field */
+ flush_cache_all();
+ flush_tlb_all();
+ }
+
+ /* If the version wraps we start over with the first generation; we do
+ * not need to flush the tlb here since it's always done above */
+ if (unlikely(CTX_VERSION(next_mmu_context) == 0))
+ next_mmu_context = FIRST_CTX;
+
+ return next_mmu_context;
+}
+
+void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ /* If the process context we are swapping in belongs to a different
+ * generation than the current one, it should get a new generation/pid */
+ if (unlikely(CTX_VERSION(next->context) !=
+ CTX_VERSION(next_mmu_context)))
+ next->context = get_new_context();
+
+ /* Save the current pgd so the fast tlb handler can find it */
+ pgd_current = next->pgd;
+
+ /* Set the current context */
+ set_context(next->context);
+
+ local_irq_restore(flags);
+}
+
+/*
+ * After we have set current->mm to a new value, this activates
+ * the context for the new mm so we see the new mappings.
+ */
+void activate_mm(struct mm_struct *prev, struct mm_struct *next)
+{
+ next->context = get_new_context();
+ set_context(next->context);
+ pgd_current = next->pgd;
+}
+
+unsigned long get_pid_from_context(mm_context_t *context)
+{
+ return CTX_PID((*context));
+}
diff --git a/arch/nios2/mm/pgtable.c b/arch/nios2/mm/pgtable.c
new file mode 100644
index 000000000000..61e24a25f71a
--- /dev/null
+++ b/arch/nios2/mm/pgtable.c
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2009 Wind River Systems Inc
+ * Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/mm.h>
+#include <linux/sched.h>
+
+#include <asm/pgtable.h>
+#include <asm/cpuinfo.h>
+
+/* pteaddr:
+ * ptbase | vpn* | zero
+ * 31-22 | 21-2 | 1-0
+ *
+ * *vpn is preserved on double fault
+ *
+ * tlbacc:
+ * IG |*flags| pfn
+ * 31-25|24-20 | 19-0
+ *
+ * *crwxg
+ *
+ * tlbmisc:
+ * resv |way |rd | we|pid |dbl|bad|perm|d
+ * 31-24 |23-20 |19 | 18|17-4|3 |2 |1 |0
+ *
+ */
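+
+/* For instance (illustrative): a cached, readable, writable, global
+ * mapping of pfn 0x00100 would thus show up in tlbacc as 0x01d00100
+ * (the C, R, W and G flag bits plus the pfn).
+ */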
+
+/*
+ * Initialize a new pgd / pmd table with invalid pointers.
+ */
+static void pgd_init(pgd_t *pgd)
+{
+ unsigned long *p = (unsigned long *) pgd;
+ int i;
+
+ for (i = 0; i < USER_PTRS_PER_PGD; i += 8) {
+ p[i + 0] = (unsigned long) invalid_pte_table;
+ p[i + 1] = (unsigned long) invalid_pte_table;
+ p[i + 2] = (unsigned long) invalid_pte_table;
+ p[i + 3] = (unsigned long) invalid_pte_table;
+ p[i + 4] = (unsigned long) invalid_pte_table;
+ p[i + 5] = (unsigned long) invalid_pte_table;
+ p[i + 6] = (unsigned long) invalid_pte_table;
+ p[i + 7] = (unsigned long) invalid_pte_table;
+ }
+}
+
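+/*
+ * Allocate a fresh pgd: user entries point at invalid_pte_table while the
+ * kernel entries are copied from the init_mm page directory.
+ */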
+pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+ pgd_t *ret, *init;
+
+ ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER);
+ if (ret) {
+ init = pgd_offset(&init_mm, 0UL);
+ pgd_init(ret);
+ memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
+ (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+ }
+
+ return ret;
+}
+
+void __init pagetable_init(void)
+{
+ /* Initialize the entire pgd. */
+ pgd_init(swapper_pg_dir);
+ pgd_init(swapper_pg_dir + USER_PTRS_PER_PGD);
+}
diff --git a/arch/nios2/mm/tlb.c b/arch/nios2/mm/tlb.c
new file mode 100644
index 000000000000..cf10326aab1c
--- /dev/null
+++ b/arch/nios2/mm/tlb.c
@@ -0,0 +1,275 @@
+/*
+ * Nios2 TLB handling
+ *
+ * Copyright (C) 2009, Wind River Systems Inc
+ * Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+
+#include <asm/tlb.h>
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
+#include <asm/cpuinfo.h>
+
+#define TLB_INDEX_MASK \
+ ((((1UL << (cpuinfo.tlb_ptr_sz - cpuinfo.tlb_num_ways_log2))) - 1) \
+ << PAGE_SHIFT)
+
+/* Used as illegal PHYS_ADDR for TLB mappings */
+#define MAX_PHYS_ADDR 0
+
+static void get_misc_and_pid(unsigned long *misc, unsigned long *pid)
+{
+ *misc = RDCTL(CTL_TLBMISC);
+ *misc &= (TLBMISC_PID | TLBMISC_WAY);
+ *pid = *misc & TLBMISC_PID;
+}
+
+/*
+ * All entries common to a mm share an asid. To effectively flush these
+ * entries, we just bump the asid.
+ */
+void flush_tlb_mm(struct mm_struct *mm)
+{
+ if (current->mm == mm)
+ flush_tlb_all();
+ else
+ memset(&mm->context, 0, sizeof(mm_context_t));
+}
+
+/*
+ * Flush the TLB entry (if any) that maps the given address for the given
+ * pid/asid.
+ */
+void flush_tlb_one_pid(unsigned long addr, unsigned long mmu_pid)
+{
+ unsigned int way;
+ unsigned long org_misc, pid_misc;
+
+ pr_debug("Flush tlb-entry for vaddr=%#lx\n", addr);
+
+ /* remember pid/way until we return. */
+ get_misc_and_pid(&org_misc, &pid_misc);
+
+ WRCTL(CTL_PTEADDR, (addr >> PAGE_SHIFT) << 2);
+
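+ /* Probe each way for this address: if the read-back vpn and pid match,
+ * replace the entry with an invalid mapping (a unique address in the
+ * I/O region pointing at MAX_PHYS_ADDR). */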
+ for (way = 0; way < cpuinfo.tlb_num_ways; way++) {
+ unsigned long pteaddr;
+ unsigned long tlbmisc;
+ unsigned long pid;
+
+ tlbmisc = pid_misc | TLBMISC_RD | (way << TLBMISC_WAY_SHIFT);
+ WRCTL(CTL_TLBMISC, tlbmisc);
+ pteaddr = RDCTL(CTL_PTEADDR);
+ tlbmisc = RDCTL(CTL_TLBMISC);
+ pid = (tlbmisc >> TLBMISC_PID_SHIFT) & TLBMISC_PID_MASK;
+ if (((((pteaddr >> 2) & 0xfffff)) == (addr >> PAGE_SHIFT)) &&
+ pid == mmu_pid) {
+ unsigned long vaddr = CONFIG_NIOS2_IO_REGION_BASE +
+ ((PAGE_SIZE * cpuinfo.tlb_num_lines) * way) +
+ (addr & TLB_INDEX_MASK);
+ pr_debug("Flush entry by writing %#lx way=%dl pid=%ld\n",
+ vaddr, way, (pid_misc >> TLBMISC_PID_SHIFT));
+
+ WRCTL(CTL_PTEADDR, (vaddr >> 12) << 2);
+ tlbmisc = pid_misc | TLBMISC_WE |
+ (way << TLBMISC_WAY_SHIFT);
+ WRCTL(CTL_TLBMISC, tlbmisc);
+ WRCTL(CTL_TLBACC, (MAX_PHYS_ADDR >> PAGE_SHIFT));
+ }
+ }
+
+ WRCTL(CTL_TLBMISC, org_misc);
+}
+
+void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+{
+ unsigned long mmu_pid = get_pid_from_context(&vma->vm_mm->context);
+
+ while (start < end) {
+ flush_tlb_one_pid(start, mmu_pid);
+ start += PAGE_SIZE;
+ }
+}
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+ while (start < end) {
+ flush_tlb_one(start);
+ start += PAGE_SIZE;
+ }
+}
+
+/*
+ * This one is only used for pages with the global bit set so we don't care
+ * much about the ASID.
+ */
+void flush_tlb_one(unsigned long addr)
+{
+ unsigned int way;
+ unsigned long org_misc, pid_misc;
+
+ pr_debug("Flush tlb-entry for vaddr=%#lx\n", addr);
+
+ /* remember pid/way until we return. */
+ get_misc_and_pid(&org_misc, &pid_misc);
+
+ WRCTL(CTL_PTEADDR, (addr >> PAGE_SHIFT) << 2);
+
+ for (way = 0; way < cpuinfo.tlb_num_ways; way++) {
+ unsigned long pteaddr;
+ unsigned long tlbmisc;
+
+ tlbmisc = pid_misc | TLBMISC_RD | (way << TLBMISC_WAY_SHIFT);
+ WRCTL(CTL_TLBMISC, tlbmisc);
+ pteaddr = RDCTL(CTL_PTEADDR);
+ tlbmisc = RDCTL(CTL_TLBMISC);
+
+ if ((((pteaddr >> 2) & 0xfffff)) == (addr >> PAGE_SHIFT)) {
+ unsigned long vaddr = CONFIG_NIOS2_IO_REGION_BASE +
+ ((PAGE_SIZE * cpuinfo.tlb_num_lines) * way) +
+ (addr & TLB_INDEX_MASK);
+
+ pr_debug("Flush entry by writing %#lx way=%dl pid=%ld\n",
+ vaddr, way, (pid_misc >> TLBMISC_PID_SHIFT));
+
+ tlbmisc = pid_misc | TLBMISC_WE |
+ (way << TLBMISC_WAY_SHIFT);
+ WRCTL(CTL_PTEADDR, (vaddr >> 12) << 2);
+ WRCTL(CTL_TLBMISC, tlbmisc);
+ WRCTL(CTL_TLBACC, (MAX_PHYS_ADDR >> PAGE_SHIFT));
+ }
+ }
+
+ WRCTL(CTL_TLBMISC, org_misc);
+}
+
+void dump_tlb_line(unsigned long line)
+{
+ unsigned int way;
+ unsigned long org_misc;
+
+ pr_debug("dump tlb-entries for line=%#lx (addr %08lx)\n", line,
+ line << (PAGE_SHIFT + cpuinfo.tlb_num_ways_log2));
+
+ /* remember pid/way until we return */
+ org_misc = (RDCTL(CTL_TLBMISC) & (TLBMISC_PID | TLBMISC_WAY));
+
+ WRCTL(CTL_PTEADDR, line << 2);
+
+ for (way = 0; way < cpuinfo.tlb_num_ways; way++) {
+ unsigned long pteaddr;
+ unsigned long tlbmisc;
+ unsigned long tlbacc;
+
+ WRCTL(CTL_TLBMISC, TLBMISC_RD | (way << TLBMISC_WAY_SHIFT));
+ pteaddr = RDCTL(CTL_PTEADDR);
+ tlbmisc = RDCTL(CTL_TLBMISC);
+ tlbacc = RDCTL(CTL_TLBACC);
+
+ if ((tlbacc << PAGE_SHIFT) != (MAX_PHYS_ADDR & PAGE_MASK)) {
+ pr_debug("-- way:%02x vpn:0x%08lx phys:0x%08lx pid:0x%02lx flags:%c%c%c%c%c\n",
+ way,
+ (pteaddr << (PAGE_SHIFT-2)),
+ (tlbacc << PAGE_SHIFT),
+ ((tlbmisc >> TLBMISC_PID_SHIFT) &
+ TLBMISC_PID_MASK),
+ (tlbacc & _PAGE_READ ? 'r' : '-'),
+ (tlbacc & _PAGE_WRITE ? 'w' : '-'),
+ (tlbacc & _PAGE_EXEC ? 'x' : '-'),
+ (tlbacc & _PAGE_GLOBAL ? 'g' : '-'),
+ (tlbacc & _PAGE_CACHED ? 'c' : '-'));
+ }
+ }
+
+ WRCTL(CTL_TLBMISC, org_misc);
+}
+
+void dump_tlb(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < cpuinfo.tlb_num_lines; i++)
+ dump_tlb_line(i);
+}
+
+void flush_tlb_pid(unsigned long pid)
+{
+ unsigned int line;
+ unsigned int way;
+ unsigned long org_misc, pid_misc;
+
+ /* remember pid/way until we return */
+ get_misc_and_pid(&org_misc, &pid_misc);
+
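+ /* Walk every line and way and invalidate any entry whose pid matches. */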
+ for (line = 0; line < cpuinfo.tlb_num_lines; line++) {
+ WRCTL(CTL_PTEADDR, line << 2);
+
+ for (way = 0; way < cpuinfo.tlb_num_ways; way++) {
+ unsigned long pteaddr;
+ unsigned long tlbmisc;
+ unsigned long tlbacc;
+
+ tlbmisc = pid_misc | TLBMISC_RD |
+ (way << TLBMISC_WAY_SHIFT);
+ WRCTL(CTL_TLBMISC, tlbmisc);
+ pteaddr = RDCTL(CTL_PTEADDR);
+ tlbmisc = RDCTL(CTL_TLBMISC);
+ tlbacc = RDCTL(CTL_TLBACC);
+
+ if (((tlbmisc>>TLBMISC_PID_SHIFT) & TLBMISC_PID_MASK)
+ == pid) {
+ tlbmisc = pid_misc | TLBMISC_WE |
+ (way << TLBMISC_WAY_SHIFT);
+ WRCTL(CTL_TLBMISC, tlbmisc);
+ WRCTL(CTL_TLBACC,
+ (MAX_PHYS_ADDR >> PAGE_SHIFT));
+ }
+ }
+
+ WRCTL(CTL_TLBMISC, org_misc);
+ }
+}
+
+void flush_tlb_all(void)
+{
+ int i;
+ unsigned long vaddr = CONFIG_NIOS2_IO_REGION_BASE;
+ unsigned int way;
+ unsigned long org_misc, pid_misc, tlbmisc;
+
+ /* remember pid/way until we return */
+ get_misc_and_pid(&org_misc, &pid_misc);
+ pid_misc |= TLBMISC_WE;
+
+ /* Map each TLB entry to physical address 0 with no-access and a
+ bad ptbase */
+ for (way = 0; way < cpuinfo.tlb_num_ways; way++) {
+ tlbmisc = pid_misc | (way << TLBMISC_WAY_SHIFT);
+ for (i = 0; i < cpuinfo.tlb_num_lines; i++) {
+ WRCTL(CTL_PTEADDR, ((vaddr) >> PAGE_SHIFT) << 2);
+ WRCTL(CTL_TLBMISC, tlbmisc);
+ WRCTL(CTL_TLBACC, (MAX_PHYS_ADDR >> PAGE_SHIFT));
+ vaddr += 1UL << 12;
+ }
+ }
+
+ /* restore pid/way */
+ WRCTL(CTL_TLBMISC, org_misc);
+}
+
+void set_mmu_pid(unsigned long pid)
+{
+ WRCTL(CTL_TLBMISC, (RDCTL(CTL_TLBMISC) & TLBMISC_WAY) |
+ ((pid & TLBMISC_PID_MASK) << TLBMISC_PID_SHIFT));
+}
diff --git a/arch/nios2/mm/uaccess.c b/arch/nios2/mm/uaccess.c
new file mode 100644
index 000000000000..7663e156ff4f
--- /dev/null
+++ b/arch/nios2/mm/uaccess.c
@@ -0,0 +1,163 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2009, Wind River Systems Inc
+ * Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
+ */
+
+#include <linux/export.h>
+#include <linux/uaccess.h>
+
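+/*
+ * __copy_from_user(to, from, n): copy n bytes from user space, returning
+ * the number of bytes that could not be copied (0 on success). Small or
+ * mutually misaligned buffers are copied byte by byte, otherwise halfword
+ * and word accesses are used; faulting loads are fixed up through the
+ * __ex_table entries below.
+ */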
+asm(".global __copy_from_user\n"
+ " .type __copy_from_user, @function\n"
+ "__copy_from_user:\n"
+ " movi r2,7\n"
+ " mov r3,r4\n"
+ " bge r2,r6,1f\n"
+ " xor r2,r4,r5\n"
+ " andi r2,r2,3\n"
+ " movi r7,3\n"
+ " beq r2,zero,4f\n"
+ "1: addi r6,r6,-1\n"
+ " movi r2,-1\n"
+ " beq r6,r2,3f\n"
+ " mov r7,r2\n"
+ "2: ldbu r2,0(r5)\n"
+ " addi r6,r6,-1\n"
+ " addi r5,r5,1\n"
+ " stb r2,0(r3)\n"
+ " addi r3,r3,1\n"
+ " bne r6,r7,2b\n"
+ "3:\n"
+ " addi r2,r6,1\n"
+ " ret\n"
+ "13:mov r2,r6\n"
+ " ret\n"
+ "4: andi r2,r4,1\n"
+ " cmpeq r2,r2,zero\n"
+ " beq r2,zero,7f\n"
+ "5: andi r2,r3,2\n"
+ " beq r2,zero,6f\n"
+ "9: ldhu r2,0(r5)\n"
+ " addi r6,r6,-2\n"
+ " addi r5,r5,2\n"
+ " sth r2,0(r3)\n"
+ " addi r3,r3,2\n"
+ "6: bge r7,r6,1b\n"
+ "10:ldw r2,0(r5)\n"
+ " addi r6,r6,-4\n"
+ " addi r5,r5,4\n"
+ " stw r2,0(r3)\n"
+ " addi r3,r3,4\n"
+ " br 6b\n"
+ "7: ldbu r2,0(r5)\n"
+ " addi r6,r6,-1\n"
+ " addi r5,r5,1\n"
+ " addi r3,r4,1\n"
+ " stb r2,0(r4)\n"
+ " br 5b\n"
+ ".section __ex_table,\"a\"\n"
+ ".word 2b,3b\n"
+ ".word 9b,13b\n"
+ ".word 10b,13b\n"
+ ".word 7b,13b\n"
+ ".previous\n"
+ );
+EXPORT_SYMBOL(__copy_from_user);
+
+asm(
+ " .global __copy_to_user\n"
+ " .type __copy_to_user, @function\n"
+ "__copy_to_user:\n"
+ " movi r2,7\n"
+ " mov r3,r4\n"
+ " bge r2,r6,1f\n"
+ " xor r2,r4,r5\n"
+ " andi r2,r2,3\n"
+ " movi r7,3\n"
+ " beq r2,zero,4f\n"
+ /* Bail if we try to copy zero bytes */
+ "1: addi r6,r6,-1\n"
+ " movi r2,-1\n"
+ " beq r6,r2,3f\n"
+ /* Copy byte by byte for small copies or if (src ^ dst) & 3 != 0 */
+ " mov r7,r2\n"
+ "2: ldbu r2,0(r5)\n"
+ " addi r5,r5,1\n"
+ "9: stb r2,0(r3)\n"
+ " addi r6,r6,-1\n"
+ " addi r3,r3,1\n"
+ " bne r6,r7,2b\n"
+ "3: addi r2,r6,1\n"
+ " ret\n"
+ "13:mov r2,r6\n"
+ " ret\n"
+ /* If 'to' is an odd address, copy one byte first */
+ "4: andi r2,r4,1\n"
+ " cmpeq r2,r2,zero\n"
+ " beq r2,zero,7f\n"
+ /* If 'to' is not divisible by four, copy a halfword */
+ "5: andi r2,r3,2\n"
+ " beq r2,zero,6f\n"
+ " ldhu r2,0(r5)\n"
+ " addi r5,r5,2\n"
+ "10:sth r2,0(r3)\n"
+ " addi r6,r6,-2\n"
+ " addi r3,r3,2\n"
+ /* Copy words */
+ "6: bge r7,r6,1b\n"
+ " ldw r2,0(r5)\n"
+ " addi r5,r5,4\n"
+ "11:stw r2,0(r3)\n"
+ " addi r6,r6,-4\n"
+ " addi r3,r3,4\n"
+ " br 6b\n"
+ /* Copy remaining bytes */
+ "7: ldbu r2,0(r5)\n"
+ " addi r5,r5,1\n"
+ " addi r3,r4,1\n"
+ "12: stb r2,0(r4)\n"
+ " addi r6,r6,-1\n"
+ " br 5b\n"
+ ".section __ex_table,\"a\"\n"
+ ".word 9b,3b\n"
+ ".word 10b,13b\n"
+ ".word 11b,13b\n"
+ ".word 12b,13b\n"
+ ".previous\n");
+EXPORT_SYMBOL(__copy_to_user);
+
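+/*
+ * Copy a NUL-terminated string from user space. Returns the length of the
+ * copied string (not counting the terminating NUL), __len if the source is
+ * not NUL-terminated within __len bytes, or -EFAULT on a fault.
+ */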
+long strncpy_from_user(char *__to, const char __user *__from, long __len)
+{
+ int l = strnlen_user(__from, __len);
+ int is_zt = 1;
+
+ if (l > __len) {
+ is_zt = 0;
+ l = __len;
+ }
+
+ if (l == 0 || copy_from_user(__to, __from, l))
+ return -EFAULT;
+
+ if (is_zt)
+ l--;
+ return l;
+}
+
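+/*
+ * Return the length of a user string including the terminating NUL, n + 1
+ * if no NUL is found within the first n bytes, or 0 if a fault occurs.
+ */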
+long strnlen_user(const char __user *s, long n)
+{
+ long i;
+
+ for (i = 0; i < n; i++) {
+ char c;
+
+ if (get_user(c, s + i) == -EFAULT)
+ return 0;
+ if (c == 0)
+ return i + 1;
+ }
+ return n + 1;
+}
diff --git a/arch/nios2/platform/Kconfig.platform b/arch/nios2/platform/Kconfig.platform
new file mode 100644
index 000000000000..d3e5df9fb36b
--- /dev/null
+++ b/arch/nios2/platform/Kconfig.platform
@@ -0,0 +1,129 @@
+menu "Platform options"
+
+comment "Memory settings"
+
+config NIOS2_MEM_BASE
+ hex "Memory base address"
+ default "0x00000000"
+ help
+ This is the physical address of the memory that the kernel will run
+ from. This address is used to link the kernel and set up initial memory
+ management. You should take the raw memory address without any MMU
+ or cache bits set.
+ Please note that this address is used directly, so you have to do
+ address translation manually if it's connected to a bridge.
+
+comment "Device tree"
+
+config NIOS2_DTB_AT_PHYS_ADDR
+ bool "DTB at physical address"
+ default n
+ help
+ When enabled you can select a physical address to load the dtb from.
+ Normally this address is passed by a bootloader such as u-boot but
+ with this option you can use a devicetree without a bootloader.
+ This way you can store a devicetree in NOR flash or an on-chip ROM.
+ Please note that this address is used directly so you have to manually
+ do address translation if it's connected to a bridge. Also take into
+ account that when using an MMU you'd have to add 0xC0000000 to your
+ address.
+
+config NIOS2_DTB_PHYS_ADDR
+ hex "DTB Address"
+ depends on NIOS2_DTB_AT_PHYS_ADDR
+ default "0xC0000000"
+ help
+ Physical address of a dtb blob.
+
+config NIOS2_DTB_SOURCE_BOOL
+ bool "Compile and link device tree into kernel image"
+ default n
+ help
+ This allows you to specify a dts (device tree source) file
+ which will be compiled and linked into the kernel image.
+
+config NIOS2_DTB_SOURCE
+ string "Device tree source file"
+ depends on NIOS2_DTB_SOURCE_BOOL
+ default ""
+ help
+ Absolute path to the device tree source (dts) file describing your
+ system.
+
+comment "Nios II instructions"
+
+config NIOS2_HW_MUL_SUPPORT
+ bool "Enable MUL instruction"
+ default n
+ help
+ Set to true if you configured the Nios II to include the MUL
+ instruction. This will enable the -mhw-mul compiler flag.
+
+config NIOS2_HW_MULX_SUPPORT
+ bool "Enable MULX instruction"
+ default n
+ help
+ Set to true if you configured the Nios II to include the MULX
+ instruction. Enables the -mhw-mulx compiler flag.
+
+config NIOS2_HW_DIV_SUPPORT
+ bool "Enable DIV instruction"
+ default n
+ help
+ Set to true if you configured the Nios II to include the DIV
+ instruction. Enables the -mhw-div compiler flag.
+
+config NIOS2_FPU_SUPPORT
+ bool "Custom floating point instr support"
+ default n
+ help
+ Enables the -mcustom-fpu-cfg=60-1 compiler flag.
+
+config NIOS2_CI_SWAB_SUPPORT
+ bool "Byteswap custom instruction"
+ default n
+ help
+ Use the byteswap (endian converter) Nios II custom instruction provided
+ by Altera, which can be enabled in QSYS builder. This accelerates
+ endian conversions in the kernel (e.g. ntohs).
+
+config NIOS2_CI_SWAB_NO
+ int "Byteswap custom instruction number" if NIOS2_CI_SWAB_SUPPORT
+ default 0
+ help
+ Number of the instruction as configured in QSYS Builder.
+
+comment "Cache settings"
+
+config CUSTOM_CACHE_SETTINGS
+ bool "Custom cache settings"
+ help
+ This option allows you to tweak the cache settings used during early
+ boot (when the information from the device tree is not yet available).
+ There should be no reason to change these values. Linux will work
+ perfectly fine, even if the Nios II is configured with smaller caches.
+
+ Say N here unless you know what you are doing.
+
+config NIOS2_DCACHE_SIZE
+ hex "D-Cache size" if CUSTOM_CACHE_SETTINGS
+ range 0x200 0x10000
+ default "0x800"
+ help
+ Maximum possible data cache size.
+
+config NIOS2_DCACHE_LINE_SIZE
+ hex "D-Cache line size" if CUSTOM_CACHE_SETTINGS
+ range 0x10 0x20
+ default "0x20"
+ help
+ Minimum possible data cache line size.
+
+config NIOS2_ICACHE_SIZE
+ hex "I-Cache size" if CUSTOM_CACHE_SETTINGS
+ range 0x200 0x10000
+ default "0x1000"
+ help
+ Maximum possible instruction cache size.
+
+endmenu
diff --git a/arch/nios2/platform/Makefile b/arch/nios2/platform/Makefile
new file mode 100644
index 000000000000..46364f1d9352
--- /dev/null
+++ b/arch/nios2/platform/Makefile
@@ -0,0 +1 @@
+obj-y += platform.o
diff --git a/arch/nios2/platform/platform.c b/arch/nios2/platform/platform.c
new file mode 100644
index 000000000000..d478773f758a
--- /dev/null
+++ b/arch/nios2/platform/platform.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2013 Altera Corporation
+ * Copyright (C) 2011 Thomas Chou
+ * Copyright (C) 2011 Walter Goossens
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file COPYING in the main directory of this
+ * archive for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_fdt.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/sys_soc.h>
+#include <linux/io.h>
+
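+/*
+ * Register a soc_device describing the Nios II system (machine name taken
+ * from the device tree, family "Nios II") and populate platform devices
+ * from the flattened device tree.
+ */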
+static int __init nios2_soc_device_init(void)
+{
+ struct soc_device *soc_dev;
+ struct soc_device_attribute *soc_dev_attr;
+ const char *machine;
+
+ soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+ if (soc_dev_attr) {
+ machine = of_flat_dt_get_machine_name();
+ if (machine)
+ soc_dev_attr->machine = kasprintf(GFP_KERNEL, "%s",
+ machine);
+
+ soc_dev_attr->family = "Nios II";
+
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev)) {
+ kfree(soc_dev_attr->machine);
+ kfree(soc_dev_attr);
+ }
+ }
+
+ return of_platform_populate(NULL, of_default_bus_match_table,
+ NULL, NULL);
+}
+
+device_initcall(nios2_soc_device_init);
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
index 4006964d8e12..a5cb070b54bf 100644
--- a/arch/parisc/include/asm/uaccess.h
+++ b/arch/parisc/include/asm/uaccess.h
@@ -9,6 +9,8 @@
#include <asm/errno.h>
#include <asm-generic/uaccess-unaligned.h>
+#include <linux/bug.h>
+
#define VERIFY_READ 0
#define VERIFY_WRITE 1
@@ -28,11 +30,6 @@
* that put_user is the same as __put_user, etc.
*/
-extern int __get_kernel_bad(void);
-extern int __get_user_bad(void);
-extern int __put_kernel_bad(void);
-extern int __put_user_bad(void);
-
static inline long access_ok(int type, const void __user * addr,
unsigned long size)
{
@@ -43,8 +40,8 @@ static inline long access_ok(int type, const void __user * addr,
#define get_user __get_user
#if !defined(CONFIG_64BIT)
-#define LDD_KERNEL(ptr) __get_kernel_bad();
-#define LDD_USER(ptr) __get_user_bad();
+#define LDD_KERNEL(ptr) BUILD_BUG()
+#define LDD_USER(ptr) BUILD_BUG()
#define STD_KERNEL(x, ptr) __put_kernel_asm64(x,ptr)
#define STD_USER(x, ptr) __put_user_asm64(x,ptr)
#define ASM_WORD_INSN ".word\t"
@@ -94,7 +91,7 @@ struct exception_data {
case 2: __get_kernel_asm("ldh",ptr); break; \
case 4: __get_kernel_asm("ldw",ptr); break; \
case 8: LDD_KERNEL(ptr); break; \
- default: __get_kernel_bad(); break; \
+ default: BUILD_BUG(); break; \
} \
} \
else { \
@@ -103,7 +100,7 @@ struct exception_data {
case 2: __get_user_asm("ldh",ptr); break; \
case 4: __get_user_asm("ldw",ptr); break; \
case 8: LDD_USER(ptr); break; \
- default: __get_user_bad(); break; \
+ default: BUILD_BUG(); break; \
} \
} \
\
@@ -136,7 +133,7 @@ struct exception_data {
case 2: __put_kernel_asm("sth",__x,ptr); break; \
case 4: __put_kernel_asm("stw",__x,ptr); break; \
case 8: STD_KERNEL(__x,ptr); break; \
- default: __put_kernel_bad(); break; \
+ default: BUILD_BUG(); break; \
} \
} \
else { \
@@ -145,7 +142,7 @@ struct exception_data {
case 2: __put_user_asm("sth",__x,ptr); break; \
case 4: __put_user_asm("stw",__x,ptr); break; \
case 8: STD_USER(__x,ptr); break; \
- default: __put_user_bad(); break; \
+ default: BUILD_BUG(); break; \
} \
} \
\
diff --git a/arch/parisc/include/uapi/asm/bitsperlong.h b/arch/parisc/include/uapi/asm/bitsperlong.h
index 75196b415d3f..e0a23c7bdd43 100644
--- a/arch/parisc/include/uapi/asm/bitsperlong.h
+++ b/arch/parisc/include/uapi/asm/bitsperlong.h
@@ -1,13 +1,7 @@
#ifndef __ASM_PARISC_BITSPERLONG_H
#define __ASM_PARISC_BITSPERLONG_H
-/*
- * using CONFIG_* outside of __KERNEL__ is wrong,
- * __LP64__ was also removed from headers, so what
- * is the right approach on parisc?
- * -arnd
- */
-#if (defined(__KERNEL__) && defined(CONFIG_64BIT)) || defined (__LP64__)
+#if defined(__LP64__)
#define __BITS_PER_LONG 64
#define SHIFT_PER_LONG 6
#else
diff --git a/arch/parisc/include/uapi/asm/msgbuf.h b/arch/parisc/include/uapi/asm/msgbuf.h
index fe88f2649418..342138983914 100644
--- a/arch/parisc/include/uapi/asm/msgbuf.h
+++ b/arch/parisc/include/uapi/asm/msgbuf.h
@@ -1,6 +1,8 @@
#ifndef _PARISC_MSGBUF_H
#define _PARISC_MSGBUF_H
+#include <asm/bitsperlong.h>
+
/*
* The msqid64_ds structure for parisc architecture, copied from sparc.
* Note extra padding because this structure is passed back and forth
@@ -13,15 +15,15 @@
struct msqid64_ds {
struct ipc64_perm msg_perm;
-#ifndef CONFIG_64BIT
+#if __BITS_PER_LONG != 64
unsigned int __pad1;
#endif
__kernel_time_t msg_stime; /* last msgsnd time */
-#ifndef CONFIG_64BIT
+#if __BITS_PER_LONG != 64
unsigned int __pad2;
#endif
__kernel_time_t msg_rtime; /* last msgrcv time */
-#ifndef CONFIG_64BIT
+#if __BITS_PER_LONG != 64
unsigned int __pad3;
#endif
__kernel_time_t msg_ctime; /* last change time */
diff --git a/arch/parisc/include/uapi/asm/sembuf.h b/arch/parisc/include/uapi/asm/sembuf.h
index 1e59ffd3bd1e..f01d89e30d73 100644
--- a/arch/parisc/include/uapi/asm/sembuf.h
+++ b/arch/parisc/include/uapi/asm/sembuf.h
@@ -1,6 +1,8 @@
#ifndef _PARISC_SEMBUF_H
#define _PARISC_SEMBUF_H
+#include <asm/bitsperlong.h>
+
/*
* The semid64_ds structure for parisc architecture.
* Note extra padding because this structure is passed back and forth
@@ -13,11 +15,11 @@
struct semid64_ds {
struct ipc64_perm sem_perm; /* permissions .. see ipc.h */
-#ifndef CONFIG_64BIT
+#if __BITS_PER_LONG != 64
unsigned int __pad1;
#endif
__kernel_time_t sem_otime; /* last semop time */
-#ifndef CONFIG_64BIT
+#if __BITS_PER_LONG != 64
unsigned int __pad2;
#endif
__kernel_time_t sem_ctime; /* last change time */
diff --git a/arch/parisc/include/uapi/asm/shmbuf.h b/arch/parisc/include/uapi/asm/shmbuf.h
index 0a3eada1863b..8496c38560c6 100644
--- a/arch/parisc/include/uapi/asm/shmbuf.h
+++ b/arch/parisc/include/uapi/asm/shmbuf.h
@@ -1,6 +1,8 @@
#ifndef _PARISC_SHMBUF_H
#define _PARISC_SHMBUF_H
+#include <asm/bitsperlong.h>
+
/*
* The shmid64_ds structure for parisc architecture.
* Note extra padding because this structure is passed back and forth
@@ -13,19 +15,19 @@
struct shmid64_ds {
struct ipc64_perm shm_perm; /* operation perms */
-#ifndef CONFIG_64BIT
+#if __BITS_PER_LONG != 64
unsigned int __pad1;
#endif
__kernel_time_t shm_atime; /* last attach time */
-#ifndef CONFIG_64BIT
+#if __BITS_PER_LONG != 64
unsigned int __pad2;
#endif
__kernel_time_t shm_dtime; /* last detach time */
-#ifndef CONFIG_64BIT
+#if __BITS_PER_LONG != 64
unsigned int __pad3;
#endif
__kernel_time_t shm_ctime; /* last change time */
-#ifndef CONFIG_64BIT
+#if __BITS_PER_LONG != 64
unsigned int __pad4;
#endif
size_t shm_segsz; /* size of segment (bytes) */
@@ -36,23 +38,16 @@ struct shmid64_ds {
unsigned int __unused2;
};
-#ifdef CONFIG_64BIT
-/* The 'unsigned int' (formerly 'unsigned long') data types below will
- * ensure that a 32-bit app calling shmctl(*,IPC_INFO,*) will work on
- * a wide kernel, but if some of these values are meant to contain pointers
- * they may need to be 'long long' instead. -PB XXX FIXME
- */
-#endif
struct shminfo64 {
- unsigned int shmmax;
- unsigned int shmmin;
- unsigned int shmmni;
- unsigned int shmseg;
- unsigned int shmall;
- unsigned int __unused1;
- unsigned int __unused2;
- unsigned int __unused3;
- unsigned int __unused4;
+ unsigned long shmmax;
+ unsigned long shmmin;
+ unsigned long shmmni;
+ unsigned long shmseg;
+ unsigned long shmall;
+ unsigned long __unused1;
+ unsigned long __unused2;
+ unsigned long __unused3;
+ unsigned long __unused4;
};
#endif /* _PARISC_SHMBUF_H */
diff --git a/arch/parisc/include/uapi/asm/signal.h b/arch/parisc/include/uapi/asm/signal.h
index 10df7079f4cd..e26043b73f5d 100644
--- a/arch/parisc/include/uapi/asm/signal.h
+++ b/arch/parisc/include/uapi/asm/signal.h
@@ -85,7 +85,7 @@
struct siginfo;
/* Type of a signal handler. */
-#ifdef CONFIG_64BIT
+#if defined(__LP64__)
/* function pointers on 64-bit parisc are pointers to little structs and the
* compiler doesn't support code which changes or tests the address of
* the function in the little struct. This is really ugly -PB
diff --git a/arch/parisc/include/uapi/asm/unistd.h b/arch/parisc/include/uapi/asm/unistd.h
index 8667f18be238..5f5c0373de63 100644
--- a/arch/parisc/include/uapi/asm/unistd.h
+++ b/arch/parisc/include/uapi/asm/unistd.h
@@ -833,8 +833,9 @@
#define __NR_seccomp (__NR_Linux + 338)
#define __NR_getrandom (__NR_Linux + 339)
#define __NR_memfd_create (__NR_Linux + 340)
+#define __NR_bpf (__NR_Linux + 341)
-#define __NR_Linux_syscalls (__NR_memfd_create + 1)
+#define __NR_Linux_syscalls (__NR_bpf + 1)
#define __IGNORE_select /* newselect */
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index b563d9c8268b..fe4f0b89bf8f 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -286,11 +286,11 @@
ENTRY_COMP(msgsnd)
ENTRY_COMP(msgrcv)
ENTRY_SAME(msgget) /* 190 */
- ENTRY_SAME(msgctl)
- ENTRY_SAME(shmat)
+ ENTRY_COMP(msgctl)
+ ENTRY_COMP(shmat)
ENTRY_SAME(shmdt)
ENTRY_SAME(shmget)
- ENTRY_SAME(shmctl) /* 195 */
+ ENTRY_COMP(shmctl) /* 195 */
ENTRY_SAME(ni_syscall) /* streams1 */
ENTRY_SAME(ni_syscall) /* streams2 */
ENTRY_SAME(lstat64)
@@ -323,7 +323,7 @@
ENTRY_SAME(epoll_ctl) /* 225 */
ENTRY_SAME(epoll_wait)
ENTRY_SAME(remap_file_pages)
- ENTRY_SAME(semtimedop)
+ ENTRY_COMP(semtimedop)
ENTRY_COMP(mq_open)
ENTRY_SAME(mq_unlink) /* 230 */
ENTRY_COMP(mq_timedsend)
@@ -436,6 +436,7 @@
ENTRY_SAME(seccomp)
ENTRY_SAME(getrandom)
ENTRY_SAME(memfd_create) /* 340 */
+ ENTRY_SAME(bpf)
/* Nothing yet */
diff --git a/arch/powerpc/include/asm/fadump.h b/arch/powerpc/include/asm/fadump.h
index a6774560afe3..493e72f64b35 100644
--- a/arch/powerpc/include/asm/fadump.h
+++ b/arch/powerpc/include/asm/fadump.h
@@ -70,39 +70,39 @@
#define CPU_UNKNOWN (~((u32)0))
/* Utility macros */
-#define SKIP_TO_NEXT_CPU(reg_entry) \
-({ \
- while (reg_entry->reg_id != REG_ID("CPUEND")) \
- reg_entry++; \
- reg_entry++; \
+#define SKIP_TO_NEXT_CPU(reg_entry) \
+({ \
+ while (be64_to_cpu(reg_entry->reg_id) != REG_ID("CPUEND")) \
+ reg_entry++; \
+ reg_entry++; \
})
/* Kernel Dump section info */
struct fadump_section {
- u32 request_flag;
- u16 source_data_type;
- u16 error_flags;
- u64 source_address;
- u64 source_len;
- u64 bytes_dumped;
- u64 destination_address;
+ __be32 request_flag;
+ __be16 source_data_type;
+ __be16 error_flags;
+ __be64 source_address;
+ __be64 source_len;
+ __be64 bytes_dumped;
+ __be64 destination_address;
};
/* ibm,configure-kernel-dump header. */
struct fadump_section_header {
- u32 dump_format_version;
- u16 dump_num_sections;
- u16 dump_status_flag;
- u32 offset_first_dump_section;
+ __be32 dump_format_version;
+ __be16 dump_num_sections;
+ __be16 dump_status_flag;
+ __be32 offset_first_dump_section;
/* Fields for disk dump option. */
- u32 dd_block_size;
- u64 dd_block_offset;
- u64 dd_num_blocks;
- u32 dd_offset_disk_path;
+ __be32 dd_block_size;
+ __be64 dd_block_offset;
+ __be64 dd_num_blocks;
+ __be32 dd_offset_disk_path;
/* Maximum time allowed to prevent an automatic dump-reboot. */
- u32 max_time_auto;
+ __be32 max_time_auto;
};
/*
@@ -174,15 +174,15 @@ static inline u64 str_to_u64(const char *str)
/* Register save area header. */
struct fadump_reg_save_area_header {
- u64 magic_number;
- u32 version;
- u32 num_cpu_offset;
+ __be64 magic_number;
+ __be32 version;
+ __be32 num_cpu_offset;
};
/* Register entry. */
struct fadump_reg_entry {
- u64 reg_id;
- u64 reg_value;
+ __be64 reg_id;
+ __be64 reg_value;
};
/* fadump crash info structure */
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
index 4ca90a39d6d0..725247beebec 100644
--- a/arch/powerpc/include/asm/pci-bridge.h
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -159,8 +159,6 @@ struct pci_dn {
int pci_ext_config_space; /* for pci devices */
- bool force_32bit_msi;
-
struct pci_dev *pcidev; /* back-pointer to the pci device */
#ifdef CONFIG_EEH
struct eeh_dev *edev; /* eeh device */
diff --git a/arch/powerpc/include/asm/pgalloc.h b/arch/powerpc/include/asm/pgalloc.h
index e9a9f60e596d..fc3ee06eab87 100644
--- a/arch/powerpc/include/asm/pgalloc.h
+++ b/arch/powerpc/include/asm/pgalloc.h
@@ -3,7 +3,6 @@
#ifdef __KERNEL__
#include <linux/mm.h>
-#include <asm-generic/tlb.h>
#ifdef CONFIG_PPC_BOOK3E
extern void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address);
@@ -14,6 +13,8 @@ static inline void tlb_flush_pgtable(struct mmu_gather *tlb,
}
#endif /* !CONFIG_PPC_BOOK3E */
+extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
+
#ifdef CONFIG_PPC64
#include <asm/pgalloc-64.h>
#else
diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h
index e2b428b0f7ba..20733fa518ae 100644
--- a/arch/powerpc/include/asm/tlb.h
+++ b/arch/powerpc/include/asm/tlb.h
@@ -27,6 +27,7 @@
#define tlb_start_vma(tlb, vma) do { } while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)
+#define __tlb_remove_tlb_entry __tlb_remove_tlb_entry
extern void tlb_flush(struct mmu_gather *tlb);
diff --git a/arch/powerpc/kernel/eeh_sysfs.c b/arch/powerpc/kernel/eeh_sysfs.c
index f19b1e5cb060..1ceecdda810b 100644
--- a/arch/powerpc/kernel/eeh_sysfs.c
+++ b/arch/powerpc/kernel/eeh_sysfs.c
@@ -65,7 +65,7 @@ static ssize_t eeh_pe_state_show(struct device *dev,
return -ENODEV;
state = eeh_ops->get_state(edev->pe, NULL);
- return sprintf(buf, "%0x08x %0x08x\n",
+ return sprintf(buf, "0x%08x 0x%08x\n",
state, edev->pe->state);
}
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 5bbd1bc8c3b0..0905c8da90f1 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -659,7 +659,13 @@ _GLOBAL(ret_from_except_lite)
3:
#endif
bl save_nvgprs
+ /*
+ * Use a non volatile GPR to save and restore our thread_info flags
+ * across the call to restore_interrupts.
+ */
+ mr r30,r4
bl restore_interrupts
+ mr r4,r30
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_notify_resume
b ret_from_except
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index 742694c1d852..26d091a1a54c 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -58,7 +58,7 @@ int __init early_init_dt_scan_fw_dump(unsigned long node,
const __be32 *sections;
int i, num_sections;
int size;
- const int *token;
+ const __be32 *token;
if (depth != 1 || strcmp(uname, "rtas") != 0)
return 0;
@@ -72,7 +72,7 @@ int __init early_init_dt_scan_fw_dump(unsigned long node,
return 1;
fw_dump.fadump_supported = 1;
- fw_dump.ibm_configure_kernel_dump = *token;
+ fw_dump.ibm_configure_kernel_dump = be32_to_cpu(*token);
/*
* The 'ibm,kernel-dump' rtas node is present only if there is
@@ -147,11 +147,11 @@ static unsigned long init_fadump_mem_struct(struct fadump_mem_struct *fdm,
memset(fdm, 0, sizeof(struct fadump_mem_struct));
addr = addr & PAGE_MASK;
- fdm->header.dump_format_version = 0x00000001;
- fdm->header.dump_num_sections = 3;
+ fdm->header.dump_format_version = cpu_to_be32(0x00000001);
+ fdm->header.dump_num_sections = cpu_to_be16(3);
fdm->header.dump_status_flag = 0;
fdm->header.offset_first_dump_section =
- (u32)offsetof(struct fadump_mem_struct, cpu_state_data);
+ cpu_to_be32((u32)offsetof(struct fadump_mem_struct, cpu_state_data));
/*
* Fields for disk dump option.
@@ -167,27 +167,27 @@ static unsigned long init_fadump_mem_struct(struct fadump_mem_struct *fdm,
/* Kernel dump sections */
/* cpu state data section. */
- fdm->cpu_state_data.request_flag = FADUMP_REQUEST_FLAG;
- fdm->cpu_state_data.source_data_type = FADUMP_CPU_STATE_DATA;
+ fdm->cpu_state_data.request_flag = cpu_to_be32(FADUMP_REQUEST_FLAG);
+ fdm->cpu_state_data.source_data_type = cpu_to_be16(FADUMP_CPU_STATE_DATA);
fdm->cpu_state_data.source_address = 0;
- fdm->cpu_state_data.source_len = fw_dump.cpu_state_data_size;
- fdm->cpu_state_data.destination_address = addr;
+ fdm->cpu_state_data.source_len = cpu_to_be64(fw_dump.cpu_state_data_size);
+ fdm->cpu_state_data.destination_address = cpu_to_be64(addr);
addr += fw_dump.cpu_state_data_size;
/* hpte region section */
- fdm->hpte_region.request_flag = FADUMP_REQUEST_FLAG;
- fdm->hpte_region.source_data_type = FADUMP_HPTE_REGION;
+ fdm->hpte_region.request_flag = cpu_to_be32(FADUMP_REQUEST_FLAG);
+ fdm->hpte_region.source_data_type = cpu_to_be16(FADUMP_HPTE_REGION);
fdm->hpte_region.source_address = 0;
- fdm->hpte_region.source_len = fw_dump.hpte_region_size;
- fdm->hpte_region.destination_address = addr;
+ fdm->hpte_region.source_len = cpu_to_be64(fw_dump.hpte_region_size);
+ fdm->hpte_region.destination_address = cpu_to_be64(addr);
addr += fw_dump.hpte_region_size;
/* RMA region section */
- fdm->rmr_region.request_flag = FADUMP_REQUEST_FLAG;
- fdm->rmr_region.source_data_type = FADUMP_REAL_MODE_REGION;
- fdm->rmr_region.source_address = RMA_START;
- fdm->rmr_region.source_len = fw_dump.boot_memory_size;
- fdm->rmr_region.destination_address = addr;
+ fdm->rmr_region.request_flag = cpu_to_be32(FADUMP_REQUEST_FLAG);
+ fdm->rmr_region.source_data_type = cpu_to_be16(FADUMP_REAL_MODE_REGION);
+ fdm->rmr_region.source_address = cpu_to_be64(RMA_START);
+ fdm->rmr_region.source_len = cpu_to_be64(fw_dump.boot_memory_size);
+ fdm->rmr_region.destination_address = cpu_to_be64(addr);
addr += fw_dump.boot_memory_size;
return addr;
@@ -272,7 +272,7 @@ int __init fadump_reserve_mem(void)
* first kernel.
*/
if (fdm_active)
- fw_dump.boot_memory_size = fdm_active->rmr_region.source_len;
+ fw_dump.boot_memory_size = be64_to_cpu(fdm_active->rmr_region.source_len);
else
fw_dump.boot_memory_size = fadump_calculate_reserve_size();
@@ -314,8 +314,8 @@ int __init fadump_reserve_mem(void)
(unsigned long)(base >> 20));
fw_dump.fadumphdr_addr =
- fdm_active->rmr_region.destination_address +
- fdm_active->rmr_region.source_len;
+ be64_to_cpu(fdm_active->rmr_region.destination_address) +
+ be64_to_cpu(fdm_active->rmr_region.source_len);
pr_debug("fadumphdr_addr = %p\n",
(void *) fw_dump.fadumphdr_addr);
} else {
@@ -472,9 +472,9 @@ fadump_read_registers(struct fadump_reg_entry *reg_entry, struct pt_regs *regs)
{
memset(regs, 0, sizeof(struct pt_regs));
- while (reg_entry->reg_id != REG_ID("CPUEND")) {
- fadump_set_regval(regs, reg_entry->reg_id,
- reg_entry->reg_value);
+ while (be64_to_cpu(reg_entry->reg_id) != REG_ID("CPUEND")) {
+ fadump_set_regval(regs, be64_to_cpu(reg_entry->reg_id),
+ be64_to_cpu(reg_entry->reg_value));
reg_entry++;
}
reg_entry++;
@@ -603,20 +603,20 @@ static int __init fadump_build_cpu_notes(const struct fadump_mem_struct *fdm)
if (!fdm->cpu_state_data.bytes_dumped)
return -EINVAL;
- addr = fdm->cpu_state_data.destination_address;
+ addr = be64_to_cpu(fdm->cpu_state_data.destination_address);
vaddr = __va(addr);
reg_header = vaddr;
- if (reg_header->magic_number != REGSAVE_AREA_MAGIC) {
+ if (be64_to_cpu(reg_header->magic_number) != REGSAVE_AREA_MAGIC) {
printk(KERN_ERR "Unable to read register save area.\n");
return -ENOENT;
}
pr_debug("--------CPU State Data------------\n");
- pr_debug("Magic Number: %llx\n", reg_header->magic_number);
- pr_debug("NumCpuOffset: %x\n", reg_header->num_cpu_offset);
+ pr_debug("Magic Number: %llx\n", be64_to_cpu(reg_header->magic_number));
+ pr_debug("NumCpuOffset: %x\n", be32_to_cpu(reg_header->num_cpu_offset));
- vaddr += reg_header->num_cpu_offset;
- num_cpus = *((u32 *)(vaddr));
+ vaddr += be32_to_cpu(reg_header->num_cpu_offset);
+ num_cpus = be32_to_cpu(*((__be32 *)(vaddr)));
pr_debug("NumCpus : %u\n", num_cpus);
vaddr += sizeof(u32);
reg_entry = (struct fadump_reg_entry *)vaddr;
@@ -639,13 +639,13 @@ static int __init fadump_build_cpu_notes(const struct fadump_mem_struct *fdm)
fdh = __va(fw_dump.fadumphdr_addr);
for (i = 0; i < num_cpus; i++) {
- if (reg_entry->reg_id != REG_ID("CPUSTRT")) {
+ if (be64_to_cpu(reg_entry->reg_id) != REG_ID("CPUSTRT")) {
printk(KERN_ERR "Unable to read CPU state data\n");
rc = -ENOENT;
goto error_out;
}
/* Lower 4 bytes of reg_value contains logical cpu id */
- cpu = reg_entry->reg_value & FADUMP_CPU_ID_MASK;
+ cpu = be64_to_cpu(reg_entry->reg_value) & FADUMP_CPU_ID_MASK;
if (fdh && !cpumask_test_cpu(cpu, &fdh->cpu_online_mask)) {
SKIP_TO_NEXT_CPU(reg_entry);
continue;
@@ -692,7 +692,7 @@ static int __init process_fadump(const struct fadump_mem_struct *fdm_active)
return -EINVAL;
/* Check if the dump data is valid. */
- if ((fdm_active->header.dump_status_flag == FADUMP_ERROR_FLAG) ||
+ if ((be16_to_cpu(fdm_active->header.dump_status_flag) == FADUMP_ERROR_FLAG) ||
(fdm_active->cpu_state_data.error_flags != 0) ||
(fdm_active->rmr_region.error_flags != 0)) {
printk(KERN_ERR "Dump taken by platform is not valid\n");
@@ -828,7 +828,7 @@ static void fadump_setup_crash_memory_ranges(void)
static inline unsigned long fadump_relocate(unsigned long paddr)
{
if (paddr > RMA_START && paddr < fw_dump.boot_memory_size)
- return fdm.rmr_region.destination_address + paddr;
+ return be64_to_cpu(fdm.rmr_region.destination_address) + paddr;
else
return paddr;
}
@@ -902,7 +902,7 @@ static int fadump_create_elfcore_headers(char *bufp)
* to the specified destination_address. Hence set
* the correct offset.
*/
- phdr->p_offset = fdm.rmr_region.destination_address;
+ phdr->p_offset = be64_to_cpu(fdm.rmr_region.destination_address);
}
phdr->p_paddr = mbase;
@@ -951,7 +951,7 @@ static void register_fadump(void)
fadump_setup_crash_memory_ranges();
- addr = fdm.rmr_region.destination_address + fdm.rmr_region.source_len;
+ addr = be64_to_cpu(fdm.rmr_region.destination_address) + be64_to_cpu(fdm.rmr_region.source_len);
/* Initialize fadump crash info header. */
addr = init_fadump_header(addr);
vaddr = __va(addr);
@@ -1023,7 +1023,7 @@ void fadump_cleanup(void)
/* Invalidate the registration only if dump is active. */
if (fw_dump.dump_active) {
init_fadump_mem_struct(&fdm,
- fdm_active->cpu_state_data.destination_address);
+ be64_to_cpu(fdm_active->cpu_state_data.destination_address));
fadump_invalidate_dump(&fdm);
}
}
@@ -1063,7 +1063,7 @@ static void fadump_invalidate_release_mem(void)
return;
}
- destination_address = fdm_active->cpu_state_data.destination_address;
+ destination_address = be64_to_cpu(fdm_active->cpu_state_data.destination_address);
fadump_cleanup();
mutex_unlock(&fadump_mutex);
@@ -1183,31 +1183,31 @@ static int fadump_region_show(struct seq_file *m, void *private)
seq_printf(m,
"CPU : [%#016llx-%#016llx] %#llx bytes, "
"Dumped: %#llx\n",
- fdm_ptr->cpu_state_data.destination_address,
- fdm_ptr->cpu_state_data.destination_address +
- fdm_ptr->cpu_state_data.source_len - 1,
- fdm_ptr->cpu_state_data.source_len,
- fdm_ptr->cpu_state_data.bytes_dumped);
+ be64_to_cpu(fdm_ptr->cpu_state_data.destination_address),
+ be64_to_cpu(fdm_ptr->cpu_state_data.destination_address) +
+ be64_to_cpu(fdm_ptr->cpu_state_data.source_len) - 1,
+ be64_to_cpu(fdm_ptr->cpu_state_data.source_len),
+ be64_to_cpu(fdm_ptr->cpu_state_data.bytes_dumped));
seq_printf(m,
"HPTE: [%#016llx-%#016llx] %#llx bytes, "
"Dumped: %#llx\n",
- fdm_ptr->hpte_region.destination_address,
- fdm_ptr->hpte_region.destination_address +
- fdm_ptr->hpte_region.source_len - 1,
- fdm_ptr->hpte_region.source_len,
- fdm_ptr->hpte_region.bytes_dumped);
+ be64_to_cpu(fdm_ptr->hpte_region.destination_address),
+ be64_to_cpu(fdm_ptr->hpte_region.destination_address) +
+ be64_to_cpu(fdm_ptr->hpte_region.source_len) - 1,
+ be64_to_cpu(fdm_ptr->hpte_region.source_len),
+ be64_to_cpu(fdm_ptr->hpte_region.bytes_dumped));
seq_printf(m,
"DUMP: [%#016llx-%#016llx] %#llx bytes, "
"Dumped: %#llx\n",
- fdm_ptr->rmr_region.destination_address,
- fdm_ptr->rmr_region.destination_address +
- fdm_ptr->rmr_region.source_len - 1,
- fdm_ptr->rmr_region.source_len,
- fdm_ptr->rmr_region.bytes_dumped);
+ be64_to_cpu(fdm_ptr->rmr_region.destination_address),
+ be64_to_cpu(fdm_ptr->rmr_region.destination_address) +
+ be64_to_cpu(fdm_ptr->rmr_region.source_len) - 1,
+ be64_to_cpu(fdm_ptr->rmr_region.source_len),
+ be64_to_cpu(fdm_ptr->rmr_region.bytes_dumped));
if (!fdm_active ||
(fw_dump.reserve_dump_area_start ==
- fdm_ptr->cpu_state_data.destination_address))
+ be64_to_cpu(fdm_ptr->cpu_state_data.destination_address)))
goto out;
/* Dump is active. Show reserved memory region. */
@@ -1215,10 +1215,10 @@ static int fadump_region_show(struct seq_file *m, void *private)
" : [%#016llx-%#016llx] %#llx bytes, "
"Dumped: %#llx\n",
(unsigned long long)fw_dump.reserve_dump_area_start,
- fdm_ptr->cpu_state_data.destination_address - 1,
- fdm_ptr->cpu_state_data.destination_address -
+ be64_to_cpu(fdm_ptr->cpu_state_data.destination_address) - 1,
+ be64_to_cpu(fdm_ptr->cpu_state_data.destination_address) -
fw_dump.reserve_dump_area_start,
- fdm_ptr->cpu_state_data.destination_address -
+ be64_to_cpu(fdm_ptr->cpu_state_data.destination_address) -
fw_dump.reserve_dump_area_start);
out:
if (fdm_active)
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index 155013da27e0..b15194e2c5fc 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -266,13 +266,3 @@ int pcibus_to_node(struct pci_bus *bus)
}
EXPORT_SYMBOL(pcibus_to_node);
#endif
-
-static void quirk_radeon_32bit_msi(struct pci_dev *dev)
-{
- struct pci_dn *pdn = pci_get_pdn(dev);
-
- if (pdn)
- pdn->force_32bit_msi = true;
-}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x68f2, quirk_radeon_32bit_msi);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0xaa68, quirk_radeon_32bit_msi);
diff --git a/arch/powerpc/kernel/vdso32/getcpu.S b/arch/powerpc/kernel/vdso32/getcpu.S
index 23eb9a9441bd..c62be60c7274 100644
--- a/arch/powerpc/kernel/vdso32/getcpu.S
+++ b/arch/powerpc/kernel/vdso32/getcpu.S
@@ -30,8 +30,8 @@
V_FUNCTION_BEGIN(__kernel_getcpu)
.cfi_startproc
mfspr r5,SPRN_SPRG_VDSO_READ
- cmpdi cr0,r3,0
- cmpdi cr1,r4,0
+ cmpwi cr0,r3,0
+ cmpwi cr1,r4,0
clrlwi r6,r5,16
rlwinm r7,r5,16,31-15,31-0
beq cr0,1f
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 7e70ae968e5f..6a4a5fcb9730 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -517,8 +517,6 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif
for (i = 0; i < num_hugepd; i++, hpdp++)
hpdp->pd = 0;
- tlb->need_flush = 1;
-
#ifdef CONFIG_PPC_FSL_BOOK3E
hugepd_free(tlb, hugepte);
#else
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index cad68ff8eca5..415a51b028b9 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -103,7 +103,7 @@ unsigned long __max_low_memory = MAX_LOW_MEM;
/*
* Check for command-line options that affect what MMU_init will do.
*/
-void MMU_setup(void)
+void __init MMU_setup(void)
{
/* Check for nobats option (used in mapin_ram). */
if (strstr(boot_command_line, "nobats")) {
diff --git a/arch/powerpc/platforms/powernv/opal-hmi.c b/arch/powerpc/platforms/powernv/opal-hmi.c
index 5e1ed1575aab..b322bfb51343 100644
--- a/arch/powerpc/platforms/powernv/opal-hmi.c
+++ b/arch/powerpc/platforms/powernv/opal-hmi.c
@@ -57,7 +57,7 @@ static void print_hmi_event_info(struct OpalHMIEvent *hmi_evt)
};
/* Print things out */
- if (hmi_evt->version != OpalHMIEvt_V1) {
+ if (hmi_evt->version < OpalHMIEvt_V1) {
pr_err("HMI Interrupt, Unknown event version %d !\n",
hmi_evt->version);
return;
diff --git a/arch/powerpc/platforms/powernv/opal-lpc.c b/arch/powerpc/platforms/powernv/opal-lpc.c
index ad4b31df779a..e4169d68cb32 100644
--- a/arch/powerpc/platforms/powernv/opal-lpc.c
+++ b/arch/powerpc/platforms/powernv/opal-lpc.c
@@ -216,14 +216,54 @@ static ssize_t lpc_debug_read(struct file *filp, char __user *ubuf,
&data, len);
if (rc)
return -ENXIO;
+
+ /*
+ * Now there is some trickery with the data returned by OPAL
+ * as it's the desired data right justified in a 32-bit BE
+ * word.
+ *
+ * This is a very bad interface and I'm to blame for it :-(
+ *
+ * So we can't just apply a 32-bit swap to what comes from OPAL,
+ * because user space expects the *bytes* to be in their proper
+ * respective positions (ie, LPC position).
+ *
+ * So what we really want to do here is to shift data right
+ * appropriately on a LE kernel.
+ *
+ * IE. If the LPC transaction has bytes B0, B1, B2 and B3 in that
+ * order, we have in memory written to by OPAL at the "data"
+ * pointer:
+ *
+ * Bytes: OPAL "data" LE "data"
+ * 32-bit: B0 B1 B2 B3 B0B1B2B3 B3B2B1B0
+ * 16-bit: B0 B1 0000B0B1 B1B00000
+ * 8-bit: B0 000000B0 B0000000
+ *
+ * So a BE kernel will have the leftmost of the above in the MSB
+ * and rightmost in the LSB and can just then "cast" the u32 "data"
+ * down to the appropriate quantity and write it.
+ *
+ * However, an LE kernel can't. It doesn't need to swap because a
+ * load from data followed by a store to user is going to preserve
+ * the byte ordering which is the wire byte order which is what the
+ * user wants, but in order to "crop" to the right size, we need to
+ * shift right first.
+ */
switch(len) {
case 4:
rc = __put_user((u32)data, (u32 __user *)ubuf);
break;
case 2:
+#ifdef __LITTLE_ENDIAN__
+ data >>= 16;
+#endif
rc = __put_user((u16)data, (u16 __user *)ubuf);
break;
default:
+#ifdef __LITTLE_ENDIAN__
+ data >>= 24;
+#endif
rc = __put_user((u8)data, (u8 __user *)ubuf);
break;
}
@@ -263,12 +303,31 @@ static ssize_t lpc_debug_write(struct file *filp, const char __user *ubuf,
else if (todo > 1 && (pos & 1) == 0)
len = 2;
}
+
+ /*
+ * Similarly to the read case, we have some trickery here but
+ * it's different to handle. We need to pass the value to OPAL in
+ * a register whose layout depends on the access size. We want
+ * to reproduce the memory layout of the user, however we aren't
+ * doing a load from user and a store to another memory location
+ * which would achieve that. Here we pass the value to OPAL via
+ * a register which is expected to contain the "BE" interpretation
+ * of the byte sequence. IE: for a 32-bit access, byte 0 should be
+ * in the MSB. So here we *do* need to byteswap on LE.
+ *
+ * User bytes: LE "data" OPAL "data"
+ * 32-bit: B0 B1 B2 B3 B3B2B1B0 B0B1B2B3
+ * 16-bit: B0 B1 0000B1B0 0000B0B1
+ * 8-bit: B0 000000B0 000000B0
+ */
switch(len) {
case 4:
rc = __get_user(data, (u32 __user *)ubuf);
+ data = cpu_to_be32(data);
break;
case 2:
rc = __get_user(data, (u16 __user *)ubuf);
+ data = cpu_to_be16(data);
break;
default:
rc = __get_user(data, (u8 __user *)ubuf);
diff --git a/arch/powerpc/platforms/powernv/opal-sensor.c b/arch/powerpc/platforms/powernv/opal-sensor.c
index 10271ad1fac4..4ab67ef7abc9 100644
--- a/arch/powerpc/platforms/powernv/opal-sensor.c
+++ b/arch/powerpc/platforms/powernv/opal-sensor.c
@@ -20,7 +20,9 @@
#include <linux/delay.h>
#include <linux/mutex.h>
+#include <linux/of_platform.h>
#include <asm/opal.h>
+#include <asm/machdep.h>
static DEFINE_MUTEX(opal_sensor_mutex);
@@ -64,3 +66,21 @@ out:
return ret;
}
EXPORT_SYMBOL_GPL(opal_get_sensor_data);
+
+static __init int opal_sensor_init(void)
+{
+ struct platform_device *pdev;
+ struct device_node *sensor;
+
+ sensor = of_find_node_by_path("/ibm,opal/sensors");
+ if (!sensor) {
+ pr_err("Opal node 'sensors' not found\n");
+ return -ENODEV;
+ }
+
+ pdev = of_platform_device_create(sensor, "opal-sensor", NULL);
+ of_node_put(sensor);
+
+ return PTR_ERR_OR_ZERO(pdev);
+}
+machine_subsys_initcall(powernv, opal_sensor_init);
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 468a0f23c7f2..3ba435ec3dcd 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -1509,7 +1509,6 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
unsigned int is_64, struct msi_msg *msg)
{
struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev);
- struct pci_dn *pdn = pci_get_pdn(dev);
unsigned int xive_num = hwirq - phb->msi_base;
__be32 data;
int rc;
@@ -1523,7 +1522,7 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
return -ENXIO;
/* Force 32-bit MSI on some broken devices */
- if (pdn && pdn->force_32bit_msi)
+ if (dev->no_64bit_msi)
is_64 = 0;
/* Assign XIVE to PE */
@@ -1997,7 +1996,7 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
if (is_kdump_kernel()) {
pr_info(" Issue PHB reset ...\n");
ioda_eeh_phb_reset(hose, EEH_RESET_FUNDAMENTAL);
- ioda_eeh_phb_reset(hose, OPAL_DEASSERT_RESET);
+ ioda_eeh_phb_reset(hose, EEH_RESET_DEACTIVATE);
}
/* Configure M64 window */
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index b2187d0068b8..4b20f2c6b3b2 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -50,7 +50,6 @@ static int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
struct pci_controller *hose = pci_bus_to_host(pdev->bus);
struct pnv_phb *phb = hose->private_data;
- struct pci_dn *pdn = pci_get_pdn(pdev);
struct msi_desc *entry;
struct msi_msg msg;
int hwirq;
@@ -60,7 +59,7 @@ static int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
if (WARN_ON(!phb) || !phb->msi_bmp.bitmap)
return -ENODEV;
- if (pdn && pdn->force_32bit_msi && !phb->msi32_support)
+ if (pdev->no_64bit_msi && !phb->msi32_support)
return -ENODEV;
list_for_each_entry(entry, &pdev->msi_list, list) {
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index 6ad83bd11fe2..c22bb1b4beb8 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -382,7 +382,7 @@ static int dlpar_online_cpu(struct device_node *dn)
BUG_ON(get_cpu_current_state(cpu)
!= CPU_STATE_OFFLINE);
cpu_maps_update_done();
- rc = cpu_up(cpu);
+ rc = device_online(get_cpu_device(cpu));
if (rc)
goto out;
cpu_maps_update_begin();
@@ -467,7 +467,7 @@ static int dlpar_offline_cpu(struct device_node *dn)
if (get_cpu_current_state(cpu) == CPU_STATE_ONLINE) {
set_preferred_offline_state(cpu, CPU_STATE_OFFLINE);
cpu_maps_update_done();
- rc = cpu_down(cpu);
+ rc = device_offline(get_cpu_device(cpu));
if (rc)
goto out;
cpu_maps_update_begin();
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 8c509d5397c6..f6880d2a40fb 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -43,6 +43,7 @@
#include <asm/trace.h>
#include <asm/firmware.h>
#include <asm/plpar_wrappers.h>
+#include <asm/fadump.h>
#include "pseries.h"
@@ -247,8 +248,17 @@ static void pSeries_lpar_hptab_clear(void)
}
#ifdef __LITTLE_ENDIAN__
- /* Reset exceptions to big endian */
- if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
+ /*
+ * Reset exceptions to big endian.
+ *
+ * FIXME this is a hack for kexec, we need to reset the exception
+ * endian before starting the new kernel and this is a convenient place
+ * to do it.
+ *
+ * This is also called on boot when a fadump happens. In that case we
+ * must not change the exception endian mode.
+ */
+ if (firmware_has_feature(FW_FEATURE_SET_MODE) && !is_fadump_active()) {
long rc;
rc = pseries_big_endian_exceptions();
diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c
index 8ab5add4ac82..8b909e94fd9a 100644
--- a/arch/powerpc/platforms/pseries/msi.c
+++ b/arch/powerpc/platforms/pseries/msi.c
@@ -420,7 +420,7 @@ static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec_in, int type)
*/
again:
if (type == PCI_CAP_ID_MSI) {
- if (pdn->force_32bit_msi) {
+ if (pdev->no_64bit_msi) {
rc = rtas_change_msi(pdn, RTAS_CHANGE_32MSI_FN, nvec);
if (rc < 0) {
/*
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index de40b48b460e..da08ed088157 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -361,7 +361,7 @@ static int fsl_msi_setup_hwirq(struct fsl_msi *msi, struct platform_device *dev,
cascade_data->virq = virt_msir;
msi->cascade_array[irq_index] = cascade_data;
- ret = request_irq(virt_msir, fsl_msi_cascade, 0,
+ ret = request_irq(virt_msir, fsl_msi_cascade, IRQF_NO_THREAD,
"fsl-msi-cascade", cascade_data);
if (ret) {
dev_err(&dev->dev, "failed to request_irq(%d), ret = %d\n",
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index b988b5addf86..c8efbb37d6e0 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -293,10 +293,10 @@ static inline void disable_surveillance(void)
args.token = rtas_token("set-indicator");
if (args.token == RTAS_UNKNOWN_SERVICE)
return;
- args.nargs = 3;
- args.nret = 1;
+ args.nargs = cpu_to_be32(3);
+ args.nret = cpu_to_be32(1);
args.rets = &args.args[3];
- args.args[0] = SURVEILLANCE_TOKEN;
+ args.args[0] = cpu_to_be32(SURVEILLANCE_TOKEN);
args.args[1] = 0;
args.args[2] = 0;
enter_rtas(__pa(&args));
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig
index 9d94fdd9f525..9432d0f202ef 100644
--- a/arch/s390/configs/default_defconfig
+++ b/arch/s390/configs/default_defconfig
@@ -35,7 +35,6 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
-CONFIG_BLK_DEV_INTEGRITY=y
CONFIG_BLK_DEV_THROTTLING=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_IBM_PARTITION=y
@@ -245,6 +244,7 @@ CONFIG_NF_TABLES_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NFT_CHAIN_NAT_IPV4=m
CONFIG_NF_TABLES_ARP=m
+CONFIG_NF_NAT_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -252,11 +252,6 @@ CONFIG_IP_NF_MATCH_RPFILTER=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT_IPV4=m
-CONFIG_IP_NF_TARGET_MASQUERADE=m
-CONFIG_IP_NF_TARGET_NETMAP=m
-CONFIG_IP_NF_TARGET_REDIRECT=m
CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_CLUSTERIP=m
CONFIG_IP_NF_TARGET_ECN=m
@@ -270,6 +265,7 @@ CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_NF_TABLES_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
+CONFIG_NF_NAT_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
@@ -286,9 +282,6 @@ CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_RAW=m
CONFIG_IP6_NF_SECURITY=m
-CONFIG_NF_NAT_IPV6=m
-CONFIG_IP6_NF_TARGET_MASQUERADE=m
-CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=m
CONFIG_NET_SCTPPROBE=m
CONFIG_RDS=m
@@ -374,14 +367,13 @@ CONFIG_BLK_DEV_SR=m
CONFIG_CHR_DEV_SG=y
CONFIG_CHR_DEV_SCH=m
CONFIG_SCSI_ENCLOSURE=m
-CONFIG_SCSI_MULTI_LUN=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_LOGGING=y
CONFIG_SCSI_SPI_ATTRS=m
+CONFIG_SCSI_FC_ATTRS=y
CONFIG_SCSI_SAS_LIBSAS=m
CONFIG_SCSI_SRP_ATTRS=m
CONFIG_ISCSI_TCP=m
-CONFIG_LIBFCOE=m
CONFIG_SCSI_DEBUG=m
CONFIG_ZFCP=y
CONFIG_SCSI_VIRTIO=m
@@ -427,7 +419,6 @@ CONFIG_VIRTIO_NET=m
CONFIG_NLMON=m
CONFIG_VHOST_NET=m
# CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_CHELSIO is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
@@ -481,14 +472,14 @@ CONFIG_JFS_FS=m
CONFIG_JFS_POSIX_ACL=y
CONFIG_JFS_SECURITY=y
CONFIG_JFS_STATISTICS=y
-CONFIG_XFS_FS=m
+CONFIG_XFS_FS=y
CONFIG_XFS_QUOTA=y
CONFIG_XFS_POSIX_ACL=y
CONFIG_XFS_RT=y
CONFIG_XFS_DEBUG=y
CONFIG_GFS2_FS=m
CONFIG_OCFS2_FS=m
-CONFIG_BTRFS_FS=m
+CONFIG_BTRFS_FS=y
CONFIG_BTRFS_FS_POSIX_ACL=y
CONFIG_NILFS2_FS=m
CONFIG_FANOTIFY=y
@@ -574,7 +565,6 @@ CONFIG_DEBUG_SHIRQ=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_TIMER_STATS=y
CONFIG_DEBUG_RT_MUTEXES=y
-CONFIG_RT_MUTEX_TESTER=y
CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y
CONFIG_PROVE_LOCKING=y
CONFIG_LOCK_STAT=y
@@ -600,8 +590,13 @@ CONFIG_FAULT_INJECTION_DEBUG_FS=y
CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
CONFIG_LATENCYTOP=y
CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
+CONFIG_IRQSOFF_TRACER=y
+CONFIG_PREEMPT_TRACER=y
+CONFIG_SCHED_TRACER=y
+CONFIG_FTRACE_SYSCALLS=y
+CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
-# CONFIG_KPROBE_EVENT is not set
+CONFIG_UPROBE_EVENT=y
CONFIG_LKDTM=m
CONFIG_TEST_LIST_SORT=y
CONFIG_KPROBES_SANITY_TEST=y
@@ -609,7 +604,10 @@ CONFIG_RBTREE_TEST=y
CONFIG_INTERVAL_TREE_TEST=m
CONFIG_PERCPU_TEST=m
CONFIG_ATOMIC64_SELFTEST=y
+CONFIG_TEST_STRING_HELPERS=y
+CONFIG_TEST_KSTRTOX=y
CONFIG_DMA_API_DEBUG=y
+CONFIG_TEST_BPF=m
# CONFIG_STRICT_DEVMEM is not set
CONFIG_S390_PTDUMP=y
CONFIG_ENCRYPTED_KEYS=m
@@ -673,12 +671,6 @@ CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
CONFIG_X509_CERTIFICATE_PARSER=m
CONFIG_CRC7=m
CONFIG_CRC8=m
-CONFIG_XZ_DEC_X86=y
-CONFIG_XZ_DEC_POWERPC=y
-CONFIG_XZ_DEC_IA64=y
-CONFIG_XZ_DEC_ARM=y
-CONFIG_XZ_DEC_ARMTHUMB=y
-CONFIG_XZ_DEC_SPARC=y
CONFIG_CORDIC=m
CONFIG_CMM=m
CONFIG_APPLDATA_BASE=y
diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig
index 90f514baa37d..219dca6ea926 100644
--- a/arch/s390/configs/gcov_defconfig
+++ b/arch/s390/configs/gcov_defconfig
@@ -35,7 +35,6 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
-CONFIG_BLK_DEV_INTEGRITY=y
CONFIG_BLK_DEV_THROTTLING=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_IBM_PARTITION=y
@@ -243,6 +242,7 @@ CONFIG_NF_TABLES_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NFT_CHAIN_NAT_IPV4=m
CONFIG_NF_TABLES_ARP=m
+CONFIG_NF_NAT_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -250,11 +250,6 @@ CONFIG_IP_NF_MATCH_RPFILTER=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT_IPV4=m
-CONFIG_IP_NF_TARGET_MASQUERADE=m
-CONFIG_IP_NF_TARGET_NETMAP=m
-CONFIG_IP_NF_TARGET_REDIRECT=m
CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_CLUSTERIP=m
CONFIG_IP_NF_TARGET_ECN=m
@@ -268,6 +263,7 @@ CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_NF_TABLES_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
+CONFIG_NF_NAT_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
@@ -284,9 +280,6 @@ CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_RAW=m
CONFIG_IP6_NF_SECURITY=m
-CONFIG_NF_NAT_IPV6=m
-CONFIG_IP6_NF_TARGET_MASQUERADE=m
-CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=m
CONFIG_NET_SCTPPROBE=m
CONFIG_RDS=m
@@ -371,14 +364,13 @@ CONFIG_BLK_DEV_SR=m
CONFIG_CHR_DEV_SG=y
CONFIG_CHR_DEV_SCH=m
CONFIG_SCSI_ENCLOSURE=m
-CONFIG_SCSI_MULTI_LUN=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_LOGGING=y
CONFIG_SCSI_SPI_ATTRS=m
+CONFIG_SCSI_FC_ATTRS=y
CONFIG_SCSI_SAS_LIBSAS=m
CONFIG_SCSI_SRP_ATTRS=m
CONFIG_ISCSI_TCP=m
-CONFIG_LIBFCOE=m
CONFIG_SCSI_DEBUG=m
CONFIG_ZFCP=y
CONFIG_SCSI_VIRTIO=m
@@ -424,7 +416,6 @@ CONFIG_VIRTIO_NET=m
CONFIG_NLMON=m
CONFIG_VHOST_NET=m
# CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_CHELSIO is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
@@ -478,13 +469,13 @@ CONFIG_JFS_FS=m
CONFIG_JFS_POSIX_ACL=y
CONFIG_JFS_SECURITY=y
CONFIG_JFS_STATISTICS=y
-CONFIG_XFS_FS=m
+CONFIG_XFS_FS=y
CONFIG_XFS_QUOTA=y
CONFIG_XFS_POSIX_ACL=y
CONFIG_XFS_RT=y
CONFIG_GFS2_FS=m
CONFIG_OCFS2_FS=m
-CONFIG_BTRFS_FS=m
+CONFIG_BTRFS_FS=y
CONFIG_BTRFS_FS_POSIX_ACL=y
CONFIG_NILFS2_FS=m
CONFIG_FANOTIFY=y
@@ -626,12 +617,6 @@ CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
CONFIG_X509_CERTIFICATE_PARSER=m
CONFIG_CRC7=m
CONFIG_CRC8=m
-CONFIG_XZ_DEC_X86=y
-CONFIG_XZ_DEC_POWERPC=y
-CONFIG_XZ_DEC_IA64=y
-CONFIG_XZ_DEC_ARM=y
-CONFIG_XZ_DEC_ARMTHUMB=y
-CONFIG_XZ_DEC_SPARC=y
CONFIG_CORDIC=m
CONFIG_CMM=m
CONFIG_APPLDATA_BASE=y
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
index 13559d32af69..822c2f2e0c25 100644
--- a/arch/s390/configs/performance_defconfig
+++ b/arch/s390/configs/performance_defconfig
@@ -33,7 +33,6 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
-CONFIG_BLK_DEV_INTEGRITY=y
CONFIG_BLK_DEV_THROTTLING=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_IBM_PARTITION=y
@@ -241,6 +240,7 @@ CONFIG_NF_TABLES_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NFT_CHAIN_NAT_IPV4=m
CONFIG_NF_TABLES_ARP=m
+CONFIG_NF_NAT_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -248,11 +248,6 @@ CONFIG_IP_NF_MATCH_RPFILTER=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT_IPV4=m
-CONFIG_IP_NF_TARGET_MASQUERADE=m
-CONFIG_IP_NF_TARGET_NETMAP=m
-CONFIG_IP_NF_TARGET_REDIRECT=m
CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_CLUSTERIP=m
CONFIG_IP_NF_TARGET_ECN=m
@@ -266,6 +261,7 @@ CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_NF_TABLES_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
+CONFIG_NF_NAT_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
@@ -282,9 +278,6 @@ CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_RAW=m
CONFIG_IP6_NF_SECURITY=m
-CONFIG_NF_NAT_IPV6=m
-CONFIG_IP6_NF_TARGET_MASQUERADE=m
-CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=m
CONFIG_NET_SCTPPROBE=m
CONFIG_RDS=m
@@ -369,14 +362,13 @@ CONFIG_BLK_DEV_SR=m
CONFIG_CHR_DEV_SG=y
CONFIG_CHR_DEV_SCH=m
CONFIG_SCSI_ENCLOSURE=m
-CONFIG_SCSI_MULTI_LUN=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_LOGGING=y
CONFIG_SCSI_SPI_ATTRS=m
+CONFIG_SCSI_FC_ATTRS=y
CONFIG_SCSI_SAS_LIBSAS=m
CONFIG_SCSI_SRP_ATTRS=m
CONFIG_ISCSI_TCP=m
-CONFIG_LIBFCOE=m
CONFIG_SCSI_DEBUG=m
CONFIG_ZFCP=y
CONFIG_SCSI_VIRTIO=m
@@ -422,7 +414,6 @@ CONFIG_VIRTIO_NET=m
CONFIG_NLMON=m
CONFIG_VHOST_NET=m
# CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_CHELSIO is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
@@ -476,13 +467,13 @@ CONFIG_JFS_FS=m
CONFIG_JFS_POSIX_ACL=y
CONFIG_JFS_SECURITY=y
CONFIG_JFS_STATISTICS=y
-CONFIG_XFS_FS=m
+CONFIG_XFS_FS=y
CONFIG_XFS_QUOTA=y
CONFIG_XFS_POSIX_ACL=y
CONFIG_XFS_RT=y
CONFIG_GFS2_FS=m
CONFIG_OCFS2_FS=m
-CONFIG_BTRFS_FS=m
+CONFIG_BTRFS_FS=y
CONFIG_BTRFS_FS_POSIX_ACL=y
CONFIG_NILFS2_FS=m
CONFIG_FANOTIFY=y
@@ -550,8 +541,11 @@ CONFIG_TIMER_STATS=y
CONFIG_RCU_TORTURE_TEST=m
CONFIG_RCU_CPU_STALL_TIMEOUT=60
CONFIG_LATENCYTOP=y
+CONFIG_SCHED_TRACER=y
+CONFIG_FTRACE_SYSCALLS=y
+CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
-# CONFIG_KPROBE_EVENT is not set
+CONFIG_UPROBE_EVENT=y
CONFIG_LKDTM=m
CONFIG_PERCPU_TEST=m
CONFIG_ATOMIC64_SELFTEST=y
@@ -618,12 +612,6 @@ CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
CONFIG_X509_CERTIFICATE_PARSER=m
CONFIG_CRC7=m
CONFIG_CRC8=m
-CONFIG_XZ_DEC_X86=y
-CONFIG_XZ_DEC_POWERPC=y
-CONFIG_XZ_DEC_IA64=y
-CONFIG_XZ_DEC_ARM=y
-CONFIG_XZ_DEC_ARMTHUMB=y
-CONFIG_XZ_DEC_SPARC=y
CONFIG_CORDIC=m
CONFIG_CMM=m
CONFIG_APPLDATA_BASE=y
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig
index e376789f2d8d..9d63051ebec4 100644
--- a/arch/s390/configs/zfcpdump_defconfig
+++ b/arch/s390/configs/zfcpdump_defconfig
@@ -22,8 +22,8 @@ CONFIG_HZ_100=y
CONFIG_CRASH_DUMP=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
# CONFIG_SECCOMP is not set
-# CONFIG_IUCV is not set
CONFIG_NET=y
+# CONFIG_IUCV is not set
CONFIG_ATM=y
CONFIG_ATM_LANE=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
@@ -36,9 +36,9 @@ CONFIG_ENCLOSURE_SERVICES=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_SCSI_ENCLOSURE=y
-CONFIG_SCSI_MULTI_LUN=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_FC_ATTRS=y
CONFIG_SCSI_SRP_ATTRS=y
CONFIG_ZFCP=y
# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
@@ -75,12 +75,6 @@ CONFIG_DEBUG_KERNEL=y
CONFIG_RCU_CPU_STALL_TIMEOUT=60
# CONFIG_FTRACE is not set
# CONFIG_STRICT_DEVMEM is not set
-CONFIG_XZ_DEC_X86=y
-CONFIG_XZ_DEC_POWERPC=y
-CONFIG_XZ_DEC_IA64=y
-CONFIG_XZ_DEC_ARM=y
-CONFIG_XZ_DEC_ARMTHUMB=y
-CONFIG_XZ_DEC_SPARC=y
# CONFIG_PFAULT is not set
# CONFIG_S390_HYPFS_FS is not set
# CONFIG_VIRTUALIZATION is not set
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index fab35a8efa4f..785c5f24d6f9 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -92,10 +92,10 @@ CONFIG_CHR_DEV_ST=y
CONFIG_BLK_DEV_SR=y
CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=y
-CONFIG_SCSI_MULTI_LUN=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_LOGGING=y
CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_FC_ATTRS=y
CONFIG_ZFCP=y
CONFIG_SCSI_VIRTIO=y
CONFIG_NETDEVICES=y
@@ -164,14 +164,13 @@ CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m
-CONFIG_CRYPTO_CRCT10DIF=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 51d14fe5eb9a..ca1cabb3a96c 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -121,6 +121,8 @@ unsigned long __kprobes prepare_ftrace_return(unsigned long parent,
{
struct ftrace_graph_ent trace;
+ if (unlikely(ftrace_graph_is_dead()))
+ goto out;
if (unlikely(atomic_read(&current->tracing_graph_pause)))
goto out;
ip = (ip & PSW_ADDR_INSN) - MCOUNT_INSN_SIZE;
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index dd1c24ceda50..3f51cf4e8f02 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -54,12 +54,8 @@ void s390_handle_mcck(void)
*/
local_irq_save(flags);
local_mcck_disable();
- /*
- * Ummm... Does this make sense at all? Copying the percpu struct
- * and then zapping it one statement later?
- */
- memcpy(&mcck, this_cpu_ptr(&cpu_mcck), sizeof(mcck));
- memset(&mcck, 0, sizeof(struct mcck_struct));
+ mcck = *this_cpu_ptr(&cpu_mcck);
+ memset(this_cpu_ptr(&cpu_mcck), 0, sizeof(mcck));
clear_cpu_flag(CIF_MCCK_PENDING);
local_mcck_enable();
local_irq_restore(flags);
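The nmi.c hunk is easy to misread in diff form; what changed is which copy gets cleared. A reduced side-by-side of the two behaviours (local variable names only, not literal code from either version):

        /* before: the freshly taken snapshot is wiped and the per-CPU
         * record is never reset */
        mcck = *this_cpu_ptr(&cpu_mcck);
        memset(&mcck, 0, sizeof(mcck));

        /* after: the per-CPU record is reset for the next machine check
         * and the local snapshot survives to be acted on */
        mcck = *this_cpu_ptr(&cpu_mcck);
        memset(this_cpu_ptr(&cpu_mcck), 0, sizeof(mcck));
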
diff --git a/arch/s390/kernel/vdso32/clock_gettime.S b/arch/s390/kernel/vdso32/clock_gettime.S
index 48c2206a3956..5eec9afbb5b5 100644
--- a/arch/s390/kernel/vdso32/clock_gettime.S
+++ b/arch/s390/kernel/vdso32/clock_gettime.S
@@ -19,6 +19,7 @@
.type __kernel_clock_gettime,@function
__kernel_clock_gettime:
.cfi_startproc
+ ahi %r15,-16
basr %r5,0
0: al %r5,21f-0b(%r5) /* get &_vdso_data */
chi %r2,__CLOCK_REALTIME_COARSE
@@ -34,8 +35,8 @@ __kernel_clock_gettime:
1: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */
tml %r4,0x0001 /* pending update ? loop */
jnz 1b
- stcke 24(%r15) /* Store TOD clock */
- lm %r0,%r1,25(%r15)
+ stcke 0(%r15) /* Store TOD clock */
+ lm %r0,%r1,1(%r15)
s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
sl %r1,__VDSO_XTIME_STAMP+4(%r5)
brc 3,2f
@@ -70,6 +71,7 @@ __kernel_clock_gettime:
8: st %r2,0(%r3) /* store tp->tv_sec */
st %r1,4(%r3) /* store tp->tv_nsec */
lhi %r2,0
+ ahi %r15,16
br %r14
/* CLOCK_MONOTONIC_COARSE */
@@ -96,8 +98,8 @@ __kernel_clock_gettime:
11: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */
tml %r4,0x0001 /* pending update ? loop */
jnz 11b
- stcke 24(%r15) /* Store TOD clock */
- lm %r0,%r1,25(%r15)
+ stcke 0(%r15) /* Store TOD clock */
+ lm %r0,%r1,1(%r15)
s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
sl %r1,__VDSO_XTIME_STAMP+4(%r5)
brc 3,12f
@@ -132,11 +134,13 @@ __kernel_clock_gettime:
17: st %r2,0(%r3) /* store tp->tv_sec */
st %r1,4(%r3) /* store tp->tv_nsec */
lhi %r2,0
+ ahi %r15,16
br %r14
/* Fallback to system call */
19: lhi %r1,__NR_clock_gettime
svc 0
+ ahi %r15,16
br %r14
20: .long 1000000000
diff --git a/arch/s390/kernel/vdso32/gettimeofday.S b/arch/s390/kernel/vdso32/gettimeofday.S
index 60def5f562db..719de6186b20 100644
--- a/arch/s390/kernel/vdso32/gettimeofday.S
+++ b/arch/s390/kernel/vdso32/gettimeofday.S
@@ -19,6 +19,7 @@
.type __kernel_gettimeofday,@function
__kernel_gettimeofday:
.cfi_startproc
+ ahi %r15,-16
basr %r5,0
0: al %r5,13f-0b(%r5) /* get &_vdso_data */
1: ltr %r3,%r3 /* check if tz is NULL */
@@ -29,30 +30,30 @@ __kernel_gettimeofday:
l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */
tml %r4,0x0001 /* pending update ? loop */
jnz 1b
- stcke 24(%r15) /* Store TOD clock */
- lm %r0,%r1,25(%r15)
+ stcke 0(%r15) /* Store TOD clock */
+ lm %r0,%r1,1(%r15)
s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
sl %r1,__VDSO_XTIME_STAMP+4(%r5)
brc 3,3f
ahi %r0,-1
3: ms %r0,__VDSO_TK_MULT(%r5) /* * tk->mult */
- st %r0,24(%r15)
+ st %r0,0(%r15)
l %r0,__VDSO_TK_MULT(%r5)
ltr %r1,%r1
mr %r0,%r0
jnm 4f
a %r0,__VDSO_TK_MULT(%r5)
-4: al %r0,24(%r15)
+4: al %r0,0(%r15)
al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */
al %r1,__VDSO_XTIME_NSEC+4(%r5)
brc 12,5f
ahi %r0,1
-5: mvc 24(4,%r15),__VDSO_XTIME_SEC+4(%r5)
+5: mvc 0(4,%r15),__VDSO_XTIME_SEC+4(%r5)
cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */
jne 1b
l %r4,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
srdl %r0,0(%r4) /* >> tk->shift */
- l %r4,24(%r15) /* get tv_sec from stack */
+ l %r4,0(%r15) /* get tv_sec from stack */
basr %r5,0
6: ltr %r0,%r0
jnz 7f
@@ -71,6 +72,7 @@ __kernel_gettimeofday:
9: srl %r0,6
st %r0,4(%r2) /* store tv->tv_usec */
10: slr %r2,%r2
+ ahi %r15,16
br %r14
11: .long 1000000000
12: .long 274877907
diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S
index 9d9761f8e110..7699e735ae28 100644
--- a/arch/s390/kernel/vdso64/clock_gettime.S
+++ b/arch/s390/kernel/vdso64/clock_gettime.S
@@ -19,6 +19,7 @@
.type __kernel_clock_gettime,@function
__kernel_clock_gettime:
.cfi_startproc
+ aghi %r15,-16
larl %r5,_vdso_data
cghi %r2,__CLOCK_REALTIME_COARSE
je 4f
@@ -37,10 +38,10 @@ __kernel_clock_gettime:
0: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */
tmll %r4,0x0001 /* pending update ? loop */
jnz 0b
- stcke 48(%r15) /* Store TOD clock */
+ stcke 0(%r15) /* Store TOD clock */
lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
lg %r0,__VDSO_WTOM_SEC(%r5)
- lg %r1,49(%r15)
+ lg %r1,1(%r15)
sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
alg %r1,__VDSO_WTOM_NSEC(%r5)
@@ -56,6 +57,7 @@ __kernel_clock_gettime:
2: stg %r0,0(%r3) /* store tp->tv_sec */
stg %r1,8(%r3) /* store tp->tv_nsec */
lghi %r2,0
+ aghi %r15,16
br %r14
/* CLOCK_MONOTONIC_COARSE */
@@ -82,9 +84,9 @@ __kernel_clock_gettime:
5: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */
tmll %r4,0x0001 /* pending update ? loop */
jnz 5b
- stcke 48(%r15) /* Store TOD clock */
+ stcke 0(%r15) /* Store TOD clock */
lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
- lg %r1,49(%r15)
+ lg %r1,1(%r15)
sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
@@ -101,6 +103,7 @@ __kernel_clock_gettime:
7: stg %r0,0(%r3) /* store tp->tv_sec */
stg %r1,8(%r3) /* store tp->tv_nsec */
lghi %r2,0
+ aghi %r15,16
br %r14
/* CLOCK_THREAD_CPUTIME_ID for this thread */
@@ -134,11 +137,13 @@ __kernel_clock_gettime:
slgr %r4,%r0 /* r4 = tv_nsec */
stg %r4,8(%r3)
lghi %r2,0
+ aghi %r15,16
br %r14
/* Fallback to system call */
12: lghi %r1,__NR_clock_gettime
svc 0
+ aghi %r15,16
br %r14
13: .quad 1000000000
diff --git a/arch/s390/kernel/vdso64/gettimeofday.S b/arch/s390/kernel/vdso64/gettimeofday.S
index 7a344995a97f..6ce46707663c 100644
--- a/arch/s390/kernel/vdso64/gettimeofday.S
+++ b/arch/s390/kernel/vdso64/gettimeofday.S
@@ -19,6 +19,7 @@
.type __kernel_gettimeofday,@function
__kernel_gettimeofday:
.cfi_startproc
+ aghi %r15,-16
larl %r5,_vdso_data
0: ltgr %r3,%r3 /* check if tz is NULL */
je 1f
@@ -28,8 +29,8 @@ __kernel_gettimeofday:
lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */
tmll %r4,0x0001 /* pending update ? loop */
jnz 0b
- stcke 48(%r15) /* Store TOD clock */
- lg %r1,49(%r15)
+ stcke 0(%r15) /* Store TOD clock */
+ lg %r1,1(%r15)
sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
@@ -50,6 +51,7 @@ __kernel_gettimeofday:
srlg %r0,%r0,6
stg %r0,8(%r2) /* store tv->tv_usec */
4: lghi %r2,0
+ aghi %r15,16
br %r14
5: .quad 1000000000
.long 274877907
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 416f2a323ba5..7f0089d9a4aa 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -66,7 +66,11 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
clock = S390_lowcore.last_update_clock;
asm volatile(
" stpt %0\n" /* Store current cpu timer value */
+#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
+ " stckf %1" /* Store current tod clock value */
+#else
" stck %1" /* Store current tod clock value */
+#endif
: "=m" (S390_lowcore.last_update_timer),
"=m" (S390_lowcore.last_update_clock));
S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h
index 765c1776ec9f..0e69b7e7a439 100644
--- a/arch/sparc/include/asm/atomic_32.h
+++ b/arch/sparc/include/asm/atomic_32.h
@@ -22,7 +22,7 @@
int atomic_add_return(int, atomic_t *);
int atomic_cmpxchg(atomic_t *, int, int);
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+int atomic_xchg(atomic_t *, int);
int __atomic_add_unless(atomic_t *, int, int);
void atomic_set(atomic_t *, int);
diff --git a/arch/sparc/include/asm/cmpxchg_32.h b/arch/sparc/include/asm/cmpxchg_32.h
index 32c29a133f9d..d38b52dca216 100644
--- a/arch/sparc/include/asm/cmpxchg_32.h
+++ b/arch/sparc/include/asm/cmpxchg_32.h
@@ -11,22 +11,14 @@
#ifndef __ARCH_SPARC_CMPXCHG__
#define __ARCH_SPARC_CMPXCHG__
-static inline unsigned long xchg_u32(__volatile__ unsigned long *m, unsigned long val)
-{
- __asm__ __volatile__("swap [%2], %0"
- : "=&r" (val)
- : "0" (val), "r" (m)
- : "memory");
- return val;
-}
-
+unsigned long __xchg_u32(volatile u32 *m, u32 new);
void __xchg_called_with_bad_pointer(void);
static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int size)
{
switch (size) {
case 4:
- return xchg_u32(ptr, x);
+ return __xchg_u32(ptr, x);
}
__xchg_called_with_bad_pointer();
return x;
diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
index 5b1b52a04ad6..7e064c68c5ec 100644
--- a/arch/sparc/include/asm/dma-mapping.h
+++ b/arch/sparc/include/asm/dma-mapping.h
@@ -12,6 +12,14 @@ int dma_supported(struct device *dev, u64 mask);
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction dir)
+{
+ /* Since dma_{alloc,free}_noncoherent() allocate coherent memory, this
+ * routine can be a nop.
+ */
+}
+
extern struct dma_map_ops *dma_ops;
extern struct dma_map_ops *leon_dma_ops;
extern struct dma_map_ops pci32_dma_ops;
diff --git a/arch/sparc/include/uapi/asm/swab.h b/arch/sparc/include/uapi/asm/swab.h
index a34ad079487e..4c7c12d69bea 100644
--- a/arch/sparc/include/uapi/asm/swab.h
+++ b/arch/sparc/include/uapi/asm/swab.h
@@ -9,9 +9,9 @@ static inline __u16 __arch_swab16p(const __u16 *addr)
{
__u16 ret;
- __asm__ __volatile__ ("lduha [%1] %2, %0"
+ __asm__ __volatile__ ("lduha [%2] %3, %0"
: "=r" (ret)
- : "r" (addr), "i" (ASI_PL));
+ : "m" (*addr), "r" (addr), "i" (ASI_PL));
return ret;
}
#define __arch_swab16p __arch_swab16p
@@ -20,9 +20,9 @@ static inline __u32 __arch_swab32p(const __u32 *addr)
{
__u32 ret;
- __asm__ __volatile__ ("lduwa [%1] %2, %0"
+ __asm__ __volatile__ ("lduwa [%2] %3, %0"
: "=r" (ret)
- : "r" (addr), "i" (ASI_PL));
+ : "m" (*addr), "r" (addr), "i" (ASI_PL));
return ret;
}
#define __arch_swab32p __arch_swab32p
@@ -31,9 +31,9 @@ static inline __u64 __arch_swab64p(const __u64 *addr)
{
__u64 ret;
- __asm__ __volatile__ ("ldxa [%1] %2, %0"
+ __asm__ __volatile__ ("ldxa [%2] %3, %0"
: "=r" (ret)
- : "r" (addr), "i" (ASI_PL));
+ : "m" (*addr), "r" (addr), "i" (ASI_PL));
return ret;
}
#define __arch_swab64p __arch_swab64p
diff --git a/arch/sparc/kernel/pci_schizo.c b/arch/sparc/kernel/pci_schizo.c
index 8f76f23dac38..f9c6813c132d 100644
--- a/arch/sparc/kernel/pci_schizo.c
+++ b/arch/sparc/kernel/pci_schizo.c
@@ -581,7 +581,7 @@ static irqreturn_t schizo_pcierr_intr_other(struct pci_pbm_info *pbm)
{
unsigned long csr_reg, csr, csr_error_bits;
irqreturn_t ret = IRQ_NONE;
- u16 stat;
+ u32 stat;
csr_reg = pbm->pbm_regs + SCHIZO_PCI_CTRL;
csr = upa_readq(csr_reg);
@@ -617,7 +617,7 @@ static irqreturn_t schizo_pcierr_intr_other(struct pci_pbm_info *pbm)
pbm->name);
ret = IRQ_HANDLED;
}
- pci_read_config_word(pbm->pci_bus->self, PCI_STATUS, &stat);
+ pbm->pci_ops->read(pbm->pci_bus, 0, PCI_STATUS, 2, &stat);
if (stat & (PCI_STATUS_PARITY |
PCI_STATUS_SIG_TARGET_ABORT |
PCI_STATUS_REC_TARGET_ABORT |
@@ -625,7 +625,7 @@ static irqreturn_t schizo_pcierr_intr_other(struct pci_pbm_info *pbm)
PCI_STATUS_SIG_SYSTEM_ERROR)) {
printk("%s: PCI bus error, PCI_STATUS[%04x]\n",
pbm->name, stat);
- pci_write_config_word(pbm->pci_bus->self, PCI_STATUS, 0xffff);
+ pbm->pci_ops->write(pbm->pci_bus, 0, PCI_STATUS, 2, 0xffff);
ret = IRQ_HANDLED;
}
return ret;
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 302c476413d5..da6f1a7fc4db 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -816,13 +816,17 @@ void arch_send_call_function_single_ipi(int cpu)
void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs)
{
clear_softint(1 << irq);
+ irq_enter();
generic_smp_call_function_interrupt();
+ irq_exit();
}
void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs)
{
clear_softint(1 << irq);
+ irq_enter();
generic_smp_call_function_single_interrupt();
+ irq_exit();
}
static void tsb_sync(void *info)
diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c
index a7c418ac26af..71cd65ab200c 100644
--- a/arch/sparc/lib/atomic32.c
+++ b/arch/sparc/lib/atomic32.c
@@ -45,6 +45,19 @@ ATOMIC_OP(add, +=)
#undef ATOMIC_OP
+int atomic_xchg(atomic_t *v, int new)
+{
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(ATOMIC_HASH(v), flags);
+ ret = v->counter;
+ v->counter = new;
+ spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
+ return ret;
+}
+EXPORT_SYMBOL(atomic_xchg);
+
int atomic_cmpxchg(atomic_t *v, int old, int new)
{
int ret;
@@ -137,3 +150,17 @@ unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new)
return (unsigned long)prev;
}
EXPORT_SYMBOL(__cmpxchg_u32);
+
+unsigned long __xchg_u32(volatile u32 *ptr, u32 new)
+{
+ unsigned long flags;
+ u32 prev;
+
+ spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
+ prev = *ptr;
+ *ptr = new;
+ spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);
+
+ return (unsigned long)prev;
+}
+EXPORT_SYMBOL(__xchg_u32);
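Context for why atomic_xchg() moves out of line here: on 32-bit sparc the other atomic_t operations are emulated with a small array of hash locks, so an xchg implemented independently of those locks could interleave with a lock-protected read-modify-write. Routing it through the same ATOMIC_HASH lock restores atomicity with respect to the rest of the atomic API. A typical usage sketch (the helper name is hypothetical):

        static atomic_t pending = ATOMIC_INIT(1);

        /* claim-once: exactly one caller observes the old value 1 */
        if (atomic_xchg(&pending, 0))
                do_one_time_setup();            /* hypothetical helper */
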
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index ded8a6774ac9..41a503c15862 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -144,7 +144,7 @@ config INSTRUCTION_DECODER
config PERF_EVENTS_INTEL_UNCORE
def_bool y
- depends on PERF_EVENTS && SUP_SUP_INTEL && PCI
+ depends on PERF_EVENTS && CPU_SUP_INTEL && PCI
config OUTPUT_FORMAT
string
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 704f58aa79cd..45abc363dd3e 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -76,8 +76,10 @@ suffix-$(CONFIG_KERNEL_XZ) := xz
suffix-$(CONFIG_KERNEL_LZO) := lzo
suffix-$(CONFIG_KERNEL_LZ4) := lz4
+RUN_SIZE = $(shell $(OBJDUMP) -h vmlinux | \
+ perl $(srctree)/arch/x86/tools/calc_run_size.pl)
quiet_cmd_mkpiggy = MKPIGGY $@
- cmd_mkpiggy = $(obj)/mkpiggy $< > $@ || ( rm -f $@ ; false )
+ cmd_mkpiggy = $(obj)/mkpiggy $< $(RUN_SIZE) > $@ || ( rm -f $@ ; false )
targets += piggy.S
$(obj)/piggy.S: $(obj)/vmlinux.bin.$(suffix-y) $(obj)/mkpiggy FORCE
diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
index cbed1407a5cd..1d7fbbcc196d 100644
--- a/arch/x86/boot/compressed/head_32.S
+++ b/arch/x86/boot/compressed/head_32.S
@@ -207,7 +207,8 @@ relocated:
* Do the decompression, and jump to the new kernel..
*/
/* push arguments for decompress_kernel: */
- pushl $z_output_len /* decompressed length */
+ pushl $z_run_size /* size of kernel with .bss and .brk */
+ pushl $z_output_len /* decompressed length, end of relocs */
leal z_extract_offset_negative(%ebx), %ebp
pushl %ebp /* output address */
pushl $z_input_len /* input_len */
@@ -217,7 +218,7 @@ relocated:
pushl %eax /* heap area */
pushl %esi /* real mode pointer */
call decompress_kernel /* returns kernel location in %eax */
- addl $24, %esp
+ addl $28, %esp
/*
* Jump to the decompressed kernel.
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index 2884e0c3e8a5..6b1766c6c082 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -402,13 +402,16 @@ relocated:
* Do the decompression, and jump to the new kernel..
*/
pushq %rsi /* Save the real mode argument */
+ movq $z_run_size, %r9 /* size of kernel with .bss and .brk */
+ pushq %r9
movq %rsi, %rdi /* real mode address */
leaq boot_heap(%rip), %rsi /* malloc area for uncompression */
leaq input_data(%rip), %rdx /* input_data */
movl $z_input_len, %ecx /* input_len */
movq %rbp, %r8 /* output target address */
- movq $z_output_len, %r9 /* decompressed length */
+ movq $z_output_len, %r9 /* decompressed length, end of relocs */
call decompress_kernel /* returns kernel location in %rax */
+ popq %r9
popq %rsi
/*
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index 57ab74df7eea..30dd59a9f0b4 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -358,7 +358,8 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
unsigned char *input_data,
unsigned long input_len,
unsigned char *output,
- unsigned long output_len)
+ unsigned long output_len,
+ unsigned long run_size)
{
real_mode = rmode;
@@ -381,8 +382,14 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
free_mem_ptr = heap; /* Heap */
free_mem_end_ptr = heap + BOOT_HEAP_SIZE;
- output = choose_kernel_location(input_data, input_len,
- output, output_len);
+ /*
+ * The memory hole needed for the kernel is the larger of either
+ * the entire decompressed kernel plus relocation table, or the
+ * entire decompressed kernel plus .bss and .brk sections.
+ */
+ output = choose_kernel_location(input_data, input_len, output,
+ output_len > run_size ? output_len
+ : run_size);
/* Validate memory location choices. */
if ((unsigned long)output & (MIN_KERNEL_ALIGN - 1))
diff --git a/arch/x86/boot/compressed/mkpiggy.c b/arch/x86/boot/compressed/mkpiggy.c
index b669ab65bf6c..d8222f213182 100644
--- a/arch/x86/boot/compressed/mkpiggy.c
+++ b/arch/x86/boot/compressed/mkpiggy.c
@@ -36,11 +36,13 @@ int main(int argc, char *argv[])
uint32_t olen;
long ilen;
unsigned long offs;
+ unsigned long run_size;
FILE *f = NULL;
int retval = 1;
- if (argc < 2) {
- fprintf(stderr, "Usage: %s compressed_file\n", argv[0]);
+ if (argc < 3) {
+ fprintf(stderr, "Usage: %s compressed_file run_size\n",
+ argv[0]);
goto bail;
}
@@ -74,6 +76,7 @@ int main(int argc, char *argv[])
offs += olen >> 12; /* Add 8 bytes for each 32K block */
offs += 64*1024 + 128; /* Add 64K + 128 bytes slack */
offs = (offs+4095) & ~4095; /* Round to a 4K boundary */
+ run_size = atoi(argv[2]);
printf(".section \".rodata..compressed\",\"a\",@progbits\n");
printf(".globl z_input_len\n");
@@ -85,6 +88,8 @@ int main(int argc, char *argv[])
/* z_extract_offset_negative allows simplification of head_32.S */
printf(".globl z_extract_offset_negative\n");
printf("z_extract_offset_negative = -0x%lx\n", offs);
+ printf(".globl z_run_size\n");
+ printf("z_run_size = %lu\n", run_size);
printf(".globl input_data, input_data_end\n");
printf("input_data:\n");
diff --git a/arch/x86/include/asm/page_32_types.h b/arch/x86/include/asm/page_32_types.h
index f48b17df4224..3a52ee0e726d 100644
--- a/arch/x86/include/asm/page_32_types.h
+++ b/arch/x86/include/asm/page_32_types.h
@@ -20,7 +20,6 @@
#define THREAD_SIZE_ORDER 1
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
-#define STACKFAULT_STACK 0
#define DOUBLEFAULT_STACK 1
#define NMI_STACK 0
#define DEBUG_STACK 0
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index 678205195ae1..75450b2c7be4 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -14,12 +14,11 @@
#define IRQ_STACK_ORDER 2
#define IRQ_STACK_SIZE (PAGE_SIZE << IRQ_STACK_ORDER)
-#define STACKFAULT_STACK 1
-#define DOUBLEFAULT_STACK 2
-#define NMI_STACK 3
-#define DEBUG_STACK 4
-#define MCE_STACK 5
-#define N_EXCEPTION_STACKS 5 /* hw limit: 7 */
+#define DOUBLEFAULT_STACK 1
+#define NMI_STACK 2
+#define DEBUG_STACK 3
+#define MCE_STACK 4
+#define N_EXCEPTION_STACKS 4 /* hw limit: 7 */
#define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT)
#define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1))
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 8cd27e08e23c..8cd1cc3bc835 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -150,6 +150,7 @@ static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
}
void cpu_disable_common(void);
+void cpu_die_common(unsigned int cpu);
void native_smp_prepare_boot_cpu(void);
void native_smp_prepare_cpus(unsigned int max_cpus);
void native_smp_cpus_done(unsigned int max_cpus);
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 854053889d4d..547e344a6dc6 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -141,7 +141,7 @@ struct thread_info {
/* Only used for 64 bit */
#define _TIF_DO_NOTIFY_MASK \
(_TIF_SIGPENDING | _TIF_MCE_NOTIFY | _TIF_NOTIFY_RESUME | \
- _TIF_USER_RETURN_NOTIFY)
+ _TIF_USER_RETURN_NOTIFY | _TIF_UPROBE)
/* flags to check in __switch_to() */
#define _TIF_WORK_CTXSW \
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index bc8352e7010a..707adc6549d8 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -39,6 +39,7 @@ asmlinkage void simd_coprocessor_error(void);
#ifdef CONFIG_TRACING
asmlinkage void trace_page_fault(void);
+#define trace_stack_segment stack_segment
#define trace_divide_error divide_error
#define trace_bounds bounds
#define trace_invalid_op invalid_op
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index f04dbb3069b8..5caed1dd7ccf 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -21,6 +21,7 @@ const struct pci_device_id amd_nb_misc_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
{}
@@ -30,6 +31,7 @@ EXPORT_SYMBOL(amd_nb_misc_ids);
static const struct pci_device_id amd_nb_link_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
{}
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 4b4f78c9ba19..cfa9b5b2c27a 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -146,6 +146,8 @@ EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
static int __init x86_xsave_setup(char *s)
{
+ if (strlen(s))
+ return 0;
setup_clear_cpu_cap(X86_FEATURE_XSAVE);
setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
setup_clear_cpu_cap(X86_FEATURE_XSAVES);
diff --git a/arch/x86/kernel/cpu/microcode/amd_early.c b/arch/x86/kernel/cpu/microcode/amd_early.c
index 7aa1acc79789..06674473b0e6 100644
--- a/arch/x86/kernel/cpu/microcode/amd_early.c
+++ b/arch/x86/kernel/cpu/microcode/amd_early.c
@@ -108,12 +108,13 @@ static size_t compute_container_size(u8 *data, u32 total_size)
* load_microcode_amd() to save equivalent cpu table and microcode patches in
* kernel heap memory.
*/
-static void apply_ucode_in_initrd(void *ucode, size_t size)
+static void apply_ucode_in_initrd(void *ucode, size_t size, bool save_patch)
{
struct equiv_cpu_entry *eq;
size_t *cont_sz;
u32 *header;
u8 *data, **cont;
+ u8 (*patch)[PATCH_MAX_SIZE];
u16 eq_id = 0;
int offset, left;
u32 rev, eax, ebx, ecx, edx;
@@ -123,10 +124,12 @@ static void apply_ucode_in_initrd(void *ucode, size_t size)
new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
cont_sz = (size_t *)__pa_nodebug(&container_size);
cont = (u8 **)__pa_nodebug(&container);
+ patch = (u8 (*)[PATCH_MAX_SIZE])__pa_nodebug(&amd_ucode_patch);
#else
new_rev = &ucode_new_rev;
cont_sz = &container_size;
cont = &container;
+ patch = &amd_ucode_patch;
#endif
data = ucode;
@@ -213,9 +216,9 @@ static void apply_ucode_in_initrd(void *ucode, size_t size)
rev = mc->hdr.patch_id;
*new_rev = rev;
- /* save ucode patch */
- memcpy(amd_ucode_patch, mc,
- min_t(u32, header[1], PATCH_MAX_SIZE));
+ if (save_patch)
+ memcpy(patch, mc,
+ min_t(u32, header[1], PATCH_MAX_SIZE));
}
}
@@ -246,7 +249,7 @@ void __init load_ucode_amd_bsp(void)
*data = cp.data;
*size = cp.size;
- apply_ucode_in_initrd(cp.data, cp.size);
+ apply_ucode_in_initrd(cp.data, cp.size, true);
}
#ifdef CONFIG_X86_32
@@ -263,7 +266,7 @@ void load_ucode_amd_ap(void)
size_t *usize;
void **ucode;
- mc = (struct microcode_amd *)__pa(amd_ucode_patch);
+ mc = (struct microcode_amd *)__pa_nodebug(amd_ucode_patch);
if (mc->hdr.patch_id && mc->hdr.processor_rev_id) {
__apply_microcode_amd(mc);
return;
@@ -275,7 +278,7 @@ void load_ucode_amd_ap(void)
if (!*ucode || !*usize)
return;
- apply_ucode_in_initrd(*ucode, *usize);
+ apply_ucode_in_initrd(*ucode, *usize, false);
}
static void __init collect_cpu_sig_on_bsp(void *arg)
@@ -339,7 +342,7 @@ void load_ucode_amd_ap(void)
* AP has a different equivalence ID than BSP, looks like
* mixed-steppings silicon so go through the ucode blob anew.
*/
- apply_ucode_in_initrd(ucode_cpio.data, ucode_cpio.size);
+ apply_ucode_in_initrd(ucode_cpio.data, ucode_cpio.size, false);
}
}
#endif
@@ -347,7 +350,9 @@ void load_ucode_amd_ap(void)
int __init save_microcode_in_initrd_amd(void)
{
unsigned long cont;
+ int retval = 0;
enum ucode_state ret;
+ u8 *cont_va;
u32 eax;
if (!container)
@@ -355,13 +360,15 @@ int __init save_microcode_in_initrd_amd(void)
#ifdef CONFIG_X86_32
get_bsp_sig();
- cont = (unsigned long)container;
+ cont = (unsigned long)container;
+ cont_va = __va(container);
#else
/*
* We need the physical address of the container for both bitness since
* boot_params.hdr.ramdisk_image is a physical address.
*/
- cont = __pa(container);
+ cont = __pa(container);
+ cont_va = container;
#endif
/*
@@ -372,6 +379,8 @@ int __init save_microcode_in_initrd_amd(void)
if (relocated_ramdisk)
container = (u8 *)(__va(relocated_ramdisk) +
(cont - boot_params.hdr.ramdisk_image));
+ else
+ container = cont_va;
if (ucode_new_rev)
pr_info("microcode: updated early to new patch_level=0x%08x\n",
@@ -382,7 +391,7 @@ int __init save_microcode_in_initrd_amd(void)
ret = load_microcode_amd(eax, container, container_size);
if (ret != UCODE_OK)
- return -EINVAL;
+ retval = -EINVAL;
/*
* This will be freed any msec now, stash patches for the current
@@ -391,5 +400,5 @@ int __init save_microcode_in_initrd_amd(void)
container = NULL;
container_size = 0;
- return 0;
+ return retval;
}
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index dd9d6190b08d..08fe6e8a726e 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -465,6 +465,16 @@ static void mc_bp_resume(void)
if (uci->valid && uci->mc)
microcode_ops->apply_microcode(cpu);
+#ifdef CONFIG_X86_64
+ else if (!uci->mc)
+ /*
+ * We might resume and not have applied late microcode but still
+ * have a newer patch stashed from the early loader. We don't
+ * have it in uci->mc so we have to load it the same way we're
+ * applying patches early on the APs.
+ */
+ load_ucode_ap();
+#endif
}
static struct syscore_ops mc_syscore_ops = {
diff --git a/arch/x86/kernel/cpu/microcode/core_early.c b/arch/x86/kernel/cpu/microcode/core_early.c
index 5f28a64e71ea..2c017f242a78 100644
--- a/arch/x86/kernel/cpu/microcode/core_early.c
+++ b/arch/x86/kernel/cpu/microcode/core_early.c
@@ -124,7 +124,7 @@ void __init load_ucode_bsp(void)
static bool check_loader_disabled_ap(void)
{
#ifdef CONFIG_X86_32
- return __pa_nodebug(dis_ucode_ldr);
+ return *((bool *)__pa_nodebug(&dis_ucode_ldr));
#else
return dis_ucode_ldr;
#endif
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
index adf138eac85c..f9ed429d6e4f 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
@@ -486,14 +486,17 @@ static struct attribute_group snbep_uncore_qpi_format_group = {
.attrs = snbep_uncore_qpi_formats_attr,
};
-#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT() \
- .init_box = snbep_uncore_msr_init_box, \
+#define __SNBEP_UNCORE_MSR_OPS_COMMON_INIT() \
.disable_box = snbep_uncore_msr_disable_box, \
.enable_box = snbep_uncore_msr_enable_box, \
.disable_event = snbep_uncore_msr_disable_event, \
.enable_event = snbep_uncore_msr_enable_event, \
.read_counter = uncore_msr_read_counter
+#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT() \
+ __SNBEP_UNCORE_MSR_OPS_COMMON_INIT(), \
+ .init_box = snbep_uncore_msr_init_box \
+
static struct intel_uncore_ops snbep_uncore_msr_ops = {
SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};
@@ -1919,6 +1922,30 @@ static struct intel_uncore_type hswep_uncore_cbox = {
.format_group = &hswep_uncore_cbox_format_group,
};
+/*
+ * Write SBOX Initialization register bit by bit to avoid spurious #GPs
+ */
+static void hswep_uncore_sbox_msr_init_box(struct intel_uncore_box *box)
+{
+ unsigned msr = uncore_msr_box_ctl(box);
+
+ if (msr) {
+ u64 init = SNBEP_PMON_BOX_CTL_INT;
+ u64 flags = 0;
+ int i;
+
+ for_each_set_bit(i, (unsigned long *)&init, 64) {
+ flags |= (1ULL << i);
+ wrmsrl(msr, flags);
+ }
+ }
+}
+
+static struct intel_uncore_ops hswep_uncore_sbox_msr_ops = {
+ __SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
+ .init_box = hswep_uncore_sbox_msr_init_box
+};
+
static struct attribute *hswep_uncore_sbox_formats_attr[] = {
&format_attr_event.attr,
&format_attr_umask.attr,
@@ -1944,7 +1971,7 @@ static struct intel_uncore_type hswep_uncore_sbox = {
.event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
.box_ctl = HSWEP_S0_MSR_PMON_BOX_CTL,
.msr_offset = HSWEP_SBOX_MSR_OFFSET,
- .ops = &snbep_uncore_msr_ops,
+ .ops = &hswep_uncore_sbox_msr_ops,
.format_group = &hswep_uncore_sbox_format_group,
};
@@ -2025,13 +2052,27 @@ static struct intel_uncore_type hswep_uncore_imc = {
SNBEP_UNCORE_PCI_COMMON_INIT(),
};
+static unsigned hswep_uncore_irp_ctrs[] = {0xa0, 0xa8, 0xb0, 0xb8};
+
+static u64 hswep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct pci_dev *pdev = box->pci_dev;
+ struct hw_perf_event *hwc = &event->hw;
+ u64 count = 0;
+
+ pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
+ pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
+
+ return count;
+}
+
static struct intel_uncore_ops hswep_uncore_irp_ops = {
.init_box = snbep_uncore_pci_init_box,
.disable_box = snbep_uncore_pci_disable_box,
.enable_box = snbep_uncore_pci_enable_box,
.disable_event = ivbep_uncore_irp_disable_event,
.enable_event = ivbep_uncore_irp_enable_event,
- .read_counter = ivbep_uncore_irp_read_counter,
+ .read_counter = hswep_uncore_irp_read_counter,
};
static struct intel_uncore_type hswep_uncore_irp = {
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index 1abcb50b48ae..ff86f19b5758 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -24,7 +24,6 @@ static char x86_stack_ids[][8] = {
[ DEBUG_STACK-1 ] = "#DB",
[ NMI_STACK-1 ] = "NMI",
[ DOUBLEFAULT_STACK-1 ] = "#DF",
- [ STACKFAULT_STACK-1 ] = "#SS",
[ MCE_STACK-1 ] = "#MC",
#if DEBUG_STKSZ > EXCEPTION_STKSZ
[ N_EXCEPTION_STACKS ...
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index df088bb03fb3..c0226ab54106 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -828,9 +828,15 @@ ENTRY(native_iret)
jnz native_irq_return_ldt
#endif
+.global native_irq_return_iret
native_irq_return_iret:
+ /*
+ * This may fault. Non-paranoid faults on return to userspace are
+ * handled by fixup_bad_iret. These include #SS, #GP, and #NP.
+ * Double-faults due to espfix64 are handled in do_double_fault.
+ * Other faults here are fatal.
+ */
iretq
- _ASM_EXTABLE(native_irq_return_iret, bad_iret)
#ifdef CONFIG_X86_ESPFIX64
native_irq_return_ldt:
@@ -858,25 +864,6 @@ native_irq_return_ldt:
jmp native_irq_return_iret
#endif
- .section .fixup,"ax"
-bad_iret:
- /*
- * The iret traps when the %cs or %ss being restored is bogus.
- * We've lost the original trap vector and error code.
- * #GPF is the most likely one to get for an invalid selector.
- * So pretend we completed the iret and took the #GPF in user mode.
- *
- * We are now running with the kernel GS after exception recovery.
- * But error_entry expects us to have user GS to match the user %cs,
- * so swap back.
- */
- pushq $0
-
- SWAPGS
- jmp general_protection
-
- .previous
-
/* edi: workmask, edx: work */
retint_careful:
CFI_RESTORE_STATE
@@ -922,37 +909,6 @@ ENTRY(retint_kernel)
CFI_ENDPROC
END(common_interrupt)
- /*
- * If IRET takes a fault on the espfix stack, then we
- * end up promoting it to a doublefault. In that case,
- * modify the stack to make it look like we just entered
- * the #GP handler from user space, similar to bad_iret.
- */
-#ifdef CONFIG_X86_ESPFIX64
- ALIGN
-__do_double_fault:
- XCPT_FRAME 1 RDI+8
- movq RSP(%rdi),%rax /* Trap on the espfix stack? */
- sarq $PGDIR_SHIFT,%rax
- cmpl $ESPFIX_PGD_ENTRY,%eax
- jne do_double_fault /* No, just deliver the fault */
- cmpl $__KERNEL_CS,CS(%rdi)
- jne do_double_fault
- movq RIP(%rdi),%rax
- cmpq $native_irq_return_iret,%rax
- jne do_double_fault /* This shouldn't happen... */
- movq PER_CPU_VAR(kernel_stack),%rax
- subq $(6*8-KERNEL_STACK_OFFSET),%rax /* Reset to original stack */
- movq %rax,RSP(%rdi)
- movq $0,(%rax) /* Missing (lost) #GP error code */
- movq $general_protection,RIP(%rdi)
- retq
- CFI_ENDPROC
-END(__do_double_fault)
-#else
-# define __do_double_fault do_double_fault
-#endif
-
/*
* APIC interrupts.
*/
@@ -1124,7 +1080,7 @@ idtentry overflow do_overflow has_error_code=0
idtentry bounds do_bounds has_error_code=0
idtentry invalid_op do_invalid_op has_error_code=0
idtentry device_not_available do_device_not_available has_error_code=0
-idtentry double_fault __do_double_fault has_error_code=1 paranoid=1
+idtentry double_fault do_double_fault has_error_code=1 paranoid=1
idtentry coprocessor_segment_overrun do_coprocessor_segment_overrun has_error_code=0
idtentry invalid_TSS do_invalid_TSS has_error_code=1
idtentry segment_not_present do_segment_not_present has_error_code=1
@@ -1289,7 +1245,7 @@ apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK
idtentry int3 do_int3 has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK
-idtentry stack_segment do_stack_segment has_error_code=1 paranoid=1
+idtentry stack_segment do_stack_segment has_error_code=1
#ifdef CONFIG_XEN
idtentry xen_debug do_debug has_error_code=0
idtentry xen_int3 do_int3 has_error_code=0
@@ -1399,17 +1355,16 @@ error_sti:
/*
* There are two places in the kernel that can potentially fault with
- * usergs. Handle them here. The exception handlers after iret run with
- * kernel gs again, so don't set the user space flag. B stepping K8s
- * sometimes report an truncated RIP for IRET exceptions returning to
- * compat mode. Check for these here too.
+ * usergs. Handle them here. B stepping K8s sometimes report a
+ * truncated RIP for IRET exceptions returning to compat mode. Check
+ * for these here too.
*/
error_kernelspace:
CFI_REL_OFFSET rcx, RCX+8
incl %ebx
leaq native_irq_return_iret(%rip),%rcx
cmpq %rcx,RIP+8(%rsp)
- je error_swapgs
+ je error_bad_iret
movl %ecx,%eax /* zero extend */
cmpq %rax,RIP+8(%rsp)
je bstep_iret
@@ -1420,7 +1375,15 @@ error_kernelspace:
bstep_iret:
/* Fix truncated RIP */
movq %rcx,RIP+8(%rsp)
- jmp error_swapgs
+ /* fall through */
+
+error_bad_iret:
+ SWAPGS
+ mov %rsp,%rdi
+ call fixup_bad_iret
+ mov %rax,%rsp
+ decl %ebx /* Return to usergs */
+ jmp error_sti
CFI_ENDPROC
END(error_entry)
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 749b0e423419..e510618b2e91 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -1484,7 +1484,7 @@ unsigned long syscall_trace_enter_phase1(struct pt_regs *regs, u32 arch)
*/
if (work & _TIF_NOHZ) {
user_exit();
- work &= ~TIF_NOHZ;
+ work &= ~_TIF_NOHZ;
}
#ifdef CONFIG_SECCOMP
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 4d2128ac70bd..668d8f2a8781 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1303,10 +1303,14 @@ static void __ref remove_cpu_from_maps(int cpu)
numa_remove_cpu(cpu);
}
+static DEFINE_PER_CPU(struct completion, die_complete);
+
void cpu_disable_common(void)
{
int cpu = smp_processor_id();
+ init_completion(&per_cpu(die_complete, smp_processor_id()));
+
remove_siblinginfo(cpu);
/* It's now safe to remove this processor from the online map */
@@ -1316,8 +1320,6 @@ void cpu_disable_common(void)
fixup_irqs();
}
-static DEFINE_PER_CPU(struct completion, die_complete);
-
int native_cpu_disable(void)
{
int ret;
@@ -1327,16 +1329,21 @@ int native_cpu_disable(void)
return ret;
clear_local_APIC();
- init_completion(&per_cpu(die_complete, smp_processor_id()));
cpu_disable_common();
return 0;
}
+void cpu_die_common(unsigned int cpu)
+{
+ wait_for_completion_timeout(&per_cpu(die_complete, cpu), HZ);
+}
+
void native_cpu_die(unsigned int cpu)
{
/* We don't do anything here: idle task is faking death itself. */
- wait_for_completion_timeout(&per_cpu(die_complete, cpu), HZ);
+
+ cpu_die_common(cpu);
/* They ack this in play_dead() by setting CPU_DEAD */
if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 0d0e922fafc1..de801f22128a 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -233,32 +233,40 @@ DO_ERROR(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op)
DO_ERROR(X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun",coprocessor_segment_overrun)
DO_ERROR(X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present)
-#ifdef CONFIG_X86_32
DO_ERROR(X86_TRAP_SS, SIGBUS, "stack segment", stack_segment)
-#endif
DO_ERROR(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check)
#ifdef CONFIG_X86_64
/* Runs on IST stack */
-dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code)
-{
- enum ctx_state prev_state;
-
- prev_state = exception_enter();
- if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
- X86_TRAP_SS, SIGBUS) != NOTIFY_STOP) {
- preempt_conditional_sti(regs);
- do_trap(X86_TRAP_SS, SIGBUS, "stack segment", regs, error_code, NULL);
- preempt_conditional_cli(regs);
- }
- exception_exit(prev_state);
-}
-
dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
{
static const char str[] = "double fault";
struct task_struct *tsk = current;
+#ifdef CONFIG_X86_ESPFIX64
+ extern unsigned char native_irq_return_iret[];
+
+ /*
+ * If IRET takes a non-IST fault on the espfix64 stack, then we
+ * end up promoting it to a doublefault. In that case, modify
+ * the stack to make it look like we just entered the #GP
+ * handler from user space, similar to bad_iret.
+ */
+ if (((long)regs->sp >> PGDIR_SHIFT) == ESPFIX_PGD_ENTRY &&
+ regs->cs == __KERNEL_CS &&
+ regs->ip == (unsigned long)native_irq_return_iret)
+ {
+ struct pt_regs *normal_regs = task_pt_regs(current);
+
+ /* Fake a #GP(0) from userspace. */
+ memmove(&normal_regs->ip, (void *)regs->sp, 5*8);
+ normal_regs->orig_ax = 0; /* Missing (lost) #GP error code */
+ regs->ip = (unsigned long)general_protection;
+ regs->sp = (unsigned long)&normal_regs->orig_ax;
+ return;
+ }
+#endif
+
exception_enter();
/* Return not checked because double check cannot be ignored */
notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);
@@ -399,6 +407,35 @@ asmlinkage __visible struct pt_regs *sync_regs(struct pt_regs *eregs)
return regs;
}
NOKPROBE_SYMBOL(sync_regs);
+
+struct bad_iret_stack {
+ void *error_entry_ret;
+ struct pt_regs regs;
+};
+
+asmlinkage __visible
+struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
+{
+ /*
+ * This is called from entry_64.S early in handling a fault
+ * caused by a bad iret to user mode. To handle the fault
+ * correctly, we want to move our stack frame to task_pt_regs
+ * and we want to pretend that the exception came from the
+ * iret target.
+ */
+ struct bad_iret_stack *new_stack =
+ container_of(task_pt_regs(current),
+ struct bad_iret_stack, regs);
+
+ /* Copy the IRET target to the new stack. */
+ memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);
+
+ /* Copy the remainder of the stack from the current stack. */
+ memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip));
+
+ BUG_ON(!user_mode_vm(&new_stack->regs));
+ return new_stack;
+}
#endif
/*
@@ -778,7 +815,7 @@ void __init trap_init(void)
set_intr_gate(X86_TRAP_OLD_MF, coprocessor_segment_overrun);
set_intr_gate(X86_TRAP_TS, invalid_TSS);
set_intr_gate(X86_TRAP_NP, segment_not_present);
- set_intr_gate_ist(X86_TRAP_SS, &stack_segment, STACKFAULT_STACK);
+ set_intr_gate(X86_TRAP_SS, stack_segment);
set_intr_gate(X86_TRAP_GP, general_protection);
set_intr_gate(X86_TRAP_SPURIOUS, spurious_interrupt_bug);
set_intr_gate(X86_TRAP_MF, coprocessor_error);
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 5edf088ca51e..9f8a2faf5040 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -4287,6 +4287,7 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
fetch_register_operand(op);
break;
case OpCL:
+ op->type = OP_IMM;
op->bytes = 1;
op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
break;
@@ -4294,6 +4295,7 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
rc = decode_imm(ctxt, op, 1, true);
break;
case OpOne:
+ op->type = OP_IMM;
op->bytes = 1;
op->val = 1;
break;
@@ -4352,21 +4354,27 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
ctxt->memop.bytes = ctxt->op_bytes + 2;
goto mem_common;
case OpES:
+ op->type = OP_IMM;
op->val = VCPU_SREG_ES;
break;
case OpCS:
+ op->type = OP_IMM;
op->val = VCPU_SREG_CS;
break;
case OpSS:
+ op->type = OP_IMM;
op->val = VCPU_SREG_SS;
break;
case OpDS:
+ op->type = OP_IMM;
op->val = VCPU_SREG_DS;
break;
case OpFS:
+ op->type = OP_IMM;
op->val = VCPU_SREG_FS;
break;
case OpGS:
+ op->type = OP_IMM;
op->val = VCPU_SREG_GS;
break;
case OpImplicit:
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index ac1c4de3a484..978f402006ee 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -630,7 +630,7 @@ static int mmu_spte_clear_track_bits(u64 *sptep)
* kvm mmu, before reclaiming the page, we should
* unmap it from mmu first.
*/
- WARN_ON(!kvm_is_mmio_pfn(pfn) && !page_count(pfn_to_page(pfn)));
+ WARN_ON(!kvm_is_reserved_pfn(pfn) && !page_count(pfn_to_page(pfn)));
if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
kvm_set_pfn_accessed(pfn);
@@ -2461,7 +2461,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
spte |= PT_PAGE_SIZE_MASK;
if (tdp_enabled)
spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
- kvm_is_mmio_pfn(pfn));
+ kvm_is_reserved_pfn(pfn));
if (host_writable)
spte |= SPTE_HOST_WRITEABLE;
@@ -2737,7 +2737,7 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
* PT_PAGE_TABLE_LEVEL and there would be no adjustment done
* here.
*/
- if (!is_error_noslot_pfn(pfn) && !kvm_is_mmio_pfn(pfn) &&
+ if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) &&
level == PT_PAGE_TABLE_LEVEL &&
PageTransCompound(pfn_to_page(pfn)) &&
!has_wrprotected_page(vcpu->kvm, gfn, PT_DIRECTORY_LEVEL)) {
diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
index 7609e0e421ec..1318f75d56e4 100644
--- a/arch/x86/lib/csum-wrappers_64.c
+++ b/arch/x86/lib/csum-wrappers_64.c
@@ -41,9 +41,8 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
while (((unsigned long)src & 6) && len >= 2) {
__u16 val16;
- *errp = __get_user(val16, (const __u16 __user *)src);
- if (*errp)
- return isum;
+ if (__get_user(val16, (const __u16 __user *)src))
+ goto out_err;
*(__u16 *)dst = val16;
isum = (__force __wsum)add32_with_carry(
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 4cb8763868fc..4e5dfec750fc 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1123,7 +1123,7 @@ void mark_rodata_ro(void)
unsigned long end = (unsigned long) &__end_rodata_hpage_align;
unsigned long text_end = PFN_ALIGN(&__stop___ex_table);
unsigned long rodata_end = PFN_ALIGN(&__end_rodata);
- unsigned long all_end = PFN_ALIGN(&_end);
+ unsigned long all_end;
printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
(end - start) >> 10);
@@ -1134,7 +1134,16 @@ void mark_rodata_ro(void)
/*
* The rodata/data/bss/brk section (but not the kernel text!)
* should also be not-executable.
+ *
+ * We align all_end to PMD_SIZE because the existing mapping
+ * is a full PMD. If we aligned _brk_end to PAGE_SIZE instead, we
+ * would split the PMD, and the remainder between _brk_end and the
+ * end of the PMD would remain mapped executable.
+ *
+ * Any PMD which was set up after the one which covers _brk_end
+ * has already been zapped via cleanup_highmap().
*/
+ all_end = roundup((unsigned long)_brk_end, PMD_SIZE);
set_memory_nx(rodata_start, (all_end - rodata_start) >> PAGE_SHIFT);
rodata_test();
diff --git a/arch/x86/tools/calc_run_size.pl b/arch/x86/tools/calc_run_size.pl
new file mode 100644
index 000000000000..23210baade2d
--- /dev/null
+++ b/arch/x86/tools/calc_run_size.pl
@@ -0,0 +1,39 @@
+#!/usr/bin/perl
+#
+# Calculate the amount of space needed to run the kernel, including room for
+# the .bss and .brk sections.
+#
+# Usage:
+# objdump -h a.out | perl calc_run_size.pl
+use strict;
+
+my $mem_size = 0;
+my $file_offset = 0;
+
+my $sections=" *[0-9]+ \.(?:bss|brk) +";
+while (<>) {
+ if (/^$sections([0-9a-f]+) +(?:[0-9a-f]+ +){2}([0-9a-f]+)/) {
+ my $size = hex($1);
+ my $offset = hex($2);
+ $mem_size += $size;
+ if ($file_offset == 0) {
+ $file_offset = $offset;
+ } elsif ($file_offset != $offset) {
+ # BFD linker shows the same file offset in ELF.
+ # Gold linker shows them as consecutive.
+ next if ($file_offset + $mem_size == $offset + $size);
+
+ printf STDERR "file_offset: 0x%lx\n", $file_offset;
+ printf STDERR "mem_size: 0x%lx\n", $mem_size;
+ printf STDERR "offset: 0x%lx\n", $offset;
+ printf STDERR "size: 0x%lx\n", $size;
+
+ die ".bss and .brk are non-contiguous\n";
+ }
+ }
+}
+
+if ($file_offset == 0) {
+ die "Never found .bss or .brk file offset\n";
+}
+printf("%d\n", $mem_size + $file_offset);
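
The arithmetic the script performs can be restated in a few lines of C. The section offsets and sizes below are hypothetical stand-ins for what "objdump -h" would report for .bss and .brk; the run size is their combined memory size added to their shared (or first) file offset, provided the two sections are contiguous.

	/* Sketch of calc_run_size's arithmetic with made-up section values. */
	#include <stdio.h>

	int main(void)
	{
		unsigned long bss_off = 0x01a00000UL, bss_size = 0x00200000UL;
		unsigned long brk_off = 0x01c00000UL, brk_size = 0x00010000UL;

		unsigned long mem_size    = bss_size + brk_size;
		unsigned long file_offset = bss_off;

		/* BFD repeats the same file offset; gold lists consecutive ones. */
		if (brk_off != file_offset && file_offset + bss_size != brk_off) {
			fprintf(stderr, ".bss and .brk are non-contiguous\n");
			return 1;
		}
		printf("%lu\n", mem_size + file_offset);
		return 0;
	}
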
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 8650cdb53209..4c071aeb8417 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -510,6 +510,9 @@ static void xen_cpu_die(unsigned int cpu)
current->state = TASK_UNINTERRUPTIBLE;
schedule_timeout(HZ/10);
}
+
+ cpu_die_common(cpu);
+
xen_smp_intr_free(cpu);
xen_uninit_lock_cpu(cpu);
xen_teardown_timer(cpu);
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 49c6c3d94449..81f57e8c8f1b 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -319,8 +319,8 @@ config XTENSA_PLATFORM_S6105
config XTENSA_PLATFORM_XTFPGA
bool "XTFPGA"
+ select ETHOC if ETHERNET
select SERIAL_CONSOLE
- select ETHOC
select XTENSA_CALIBRATE_CCOUNT
help
XTFPGA is the name of the Tensilica board family (LX60, LX110, LX200, ML605).
@@ -367,7 +367,7 @@ config BUILTIN_DTB
config BLK_DEV_SIMDISK
tristate "Host file-based simulated block device support"
default n
- depends on XTENSA_PLATFORM_ISS
+ depends on XTENSA_PLATFORM_ISS && BLOCK
help
Create block devices that map to files in the host file system.
Device binding to host file may be changed at runtime via proc
diff --git a/arch/xtensa/boot/dts/lx200mx.dts b/arch/xtensa/boot/dts/lx200mx.dts
new file mode 100644
index 000000000000..249822b99bd6
--- /dev/null
+++ b/arch/xtensa/boot/dts/lx200mx.dts
@@ -0,0 +1,16 @@
+/dts-v1/;
+/include/ "xtfpga.dtsi"
+/include/ "xtfpga-flash-16m.dtsi"
+
+/ {
+ compatible = "cdns,xtensa-lx200";
+ memory@0 {
+ device_type = "memory";
+ reg = <0x00000000 0x06000000>;
+ };
+ pic: pic {
+ compatible = "cdns,xtensa-mx";
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ };
+};
diff --git a/arch/xtensa/configs/generic_kc705_defconfig b/arch/xtensa/configs/generic_kc705_defconfig
new file mode 100644
index 000000000000..f4b7b3888da8
--- /dev/null
+++ b/arch/xtensa/configs/generic_kc705_defconfig
@@ -0,0 +1,131 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
+CONFIG_IRQ_DOMAIN_DEBUG=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_MEMCG=y
+CONFIG_NAMESPACES=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_XTENSA_VARIANT_DC233C=y
+CONFIG_XTENSA_UNALIGNED_USER=y
+CONFIG_PREEMPT=y
+CONFIG_HIGHMEM=y
+# CONFIG_PCI is not set
+CONFIG_XTENSA_PLATFORM_XTFPGA=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="earlycon=uart8250,mmio32,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug"
+CONFIG_USE_OF=y
+CONFIG_BUILTIN_DTB="kc705"
+# CONFIG_COMPACTION is not set
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+# CONFIG_IPV6 is not set
+CONFIG_NETFILTER=y
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_STANDALONE is not set
+CONFIG_MTD=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_STAA=y
+CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_UBI=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SAMSUNG is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_MARVELL_PHY=y
+# CONFIG_WLAN is not set
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_HW_RANDOM=y
+# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_NOWAYOUT=y
+CONFIG_SOFT_WATCHDOG=y
+# CONFIG_VGA_CONSOLE is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS=y
+CONFIG_FANOTIFY=y
+CONFIG_VFAT_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_UBIFS_FS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_SWAP=y
+CONFIG_ROOT_NFS=y
+CONFIG_SUNRPC_DEBUG=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DEBUG_INFO=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_LOCKUP_DETECTOR=y
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+CONFIG_DEBUG_RT_MUTEXES=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_STACKTRACE=y
+CONFIG_RCU_TRACE=y
+# CONFIG_FTRACE is not set
+CONFIG_LD_NO_RELAX=y
+# CONFIG_S32C1I_SELFTEST is not set
+CONFIG_CRYPTO_ANSI_CPRNG=y
diff --git a/arch/xtensa/configs/smp_lx200_defconfig b/arch/xtensa/configs/smp_lx200_defconfig
new file mode 100644
index 000000000000..22eeacba37cc
--- /dev/null
+++ b/arch/xtensa/configs/smp_lx200_defconfig
@@ -0,0 +1,135 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
+CONFIG_IRQ_DOMAIN_DEBUG=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_MEMCG=y
+CONFIG_NAMESPACES=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_XTENSA_VARIANT_CUSTOM=y
+CONFIG_XTENSA_VARIANT_CUSTOM_NAME="test_mmuhifi_c3"
+CONFIG_XTENSA_UNALIGNED_USER=y
+CONFIG_PREEMPT=y
+CONFIG_HAVE_SMP=y
+CONFIG_SMP=y
+CONFIG_HOTPLUG_CPU=y
+# CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX is not set
+# CONFIG_PCI is not set
+CONFIG_XTENSA_PLATFORM_XTFPGA=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="earlycon=uart8250,mmio32,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug"
+CONFIG_USE_OF=y
+CONFIG_BUILTIN_DTB="lx200mx"
+# CONFIG_COMPACTION is not set
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+# CONFIG_IPV6 is not set
+CONFIG_NETFILTER=y
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_STANDALONE is not set
+CONFIG_MTD=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_STAA=y
+CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_UBI=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SAMSUNG is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_MARVELL_PHY=y
+# CONFIG_WLAN is not set
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_HW_RANDOM=y
+# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_NOWAYOUT=y
+CONFIG_SOFT_WATCHDOG=y
+# CONFIG_VGA_CONSOLE is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS=y
+CONFIG_FANOTIFY=y
+CONFIG_VFAT_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_UBIFS_FS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_SWAP=y
+CONFIG_ROOT_NFS=y
+CONFIG_SUNRPC_DEBUG=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DEBUG_INFO=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_VM=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+CONFIG_DEBUG_RT_MUTEXES=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_STACKTRACE=y
+CONFIG_RCU_TRACE=y
+# CONFIG_FTRACE is not set
+CONFIG_LD_NO_RELAX=y
+# CONFIG_S32C1I_SELFTEST is not set
+CONFIG_CRYPTO_ANSI_CPRNG=y
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index b2173e5da601..0383aed59121 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -277,6 +277,8 @@ static inline pte_t pte_mkwrite(pte_t pte)
static inline pte_t pte_mkspecial(pte_t pte)
{ return pte; }
+#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) & ~_PAGE_CA_MASK))
+
/*
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
diff --git a/arch/xtensa/include/uapi/asm/unistd.h b/arch/xtensa/include/uapi/asm/unistd.h
index 8883fc877c5c..db5bb72e2f4e 100644
--- a/arch/xtensa/include/uapi/asm/unistd.h
+++ b/arch/xtensa/include/uapi/asm/unistd.h
@@ -384,7 +384,8 @@ __SYSCALL(174, sys_chroot, 1)
#define __NR_pivot_root 175
__SYSCALL(175, sys_pivot_root, 2)
#define __NR_umount 176
-__SYSCALL(176, sys_umount, 2)
+__SYSCALL(176, sys_oldumount, 1)
+#define __ARCH_WANT_SYS_OLDUMOUNT
#define __NR_swapoff 177
__SYSCALL(177, sys_swapoff, 1)
#define __NR_sync 178
@@ -742,7 +743,14 @@ __SYSCALL(335, sys_sched_getattr, 3)
#define __NR_renameat2 336
__SYSCALL(336, sys_renameat2, 5)
-#define __NR_syscall_count 337
+#define __NR_seccomp 337
+__SYSCALL(337, sys_seccomp, 3)
+#define __NR_getrandom 338
+__SYSCALL(338, sys_getrandom, 3)
+#define __NR_memfd_create 339
+__SYSCALL(339, sys_memfd_create, 2)
+
+#define __NR_syscall_count 340
/*
* sysxtensa syscall handler