Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/Kconfig | 21
-rw-r--r--  arch/powerpc/boot/dts/ac14xx.dts | 392
-rw-r--r--  arch/powerpc/boot/dts/b4420qds.dts | 50
-rw-r--r--  arch/powerpc/boot/dts/b4860qds.dts | 61
-rw-r--r--  arch/powerpc/boot/dts/b4qds.dts | 169
-rw-r--r--  arch/powerpc/boot/dts/fsl/b4420si-post.dtsi | 98
-rw-r--r--  arch/powerpc/boot/dts/fsl/b4420si-pre.dtsi | 73
-rw-r--r--  arch/powerpc/boot/dts/fsl/b4860si-post.dtsi | 142
-rw-r--r--  arch/powerpc/boot/dts/fsl/b4860si-pre.dtsi | 83
-rw-r--r--  arch/powerpc/boot/dts/fsl/b4si-post.dtsi | 268
-rw-r--r--  arch/powerpc/boot/dts/fsl/e500mc_power_isa.dtsi | 1
-rw-r--r--  arch/powerpc/boot/dts/fsl/e5500_power_isa.dtsi | 1
-rw-r--r--  arch/powerpc/boot/dts/fsl/e6500_power_isa.dtsi | 65
-rw-r--r--  arch/powerpc/boot/dts/fsl/p1023si-post.dtsi | 1
-rw-r--r--  arch/powerpc/boot/dts/fsl/p2041si-post.dtsi | 2
-rw-r--r--  arch/powerpc/boot/dts/fsl/p3041si-post.dtsi | 2
-rw-r--r--  arch/powerpc/boot/dts/fsl/p4080si-post.dtsi | 2
-rw-r--r--  arch/powerpc/boot/dts/fsl/p5020si-post.dtsi | 2
-rw-r--r--  arch/powerpc/boot/dts/fsl/p5040si-post.dtsi | 2
-rw-r--r--  arch/powerpc/boot/dts/fsl/pq3-sec4.4-0.dtsi | 1
-rw-r--r--  arch/powerpc/boot/dts/fsl/qonverge-usb2-dr-0.dtsi | 41
-rw-r--r--  arch/powerpc/boot/dts/fsl/qoriq-gpio-1.dtsi | 41
-rw-r--r--  arch/powerpc/boot/dts/fsl/qoriq-gpio-2.dtsi | 41
-rw-r--r--  arch/powerpc/boot/dts/fsl/qoriq-gpio-3.dtsi | 41
-rw-r--r--  arch/powerpc/boot/dts/fsl/qoriq-sec4.0-0.dtsi | 1
-rw-r--r--  arch/powerpc/boot/dts/fsl/qoriq-sec4.2-0.dtsi | 1
-rw-r--r--  arch/powerpc/boot/dts/fsl/qoriq-sec5.0-0.dtsi (renamed from arch/powerpc/boot/dts/fsl/qoriq-sec4.1-0.dtsi) | 27
-rw-r--r--  arch/powerpc/boot/dts/fsl/qoriq-sec5.2-0.dtsi | 1
-rw-r--r--  arch/powerpc/boot/dts/fsl/qoriq-sec5.3-0.dtsi | 119
-rw-r--r--  arch/powerpc/boot/dts/fsl/t4240si-post.dtsi | 442
-rw-r--r--  arch/powerpc/boot/dts/fsl/t4240si-pre.dtsi | 128
-rw-r--r--  arch/powerpc/boot/dts/mpc5121.dtsi | 4
-rw-r--r--  arch/powerpc/boot/dts/mpc5121ads.dts | 2
-rw-r--r--  arch/powerpc/boot/dts/mpc5125twr.dts | 233
-rw-r--r--  arch/powerpc/boot/dts/mpc8536ds_36b.dts | 6
-rw-r--r--  arch/powerpc/boot/dts/p1021rdb-pc.dtsi | 12
-rw-r--r--  arch/powerpc/boot/dts/p1025rdb_36b.dts | 5
-rw-r--r--  arch/powerpc/boot/dts/pdm360ng.dts | 2
-rw-r--r--  arch/powerpc/boot/dts/t4240qds.dts | 224
-rw-r--r--  arch/powerpc/configs/corenet64_smp_defconfig | 46
-rw-r--r--  arch/powerpc/configs/mpc512x_defconfig | 2
-rw-r--r--  arch/powerpc/configs/mpc85xx_defconfig | 40
-rw-r--r--  arch/powerpc/configs/mpc85xx_smp_defconfig | 32
-rw-r--r--  arch/powerpc/configs/ps3_defconfig | 2
-rw-r--r--  arch/powerpc/include/asm/bitops.h | 11
-rw-r--r--  arch/powerpc/include/asm/cputable.h | 13
-rw-r--r--  arch/powerpc/include/asm/dma.h | 5
-rw-r--r--  arch/powerpc/include/asm/elf.h | 1
-rw-r--r--  arch/powerpc/include/asm/exception-64s.h | 2
-rw-r--r--  arch/powerpc/include/asm/firmware.h | 6
-rw-r--r--  arch/powerpc/include/asm/hardirq.h | 3
-rw-r--r--  arch/powerpc/include/asm/hugetlb.h | 31
-rw-r--r--  arch/powerpc/include/asm/hvcall.h | 3
-rw-r--r--  arch/powerpc/include/asm/hw_irq.h | 16
-rw-r--r--  arch/powerpc/include/asm/io.h | 4
-rw-r--r--  arch/powerpc/include/asm/kvm_asm.h | 4
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s.h | 7
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_64.h | 13
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_asm.h | 8
-rw-r--r--  arch/powerpc/include/asm/kvm_booke.h | 2
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h | 41
-rw-r--r--  arch/powerpc/include/asm/kvm_ppc.h | 114
-rw-r--r--  arch/powerpc/include/asm/linkage.h | 13
-rw-r--r--  arch/powerpc/include/asm/machdep.h | 6
-rw-r--r--  arch/powerpc/include/asm/mmu-book3e.h | 22
-rw-r--r--  arch/powerpc/include/asm/mmu-hash64.h | 60
-rw-r--r--  arch/powerpc/include/asm/opal.h | 2
-rw-r--r--  arch/powerpc/include/asm/page.h | 19
-rw-r--r--  arch/powerpc/include/asm/page_64.h | 3
-rw-r--r--  arch/powerpc/include/asm/parport.h | 4
-rw-r--r--  arch/powerpc/include/asm/pci-bridge.h | 19
-rw-r--r--  arch/powerpc/include/asm/perf_event_server.h | 9
-rw-r--r--  arch/powerpc/include/asm/pgalloc-32.h | 45
-rw-r--r--  arch/powerpc/include/asm/pgalloc-64.h | 158
-rw-r--r--  arch/powerpc/include/asm/pgalloc.h | 46
-rw-r--r--  arch/powerpc/include/asm/pgtable-ppc64-64k.h | 6
-rw-r--r--  arch/powerpc/include/asm/pgtable-ppc64.h | 3
-rw-r--r--  arch/powerpc/include/asm/pgtable.h | 8
-rw-r--r--  arch/powerpc/include/asm/ppc-opcode.h | 12
-rw-r--r--  arch/powerpc/include/asm/processor.h | 12
-rw-r--r--  arch/powerpc/include/asm/prom.h | 69
-rw-r--r--  arch/powerpc/include/asm/ptrace.h | 3
-rw-r--r--  arch/powerpc/include/asm/reg.h | 24
-rw-r--r--  arch/powerpc/include/asm/rtas.h | 6
-rw-r--r--  arch/powerpc/include/asm/smp.h | 2
-rw-r--r--  arch/powerpc/include/asm/systbl.h | 10
-rw-r--r--  arch/powerpc/include/asm/thread_info.h | 2
-rw-r--r--  arch/powerpc/include/asm/topology.h | 5
-rw-r--r--  arch/powerpc/include/asm/unistd.h | 6
-rw-r--r--  arch/powerpc/include/asm/uprobes.h | 1
-rw-r--r--  arch/powerpc/include/asm/xics.h | 1
-rw-r--r--  arch/powerpc/include/uapi/asm/cputable.h | 9
-rw-r--r--  arch/powerpc/include/uapi/asm/kvm.h | 94
-rw-r--r--  arch/powerpc/include/uapi/asm/linkage.h | 6
-rw-r--r--  arch/powerpc/include/uapi/asm/ptrace.h | 1
-rw-r--r--  arch/powerpc/include/uapi/asm/socket.h | 2
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c | 7
-rw-r--r--  arch/powerpc/kernel/cpu_setup_fsl_booke.S | 16
-rw-r--r--  arch/powerpc/kernel/cpu_setup_power.S | 31
-rw-r--r--  arch/powerpc/kernel/cputable.c | 23
-rw-r--r--  arch/powerpc/kernel/crash_dump.c | 5
-rw-r--r--  arch/powerpc/kernel/dbell.c | 2
-rw-r--r--  arch/powerpc/kernel/entry_64.S | 24
-rw-r--r--  arch/powerpc/kernel/epapr_hcalls.S | 2
-rw-r--r--  arch/powerpc/kernel/epapr_paravirt.c | 6
-rw-r--r--  arch/powerpc/kernel/exceptions-64e.S | 47
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S | 166
-rw-r--r--  arch/powerpc/kernel/fadump.c | 5
-rw-r--r--  arch/powerpc/kernel/head_44x.S | 2
-rw-r--r--  arch/powerpc/kernel/head_64.S | 1
-rw-r--r--  arch/powerpc/kernel/head_booke.h | 21
-rw-r--r--  arch/powerpc/kernel/head_fsl_booke.S | 2
-rw-r--r--  arch/powerpc/kernel/idle.c | 89
-rw-r--r--  arch/powerpc/kernel/idle_book3e.S | 32
-rw-r--r--  arch/powerpc/kernel/iommu.c | 2
-rw-r--r--  arch/powerpc/kernel/irq.c | 12
-rw-r--r--  arch/powerpc/kernel/kgdb.c | 2
-rw-r--r--  arch/powerpc/kernel/kvm.c | 7
-rw-r--r--  arch/powerpc/kernel/lparcfg.c | 12
-rw-r--r--  arch/powerpc/kernel/nvram_64.c | 3
-rw-r--r--  arch/powerpc/kernel/pci-common.c | 157
-rw-r--r--  arch/powerpc/kernel/pci_32.c | 2
-rw-r--r--  arch/powerpc/kernel/pci_64.c | 2
-rw-r--r--  arch/powerpc/kernel/proc_powerpc.c | 21
-rw-r--r--  arch/powerpc/kernel/process.c | 24
-rw-r--r--  arch/powerpc/kernel/prom_init.c | 98
-rw-r--r--  arch/powerpc/kernel/ptrace.c | 31
-rw-r--r--  arch/powerpc/kernel/ptrace32.c | 15
-rw-r--r--  arch/powerpc/kernel/rtas_flash.c | 497
-rw-r--r--  arch/powerpc/kernel/rtas_pci.c | 4
-rw-r--r--  arch/powerpc/kernel/rtasd.c | 49
-rw-r--r--  arch/powerpc/kernel/setup-common.c | 6
-rw-r--r--  arch/powerpc/kernel/setup_64.c | 4
-rw-r--r--  arch/powerpc/kernel/signal_32.c | 2
-rw-r--r--  arch/powerpc/kernel/signal_64.c | 2
-rw-r--r--  arch/powerpc/kernel/smp.c | 2
-rw-r--r--  arch/powerpc/kernel/sys_ppc32.c | 109
-rw-r--r--  arch/powerpc/kernel/sysfs.c | 2
-rw-r--r--  arch/powerpc/kernel/time.c | 4
-rw-r--r--  arch/powerpc/kernel/tm.S | 2
-rw-r--r--  arch/powerpc/kernel/traps.c | 10
-rw-r--r--  arch/powerpc/kernel/udbg.c | 6
-rw-r--r--  arch/powerpc/kernel/uprobes.c | 29
-rw-r--r--  arch/powerpc/kernel/vdso.c | 4
-rw-r--r--  arch/powerpc/kernel/vdso32/gettimeofday.S | 26
-rw-r--r--  arch/powerpc/kernel/vdso32/vdso32.lds.S | 1
-rw-r--r--  arch/powerpc/kernel/vdso64/gettimeofday.S | 26
-rw-r--r--  arch/powerpc/kernel/vdso64/vdso64.lds.S | 1
-rw-r--r--  arch/powerpc/kvm/44x.c | 12
-rw-r--r--  arch/powerpc/kvm/Kconfig | 26
-rw-r--r--  arch/powerpc/kvm/Makefile | 12
-rw-r--r--  arch/powerpc/kvm/book3s.c | 36
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_host.c | 2
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c | 122
-rw-r--r--  arch/powerpc/kvm/book3s_64_vio.c | 2
-rw-r--r--  arch/powerpc/kvm/book3s_emulate.c | 4
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c | 102
-rw-r--r--  arch/powerpc/kvm/book3s_hv_interrupts.S | 6
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rm_mmu.c | 11
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rm_xics.c | 406
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S | 228
-rw-r--r--  arch/powerpc/kvm/book3s_pr.c | 9
-rw-r--r--  arch/powerpc/kvm/book3s_pr_papr.c | 21
-rw-r--r--  arch/powerpc/kvm/book3s_rtas.c | 274
-rw-r--r--  arch/powerpc/kvm/book3s_xics.c | 1270
-rw-r--r--  arch/powerpc/kvm/book3s_xics.h | 130
-rw-r--r--  arch/powerpc/kvm/booke.c | 158
-rw-r--r--  arch/powerpc/kvm/booke_interrupts.S | 42
-rw-r--r--  arch/powerpc/kvm/e500.c | 14
-rw-r--r--  arch/powerpc/kvm/e500.h | 46
-rw-r--r--  arch/powerpc/kvm/e500_emulate.c | 19
-rw-r--r--  arch/powerpc/kvm/e500_mmu.c | 192
-rw-r--r--  arch/powerpc/kvm/e500_mmu_host.c | 84
-rw-r--r--  arch/powerpc/kvm/e500mc.c | 23
-rw-r--r--  arch/powerpc/kvm/emulate.c | 2
-rw-r--r--  arch/powerpc/kvm/irq.h | 20
-rw-r--r--  arch/powerpc/kvm/mpic.c | 1853
-rw-r--r--  arch/powerpc/kvm/powerpc.c | 133
-rw-r--r--  arch/powerpc/mm/gup.c | 18
-rw-r--r--  arch/powerpc/mm/hash_low_64.S | 22
-rw-r--r--  arch/powerpc/mm/hash_native_64.c | 178
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c | 184
-rw-r--r--  arch/powerpc/mm/hugetlbpage-hash64.c | 33
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c | 192
-rw-r--r--  arch/powerpc/mm/icswx.c | 2
-rw-r--r--  arch/powerpc/mm/init_64.c | 14
-rw-r--r--  arch/powerpc/mm/mem.c | 38
-rw-r--r--  arch/powerpc/mm/mmu_context_hash64.c | 37
-rw-r--r--  arch/powerpc/mm/numa.c | 288
-rw-r--r--  arch/powerpc/mm/pgtable_64.c | 118
-rw-r--r--  arch/powerpc/mm/slice.c | 223
-rw-r--r--  arch/powerpc/mm/tlb_nohash.c | 18
-rw-r--r--  arch/powerpc/net/bpf_jit_comp.c | 12
-rw-r--r--  arch/powerpc/perf/Makefile | 5
-rw-r--r--  arch/powerpc/perf/bhrb.S | 44
-rw-r--r--  arch/powerpc/perf/core-book3s.c | 231
-rw-r--r--  arch/powerpc/perf/power5+-pmu.c | 2
-rw-r--r--  arch/powerpc/perf/power5-pmu.c | 1
-rw-r--r--  arch/powerpc/perf/power8-pmu.c | 592
-rw-r--r--  arch/powerpc/platforms/40x/Kconfig | 7
-rw-r--r--  arch/powerpc/platforms/44x/Kconfig | 2
-rw-r--r--  arch/powerpc/platforms/512x/Kconfig | 10
-rw-r--r--  arch/powerpc/platforms/512x/Makefile | 2
-rw-r--r--  arch/powerpc/platforms/512x/clock.c | 9
-rw-r--r--  arch/powerpc/platforms/512x/mpc512x.h | 1
-rw-r--r--  arch/powerpc/platforms/512x/mpc512x_generic.c (renamed from arch/powerpc/platforms/512x/mpc5121_generic.c) | 12
-rw-r--r--  arch/powerpc/platforms/512x/mpc512x_shared.c | 38
-rw-r--r--  arch/powerpc/platforms/85xx/Kconfig | 34
-rw-r--r--  arch/powerpc/platforms/85xx/Makefile | 2
-rw-r--r--  arch/powerpc/platforms/85xx/b4_qds.c | 102
-rw-r--r--  arch/powerpc/platforms/85xx/corenet_ds.c | 5
-rw-r--r--  arch/powerpc/platforms/85xx/smp.c | 2
-rw-r--r--  arch/powerpc/platforms/85xx/t4240_qds.c | 98
-rw-r--r--  arch/powerpc/platforms/Kconfig | 4
-rw-r--r--  arch/powerpc/platforms/Kconfig.cputype | 2
-rw-r--r--  arch/powerpc/platforms/cell/Kconfig | 26
-rw-r--r--  arch/powerpc/platforms/cell/Makefile | 3
-rw-r--r--  arch/powerpc/platforms/cell/beat_htab.c | 26
-rw-r--r--  arch/powerpc/platforms/cell/cbe_cpufreq.c | 209
-rw-r--r--  arch/powerpc/platforms/cell/cbe_cpufreq.h | 24
-rw-r--r--  arch/powerpc/platforms/cell/cbe_cpufreq_pervasive.c | 115
-rw-r--r--  arch/powerpc/platforms/cell/cbe_cpufreq_pmi.c | 156
-rw-r--r--  arch/powerpc/platforms/cell/iommu.c | 2
-rw-r--r--  arch/powerpc/platforms/cell/pmu.c | 2
-rw-r--r--  arch/powerpc/platforms/cell/spu_base.c | 2
-rw-r--r--  arch/powerpc/platforms/cell/spufs/file.c | 4
-rw-r--r--  arch/powerpc/platforms/cell/spufs/inode.c | 1
-rw-r--r--  arch/powerpc/platforms/chrp/pegasos_eth.c | 20
-rw-r--r--  arch/powerpc/platforms/embedded6xx/Kconfig | 5
-rw-r--r--  arch/powerpc/platforms/embedded6xx/mpc10x.h | 11
-rw-r--r--  arch/powerpc/platforms/pasemi/cpufreq.c | 5
-rw-r--r--  arch/powerpc/platforms/powermac/cpufreq_32.c | 14
-rw-r--r--  arch/powerpc/platforms/powermac/cpufreq_64.c | 5
-rw-r--r--  arch/powerpc/platforms/powermac/pci.c | 2
-rw-r--r--  arch/powerpc/platforms/powernv/Kconfig | 5
-rw-r--r--  arch/powerpc/platforms/powernv/opal-wrappers.S | 1
-rw-r--r--  arch/powerpc/platforms/powernv/opal.c | 15
-rw-r--r--  arch/powerpc/platforms/powernv/pci-ioda.c | 317
-rw-r--r--  arch/powerpc/platforms/powernv/pci-p5ioc2.c | 15
-rw-r--r--  arch/powerpc/platforms/powernv/pci.c | 113
-rw-r--r--  arch/powerpc/platforms/powernv/pci.h | 26
-rw-r--r--  arch/powerpc/platforms/powernv/smp.c | 4
-rw-r--r--  arch/powerpc/platforms/prep/Kconfig | 23
-rw-r--r--  arch/powerpc/platforms/ps3/htab.c | 8
-rw-r--r--  arch/powerpc/platforms/ps3/time.c | 4
-rw-r--r--  arch/powerpc/platforms/pseries/firmware.c | 54
-rw-r--r--  arch/powerpc/platforms/pseries/hotplug-memory.c | 12
-rw-r--r--  arch/powerpc/platforms/pseries/iommu.c | 26
-rw-r--r--  arch/powerpc/platforms/pseries/lpar.c | 16
-rw-r--r--  arch/powerpc/platforms/pseries/mobility.c | 41
-rw-r--r--  arch/powerpc/platforms/pseries/msi.c | 21
-rw-r--r--  arch/powerpc/platforms/pseries/pci.c | 53
-rw-r--r--  arch/powerpc/platforms/pseries/plpar_wrappers.h | 15
-rw-r--r--  arch/powerpc/platforms/pseries/processor_idle.c | 34
-rw-r--r--  arch/powerpc/platforms/pseries/pseries.h | 9
-rw-r--r--  arch/powerpc/platforms/pseries/reconfig.c | 2
-rw-r--r--  arch/powerpc/platforms/pseries/scanlog.c | 32
-rw-r--r--  arch/powerpc/platforms/pseries/setup.c | 42
-rw-r--r--  arch/powerpc/platforms/wsp/Kconfig | 5
-rw-r--r--  arch/powerpc/platforms/wsp/wsp_pci.c | 2
-rw-r--r--  arch/powerpc/sysdev/Kconfig | 1
-rw-r--r--  arch/powerpc/sysdev/fsl_msi.c | 4
-rw-r--r--  arch/powerpc/sysdev/fsl_pci.c | 125
-rw-r--r--  arch/powerpc/sysdev/fsl_pci.h | 13
-rw-r--r--  arch/powerpc/sysdev/indirect_pci.c | 10
-rw-r--r--  arch/powerpc/sysdev/mpic.c | 14
-rw-r--r--  arch/powerpc/sysdev/mv64x60_dev.c | 16
-rw-r--r--  arch/powerpc/sysdev/ppc4xx_pci.c | 15
-rw-r--r--  arch/powerpc/sysdev/qe_lib/Kconfig | 2
-rw-r--r--  arch/powerpc/sysdev/rtc_cmos_setup.c | 5
-rw-r--r--  arch/powerpc/sysdev/xics/icp-native.c | 10
-rw-r--r--  arch/powerpc/xmon/xmon.c | 4
272 files changed, 12381 insertions, 2762 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index ea5bb045983a..bbbe02197afb 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -114,7 +114,6 @@ config PPC
select USE_GENERIC_SMP_HELPERS if SMP
select HAVE_OPROFILE
select HAVE_DEBUG_KMEMLEAK
- select HAVE_SYSCALL_WRAPPERS if PPC64
select GENERIC_ATOMIC64 if PPC32
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
select HAVE_PERF_EVENTS
@@ -428,11 +427,6 @@ config NODES_SHIFT
default "4"
depends on NEED_MULTIPLE_NODES
-config MAX_ACTIVE_REGIONS
- int
- default "256" if PPC64
- default "32"
-
config ARCH_SELECT_MEMORY_MODEL
def_bool y
depends on PPC64
@@ -647,14 +641,14 @@ menu "Bus options"
config ISA
bool "Support for ISA-bus hardware"
- depends on PPC_PREP || PPC_CHRP
+ depends on PPC_CHRP
select PPC_I8259
help
Find out whether you have ISA slots on your motherboard. ISA is the
name of a bus system, i.e. the way the CPU talks to the other stuff
inside your box. If you have an Apple machine, say N here; if you
- have an IBM RS/6000 or pSeries machine or a PReP machine, say Y. If
- you have an embedded board, consult your board documentation.
+ have an IBM RS/6000 or pSeries machine, say Y. If you have an
+ embedded board, consult your board documentation.
config ZONE_DMA
bool
@@ -686,7 +680,6 @@ config SBUS
config FSL_SOC
bool
select HAVE_CAN_FLEXCAN if NET && CAN
- select PPC_CLOCK
config FSL_PCI
bool
@@ -745,7 +738,6 @@ config PCI
bool "PCI support" if PPC_PCI_CHOICE
default y if !40x && !CPM2 && !8xx && !PPC_83xx \
&& !PPC_85xx && !PPC_86xx && !GAMECUBE_COMMON
- default PCI_PERMEDIA if !4xx && !CPM2 && !8xx
default PCI_QSPAN if !4xx && !CPM2 && 8xx
select ARCH_SUPPORTS_MSI
select GENERIC_PCI_IOMAP
@@ -775,11 +767,6 @@ config PCI_8260
select PPC_INDIRECT_PCI
default y
-config 8260_PCI9
- bool "Enable workaround for MPC826x erratum PCI 9"
- depends on PCI_8260 && !8272
- default y
-
source "drivers/pci/pcie/Kconfig"
source "drivers/pci/Kconfig"
@@ -969,7 +956,7 @@ config TASK_SIZE_BOOL
config TASK_SIZE
hex "Size of user task space" if TASK_SIZE_BOOL
- default "0x80000000" if PPC_PREP || PPC_8xx
+ default "0x80000000" if PPC_8xx
default "0xc0000000"
config CONSISTENT_SIZE_BOOL
diff --git a/arch/powerpc/boot/dts/ac14xx.dts b/arch/powerpc/boot/dts/ac14xx.dts
new file mode 100644
index 000000000000..a27a4609bb42
--- /dev/null
+++ b/arch/powerpc/boot/dts/ac14xx.dts
@@ -0,0 +1,392 @@
+/*
+ * Device Tree Source for the MPC5121e based ac14xx board
+ *
+ * Copyright 2012 Anatolij Gustschin <agust@denx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+
+/include/ "mpc5121.dtsi"
+
+/ {
+ model = "ac14xx";
+ compatible = "ifm,ac14xx", "fsl,mpc5121";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ aliases {
+ serial0 = &serial0;
+ serial1 = &serial7;
+ spi4 = &spi4;
+ spi5 = &spi5;
+ };
+
+ cpus {
+ PowerPC,5121@0 {
+ timebase-frequency = <40000000>; /* 40 MHz (csb/4) */
+ bus-frequency = <160000000>; /* 160 MHz csb bus */
+ clock-frequency = <400000000>; /* 400 MHz ppc core */
+ };
+ };
+
+ memory {
+ reg = <0x00000000 0x10000000>; /* 256MB at 0 */
+ };
+
+ nfc@40000000 {
+ status = "disabled";
+ };
+
+ localbus@80000020 {
+ ranges = <0x0 0x0 0xfc000000 0x04000000 /* CS0: NOR flash */
+ 0x1 0x0 0xe0000000 0x00010000 /* CS1: FRAM */
+ 0x2 0x0 0xe0100000 0x00080000 /* CS2: asi1 */
+ 0x3 0x0 0xe0300000 0x00020000 /* CS3: comm */
+ 0x5 0x0 0xe0400000 0x00010000 /* CS5: safety */
+ 0x6 0x0 0xe0200000 0x00080000>; /* CS6: asi2 */
+
+ flash@0,0 {
+ compatible = "cfi-flash";
+ reg = <0 0x00000000 0x04000000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ bank-width = <2>;
+ device-width = <2>;
+
+ partition@0 {
+ label = "dtb-kernel-production";
+ reg = <0x00000000 0x00400000>;
+ };
+ partition@1 {
+ label = "filesystem-production";
+ reg = <0x00400000 0x03400000>;
+ };
+
+ partition@2 {
+ label = "recovery";
+ reg = <0x03800000 0x00700000>;
+ };
+
+ partition@3 {
+ label = "uboot-code";
+ reg = <0x03f00000 0x00040000>;
+ };
+ partition@4 {
+ label = "uboot-env1";
+ reg = <0x03f40000 0x00020000>;
+ };
+ partition@5 {
+ label = "uboot-env2";
+ reg = <0x03f60000 0x00020000>;
+ };
+ };
+
+ fram@1,0 {
+ compatible = "ifm,ac14xx-fram", "linux,uio-pdrv-genirq";
+ reg = <1 0x00000000 0x00010000>;
+ };
+
+ asi@2,0 {
+ /* masters mapping: CS, CS offset, size */
+ reg = <2 0x00000000 0x00080000
+ 6 0x00000000 0x00080000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "ifm,ac14xx-asi-fpga";
+ gpios = <
+ &gpio_pic 26 0 /* prog */
+ &gpio_pic 27 0 /* done */
+ &gpio_pic 10 0 /* reset */
+ >;
+
+ master@1 {
+ interrupts = <20 0x2>;
+ interrupt-parent = <&gpio_pic>;
+ chipselect = <2 0x00009000 0x00009100>;
+ label = "AS-i master 1";
+ };
+
+ master@2 {
+ interrupts = <21 0x2>;
+ interrupt-parent = <&gpio_pic>;
+ chipselect = <6 0x00009000 0x00009100>;
+ label = "AS-i master 2";
+ };
+ };
+
+ netx@3,0 {
+ compatible = "ifm,netx";
+ reg = <0x3 0x00000000 0x00020000>;
+ chipselect = <3 0x00101140 0x00203100>;
+ interrupts = <17 0x8>;
+ gpios = <&gpio_pic 15 0>;
+ };
+
+ safety@5,0 {
+ compatible = "ifm,safety";
+ reg = <0x5 0x00000000 0x00010000>;
+ chipselect = <5 0x00009000 0x00009100>;
+ interrupts = <22 0x2>;
+ interrupt-parent = <&gpio_pic>;
+ gpios = <
+ &gpio_pic 12 0 /* prog */
+ &gpio_pic 11 0 /* done */
+ >;
+ };
+ };
+
+ soc@80000000 {
+
+ clock@f00 {
+ compatible = "fsl,mpc5121rev2-clock", "fsl,mpc5121-clock";
+ };
+
+ /*
+ * GPIO PIC:
+ * interrupts cell = <pin nr, sense>
+ * sense == 8: Level, low assertion
+ * sense == 2: Edge, high-to-low change
+ */
+ gpio_pic: gpio@1100 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ sdhc@1500 {
+ cd-gpios = <&gpio_pic 23 0>; /* card detect */
+ wp-gpios = <&gpio_pic 24 0>; /* write protect */
+ wp-inverted; /* WP active high */
+ };
+
+ i2c@1700 {
+ /* use Fast-mode */
+ clock-frequency = <400000>;
+
+ at24@30 {
+ compatible = "at24,24c01";
+ reg = <0x30>;
+ };
+
+ at24@31 {
+ compatible = "at24,24c01";
+ reg = <0x31>;
+ };
+
+ temp@48 {
+ compatible = "ad,ad7414";
+ reg = <0x48>;
+ };
+
+ at24@50 {
+ compatible = "at24,24c01";
+ reg = <0x50>;
+ };
+
+ at24@51 {
+ compatible = "at24,24c01";
+ reg = <0x51>;
+ };
+
+ at24@52 {
+ compatible = "at24,24c01";
+ reg = <0x52>;
+ };
+
+ at24@53 {
+ compatible = "at24,24c01";
+ reg = <0x53>;
+ };
+
+ at24@54 {
+ compatible = "at24,24c01";
+ reg = <0x54>;
+ };
+
+ at24@55 {
+ compatible = "at24,24c01";
+ reg = <0x55>;
+ };
+
+ at24@56 {
+ compatible = "at24,24c01";
+ reg = <0x56>;
+ };
+
+ at24@57 {
+ compatible = "at24,24c01";
+ reg = <0x57>;
+ };
+
+ rtc@68 {
+ compatible = "stm,m41t00";
+ reg = <0x68>;
+ };
+ };
+
+ axe_pic: axe-base@2000 {
+ compatible = "fsl,mpc5121-axe-base";
+ reg = <0x2000 0x100>;
+ interrupts = <42 0x8>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ axe-app {
+ compatible = "fsl,mpc5121-axe-app";
+ interrupt-parent = <&axe_pic>;
+ interrupts = <
+ /* soft interrupts */
+ 0 0x0 1 0x0 2 0x0 3 0x0
+ 4 0x0 5 0x0 6 0x0 7 0x0
+ /* fifo interrupts */
+ 8 0x0 9 0x0 10 0x0 11 0x0
+ >;
+ };
+
+ display@2100 {
+ edid = [00 FF FF FF FF FF FF 00 14 94 00 00 00 00 00 00
+ 0A 12 01 03 80 1C 23 78 CA 88 FF 94 52 54 8E 27
+ 1E 4C 50 00 00 00 01 01 01 01 01 01 01 01 01 01
+ 01 01 01 01 01 01 FB 00 B0 14 00 DC 05 00 08 04
+ 21 00 1C 23 00 00 00 18 00 00 00 FD 00 38 3C 1F
+ 3C 01 0A 20 20 20 20 20 20 20 00 00 00 FC 00 45
+ 54 30 31 38 30 30 33 44 4D 55 0A 0A 00 00 00 10
+ 00 41 30 30 30 30 30 30 30 30 30 30 30 31 00 D5];
+ };
+
+ can@2300 {
+ status = "disabled";
+ };
+
+ can@2380 {
+ status = "disabled";
+ };
+
+ viu@2400 {
+ status = "disabled";
+ };
+
+ mdio@2800 {
+ phy0: ethernet-phy@1f {
+ compatible = "smsc,lan8700";
+ reg = <0x1f>;
+ };
+ };
+
+ enet: ethernet@2800 {
+ phy-handle = <&phy0>;
+ };
+
+ usb@3000 {
+ status = "disabled";
+ };
+
+ usb@4000 {
+ status = "disabled";
+ };
+
+ /* PSC3 serial port A, aka ttyPSC0 */
+ serial0: psc@11300 {
+ compatible = "fsl,mpc5121-psc-uart", "fsl,mpc5121-psc";
+ fsl,rx-fifo-size = <512>;
+ fsl,tx-fifo-size = <512>;
+ };
+
+ /* PSC4 in SPI mode */
+ spi4: psc@11400 {
+ compatible = "fsl,mpc5121-psc-spi", "fsl,mpc5121-psc";
+ fsl,rx-fifo-size = <768>;
+ fsl,tx-fifo-size = <768>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ num-cs = <1>;
+ cs-gpios = <&gpio_pic 25 0>;
+
+ flash: m25p128@0 {
+ compatible = "st,m25p128";
+ spi-max-frequency = <20000000>;
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "spi-flash0";
+ reg = <0x00000000 0x01000000>;
+ };
+ };
+ };
+
+ /* PSC5 in SPI mode */
+ spi5: psc@11500 {
+ compatible = "fsl,mpc5121-psc-spi", "fsl,mpc5121-psc";
+ fsl,mode = "spi-master";
+ fsl,rx-fifo-size = <128>;
+ fsl,tx-fifo-size = <128>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ lcd@0 {
+ compatible = "ilitek,ili922x";
+ reg = <0>;
+ spi-max-frequency = <100000>;
+ spi-cpol;
+ spi-cpha;
+ };
+ };
+
+ /* PSC7 serial port C, aka ttyPSC2 */
+ serial7: psc@11700 {
+ compatible = "fsl,mpc5121-psc-uart", "fsl,mpc5121-psc";
+ fsl,rx-fifo-size = <512>;
+ fsl,tx-fifo-size = <512>;
+ };
+
+ matrix_keypad@0 {
+ compatible = "gpio-matrix-keypad";
+ debounce-delay-ms = <5>;
+ col-scan-delay-us = <1>;
+ gpio-activelow;
+ col-gpios-binary;
+ col-switch-delay-ms = <200>;
+
+ col-gpios = <&gpio_pic 1 0>; /* pin1 */
+
+ row-gpios = <&gpio_pic 2 0 /* pin2 */
+ &gpio_pic 3 0 /* pin3 */
+ &gpio_pic 4 0>; /* pin4 */
+
+ linux,keymap = <0x0000006e /* FN LEFT */
+ 0x01000067 /* UP */
+ 0x02000066 /* FN RIGHT */
+ 0x00010069 /* LEFT */
+ 0x0101006a /* DOWN */
+ 0x0201006c>; /* RIGHT */
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ backlight {
+ label = "backlight";
+ gpios = <&gpio_pic 0 0>;
+ default-state = "keep";
+ };
+ green {
+ label = "green";
+ gpios = <&gpio_pic 18 0>;
+ default-state = "keep";
+ };
+ red {
+ label = "red";
+ gpios = <&gpio_pic 19 0>;
+ default-state = "keep";
+ };
+ };
+};
diff --git a/arch/powerpc/boot/dts/b4420qds.dts b/arch/powerpc/boot/dts/b4420qds.dts
new file mode 100644
index 000000000000..923156d03b30
--- /dev/null
+++ b/arch/powerpc/boot/dts/b4420qds.dts
@@ -0,0 +1,50 @@
+/*
+ * B4420DS Device Tree Source
+ *
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * This software is provided by Freescale Semiconductor "as is" and any
+ * express or implied warranties, including, but not limited to, the implied
+ * warranties of merchantability and fitness for a particular purpose are
+ * disclaimed. In no event shall Freescale Semiconductor be liable for any
+ * direct, indirect, incidental, special, exemplary, or consequential damages
+ * (including, but not limited to, procurement of substitute goods or services;
+ * loss of use, data, or profits; or business interruption) however caused and
+ * on any theory of liability, whether in contract, strict liability, or tort
+ * (including negligence or otherwise) arising in any way out of the use of
+ * this software, even if advised of the possibility of such damage.
+ */
+
+/include/ "fsl/b4420si-pre.dtsi"
+/include/ "b4qds.dts"
+
+/ {
+ model = "fsl,B4420QDS";
+ compatible = "fsl,B4420QDS";
+
+ ifc: localbus@ffe124000 {
+ board-control@3,0 {
+ compatible = "fsl,b4420qds-fpga", "fsl,fpga-qixis";
+ };
+ };
+
+};
+
+/include/ "fsl/b4420si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/b4860qds.dts b/arch/powerpc/boot/dts/b4860qds.dts
new file mode 100644
index 000000000000..78907f38bb77
--- /dev/null
+++ b/arch/powerpc/boot/dts/b4860qds.dts
@@ -0,0 +1,61 @@
+/*
+ * B4860DS Device Tree Source
+ *
+ * Copyright 2012 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/include/ "fsl/b4860si-pre.dtsi"
+/include/ "b4qds.dts"
+
+/ {
+ model = "fsl,B4860QDS";
+ compatible = "fsl,B4860QDS";
+
+ ifc: localbus@ffe124000 {
+ board-control@3,0 {
+ compatible = "fsl,b4860qds-fpga", "fsl,fpga-qixis";
+ };
+ };
+
+ rio: rapidio@ffe0c0000 {
+ reg = <0xf 0xfe0c0000 0 0x11000>;
+
+ port1 {
+ ranges = <0 0 0xc 0x20000000 0 0x10000000>;
+ };
+ port2 {
+ ranges = <0 0 0xc 0x30000000 0 0x10000000>;
+ };
+ };
+
+};
+
+/include/ "fsl/b4860si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/b4qds.dts b/arch/powerpc/boot/dts/b4qds.dts
new file mode 100644
index 000000000000..e6d2f8f90544
--- /dev/null
+++ b/arch/powerpc/boot/dts/b4qds.dts
@@ -0,0 +1,169 @@
+/*
+ * B4420DS Device Tree Source
+ *
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * This software is provided by Freescale Semiconductor "as is" and any
+ * express or implied warranties, including, but not limited to, the implied
+ * warranties of merchantability and fitness for a particular purpose are
+ * disclaimed. In no event shall Freescale Semiconductor be liable for any
+ * direct, indirect, incidental, special, exemplary, or consequential damages
+ * (including, but not limited to, procurement of substitute goods or services;
+ * loss of use, data, or profits; or business interruption) however caused and
+ * on any theory of liability, whether in contract, strict liability, or tort
+ * (including negligence or otherwise) arising in any way out of the use of
+ * this software, even if advised of the possibility of such damage.
+ */
+
+/ {
+ model = "fsl,B4QDS";
+ compatible = "fsl,B4QDS";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ interrupt-parent = <&mpic>;
+
+ ifc: localbus@ffe124000 {
+ reg = <0xf 0xfe124000 0 0x2000>;
+ ranges = <0 0 0xf 0xe8000000 0x08000000
+ 2 0 0xf 0xff800000 0x00010000
+ 3 0 0xf 0xffdf0000 0x00008000>;
+
+ nor@0,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "cfi-flash";
+ reg = <0x0 0x0 0x8000000>;
+ bank-width = <2>;
+ device-width = <1>;
+ };
+
+ nand@2,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,ifc-nand";
+ reg = <0x2 0x0 0x10000>;
+
+ partition@0 {
+ /* This location must not be altered */
+ /* 1MB for u-boot Bootloader Image */
+ reg = <0x0 0x00100000>;
+ label = "NAND U-Boot Image";
+ read-only;
+ };
+
+ partition@100000 {
+ /* 1MB for DTB Image */
+ reg = <0x00100000 0x00100000>;
+ label = "NAND DTB Image";
+ };
+
+ partition@200000 {
+ /* 10MB for Linux Kernel Image */
+ reg = <0x00200000 0x00A00000>;
+ label = "NAND Linux Kernel Image";
+ };
+
+ partition@c00000 {
+ /* 500MB for Root file System Image */
+ reg = <0x00c00000 0x1F400000>;
+ label = "NAND RFS Image";
+ };
+ };
+
+ board-control@3,0 {
+ compatible = "fsl,b4qds-fpga", "fsl,fpga-qixis";
+ reg = <3 0 0x300>;
+ };
+ };
+
+ memory {
+ device_type = "memory";
+ };
+
+ dcsr: dcsr@f00000000 {
+ ranges = <0x00000000 0xf 0x00000000 0x01052000>;
+ };
+
+ soc: soc@ffe000000 {
+ ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
+ reg = <0xf 0xfe000000 0 0x00001000>;
+ spi@110000 {
+ flash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "sst,sst25wf040";
+ reg = <0>;
+ spi-max-frequency = <40000000>; /* input clock */
+ };
+ };
+
+ sdhc@114000 {
+ /*Disabled as there is no sdhc connector on B4420QDS board*/
+ status = "disabled";
+ };
+
+ i2c@118000 {
+ eeprom@50 {
+ compatible = "at24,24c64";
+ reg = <0x50>;
+ };
+ eeprom@51 {
+ compatible = "at24,24c256";
+ reg = <0x51>;
+ };
+ eeprom@53 {
+ compatible = "at24,24c256";
+ reg = <0x53>;
+ };
+ eeprom@57 {
+ compatible = "at24,24c256";
+ reg = <0x57>;
+ };
+ rtc@68 {
+ compatible = "dallas,ds3232";
+ reg = <0x68>;
+ };
+ };
+
+ usb@210000 {
+ dr_mode = "host";
+ phy_type = "ulpi";
+ };
+
+ };
+
+ pci0: pcie@ffe200000 {
+ reg = <0xf 0xfe200000 0 0x10000>;
+ ranges = <0x02000000 0 0xe0000000 0xc 0x00000000 0x0 0x20000000
+ 0x01000000 0 0x00000000 0xf 0xf8000000 0x0 0x00010000>;
+ pcie@0 {
+ ranges = <0x02000000 0 0xe0000000
+ 0x02000000 0 0xe0000000
+ 0 0x20000000
+
+ 0x01000000 0 0x00000000
+ 0x01000000 0 0x00000000
+ 0 0x00010000>;
+ };
+ };
+
+};
+
+/include/ "fsl/b4si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/fsl/b4420si-post.dtsi b/arch/powerpc/boot/dts/fsl/b4420si-post.dtsi
new file mode 100644
index 000000000000..5a6615d0ade2
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/b4420si-post.dtsi
@@ -0,0 +1,98 @@
+/*
+ * B4420 Silicon/SoC Device Tree Source (post include)
+ *
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * This software is provided by Freescale Semiconductor "as is" and any
+ * express or implied warranties, including, but not limited to, the implied
+ * warranties of merchantability and fitness for a particular purpose are
+ * disclaimed. In no event shall Freescale Semiconductor be liable for any
+ * direct, indirect, incidental, special, exemplary, or consequential damages
+ * (including, but not limited to, procurement of substitute goods or services;
+ * loss of use, data, or profits; or business interruption) however caused and
+ * on any theory of liability, whether in contract, strict liability, or tort
+ * (including negligence or otherwise) arising in any way out of the use of
+ * this software, even if advised of the possibility of such damage.
+ */
+
+/include/ "b4si-post.dtsi"
+
+/* controller at 0x200000 */
+&pci0 {
+ compatible = "fsl,b4420-pcie", "fsl,qoriq-pcie-v2.4";
+};
+
+&dcsr {
+ dcsr-epu@0 {
+ compatible = "fsl,b4420-dcsr-epu", "fsl,dcsr-epu";
+ };
+ dcsr-npc {
+ compatible = "fsl,b4420-dcsr-cnpc", "fsl,dcsr-cnpc";
+ };
+ dcsr-dpaa@9000 {
+ compatible = "fsl,b4420-dcsr-dpaa", "fsl,dcsr-dpaa";
+ };
+ dcsr-ocn@11000 {
+ compatible = "fsl,b4420-dcsr-ocn", "fsl,dcsr-ocn";
+ };
+ dcsr-nal@18000 {
+ compatible = "fsl,b4420-dcsr-nal", "fsl,dcsr-nal";
+ };
+ dcsr-rcpm@22000 {
+ compatible = "fsl,b4420-dcsr-rcpm", "fsl,dcsr-rcpm";
+ };
+ dcsr-snpc@30000 {
+ compatible = "fsl,b4420-dcsr-snpc", "fsl,dcsr-snpc";
+ };
+ dcsr-snpc@31000 {
+ compatible = "fsl,b4420-dcsr-snpc", "fsl,dcsr-snpc";
+ };
+ dcsr-cpu-sb-proxy@108000 {
+ compatible = "fsl,dcsr-e6500-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+ cpu-handle = <&cpu1>;
+ reg = <0x108000 0x1000 0x109000 0x1000>;
+ };
+};
+
+&soc {
+ cpc: l3-cache-controller@10000 {
+ compatible = "fsl,b4420-l3-cache-controller", "cache";
+ };
+
+ corenet-cf@18000 {
+ compatible = "fsl,b4420-corenet-cf";
+ };
+
+ guts: global-utilities@e0000 {
+ compatible = "fsl,b4420-device-config", "fsl,qoriq-device-config-2.0";
+ };
+
+ clockgen: global-utilities@e1000 {
+ compatible = "fsl,b4420-clockgen", "fsl,qoriq-clockgen-2.0";
+ };
+
+ rcpm: global-utilities@e2000 {
+ compatible = "fsl,b4420-rcpm", "fsl,qoriq-rcpm-2.0";
+ };
+
+ L2: l2-cache-controller@c20000 {
+ compatible = "fsl,b4420-l2-cache-controller";
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/b4420si-pre.dtsi b/arch/powerpc/boot/dts/fsl/b4420si-pre.dtsi
new file mode 100644
index 000000000000..7b4426e0a5a5
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/b4420si-pre.dtsi
@@ -0,0 +1,73 @@
+/*
+ * B4420 Silicon/SoC Device Tree Source (pre include)
+ *
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * This software is provided by Freescale Semiconductor "as is" and any
+ * express or implied warranties, including, but not limited to, the implied
+ * warranties of merchantability and fitness for a particular purpose are
+ * disclaimed. In no event shall Freescale Semiconductor be liable for any
+ * direct, indirect, incidental, special, exemplary, or consequential damages
+ * (including, but not limited to, procurement of substitute goods or services;
+ * loss of use, data, or profits; or business interruption) however caused and
+ * on any theory of liability, whether in contract, strict liability, or tort
+ * (including negligence or otherwise) arising in any way out of the use of
+ * this software, even if advised of the possibility of such damage.
+ */
+
+/dts-v1/;
+
+/ {
+ compatible = "fsl,B4420";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ interrupt-parent = <&mpic>;
+
+ aliases {
+ ccsr = &soc;
+ dcsr = &dcsr;
+
+ serial0 = &serial0;
+ serial1 = &serial1;
+ serial2 = &serial2;
+ serial3 = &serial3;
+ pci0 = &pci0;
+ dma0 = &dma0;
+ dma1 = &dma1;
+ sdhc = &sdhc;
+ };
+
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: PowerPC,e6500@0 {
+ device_type = "cpu";
+ reg = <0 1>;
+ next-level-cache = <&L2>;
+ };
+ cpu1: PowerPC,e6500@2 {
+ device_type = "cpu";
+ reg = <2 3>;
+ next-level-cache = <&L2>;
+ };
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/b4860si-post.dtsi b/arch/powerpc/boot/dts/fsl/b4860si-post.dtsi
new file mode 100644
index 000000000000..e5cf6c81dd66
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/b4860si-post.dtsi
@@ -0,0 +1,142 @@
+/*
+ * B4860 Silicon/SoC Device Tree Source (post include)
+ *
+ * Copyright 2012 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/include/ "b4si-post.dtsi"
+
+/* controller at 0x200000 */
+&pci0 {
+ compatible = "fsl,b4860-pcie", "fsl,qoriq-pcie-v2.4";
+};
+
+&rio {
+ compatible = "fsl,srio";
+ interrupts = <16 2 1 11>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ fsl,iommu-parent = <&pamu0>;
+ ranges;
+
+ port1 {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ cell-index = <1>;
+ fsl,liodn-reg = <&guts 0x510>; /* RIO1LIODNR */
+ };
+
+ port2 {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ cell-index = <2>;
+ fsl,liodn-reg = <&guts 0x514>; /* RIO2LIODNR */
+ };
+};
+
+&dcsr {
+ dcsr-epu@0 {
+ compatible = "fsl,b4860-dcsr-epu", "fsl,dcsr-epu";
+ };
+ dcsr-npc {
+ compatible = "fsl,b4860-dcsr-cnpc", "fsl,dcsr-cnpc";
+ };
+ dcsr-dpaa@9000 {
+ compatible = "fsl,b4860-dcsr-dpaa", "fsl,dcsr-dpaa";
+ };
+ dcsr-ocn@11000 {
+ compatible = "fsl,b4860-dcsr-ocn", "fsl,dcsr-ocn";
+ };
+ dcsr-ddr@13000 {
+ compatible = "fsl,dcsr-ddr";
+ dev-handle = <&ddr2>;
+ reg = <0x13000 0x1000>;
+ };
+ dcsr-nal@18000 {
+ compatible = "fsl,b4860-dcsr-nal", "fsl,dcsr-nal";
+ };
+ dcsr-rcpm@22000 {
+ compatible = "fsl,b4860-dcsr-rcpm", "fsl,dcsr-rcpm";
+ };
+ dcsr-snpc@30000 {
+ compatible = "fsl,b4860-dcsr-snpc", "fsl,dcsr-snpc";
+ };
+ dcsr-snpc@31000 {
+ compatible = "fsl,b4860-dcsr-snpc", "fsl,dcsr-snpc";
+ };
+ dcsr-cpu-sb-proxy@108000 {
+ compatible = "fsl,dcsr-e6500-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+ cpu-handle = <&cpu1>;
+ reg = <0x108000 0x1000 0x109000 0x1000>;
+ };
+ dcsr-cpu-sb-proxy@110000 {
+ compatible = "fsl,dcsr-e6500-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+ cpu-handle = <&cpu2>;
+ reg = <0x110000 0x1000 0x111000 0x1000>;
+ };
+ dcsr-cpu-sb-proxy@118000 {
+ compatible = "fsl,dcsr-e6500-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+ cpu-handle = <&cpu3>;
+ reg = <0x118000 0x1000 0x119000 0x1000>;
+ };
+};
+
+&soc {
+ ddr2: memory-controller@9000 {
+ compatible = "fsl,qoriq-memory-controller-v4.5", "fsl,qoriq-memory-controller";
+ reg = <0x9000 0x1000>;
+ interrupts = <16 2 1 9>;
+ };
+
+ cpc: l3-cache-controller@10000 {
+ compatible = "fsl,b4860-l3-cache-controller", "cache";
+ };
+
+ corenet-cf@18000 {
+ compatible = "fsl,b4860-corenet-cf";
+ };
+
+ guts: global-utilities@e0000 {
+ compatible = "fsl,b4860-device-config", "fsl,qoriq-device-config-2.0";
+ };
+
+ clockgen: global-utilities@e1000 {
+ compatible = "fsl,b4860-clockgen", "fsl,qoriq-clockgen-2.0";
+ };
+
+ rcpm: global-utilities@e2000 {
+ compatible = "fsl,b4860-rcpm", "fsl,qoriq-rcpm-2.0";
+ };
+
+ L2: l2-cache-controller@c20000 {
+ compatible = "fsl,b4860-l2-cache-controller";
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/b4860si-pre.dtsi b/arch/powerpc/boot/dts/fsl/b4860si-pre.dtsi
new file mode 100644
index 000000000000..5263fa46a3fb
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/b4860si-pre.dtsi
@@ -0,0 +1,83 @@
+/*
+ * B4860 Silicon/SoC Device Tree Source (pre include)
+ *
+ * Copyright 2012 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/dts-v1/;
+
+/ {
+ compatible = "fsl,B4860";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ interrupt-parent = <&mpic>;
+
+ aliases {
+ ccsr = &soc;
+ dcsr = &dcsr;
+
+ serial0 = &serial0;
+ serial1 = &serial1;
+ serial2 = &serial2;
+ serial3 = &serial3;
+ pci0 = &pci0;
+ dma0 = &dma0;
+ dma1 = &dma1;
+ sdhc = &sdhc;
+ };
+
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: PowerPC,e6500@0 {
+ device_type = "cpu";
+ reg = <0 1>;
+ next-level-cache = <&L2>;
+ };
+ cpu1: PowerPC,e6500@2 {
+ device_type = "cpu";
+ reg = <2 3>;
+ next-level-cache = <&L2>;
+ };
+ cpu2: PowerPC,e6500@4 {
+ device_type = "cpu";
+ reg = <4 5>;
+ next-level-cache = <&L2>;
+ };
+ cpu3: PowerPC,e6500@6 {
+ device_type = "cpu";
+ reg = <6 7>;
+ next-level-cache = <&L2>;
+ };
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/b4si-post.dtsi b/arch/powerpc/boot/dts/fsl/b4si-post.dtsi
new file mode 100644
index 000000000000..73991547c69b
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/b4si-post.dtsi
@@ -0,0 +1,268 @@
+/*
+ * B4420 Silicon/SoC Device Tree Source (post include)
+ *
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * This software is provided by Freescale Semiconductor "as is" and any
+ * express or implied warranties, including, but not limited to, the implied
+ * warranties of merchantability and fitness for a particular purpose are
+ * disclaimed. In no event shall Freescale Semiconductor be liable for any
+ * direct, indirect, incidental, special, exemplary, or consequential damages
+ * (including, but not limited to, procurement of substitute goods or services;
+ * loss of use, data, or profits; or business interruption) however caused and
+ * on any theory of liability, whether in contract, strict liability, or tort
+ * (including negligence or otherwise) arising in any way out of the use of
+ * this software, even if advised of the possibility of such damage.
+ */
+
+&ifc {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ compatible = "fsl,ifc", "simple-bus";
+ interrupts = <25 2 0 0>;
+};
+
+/* controller at 0x200000 */
+&pci0 {
+ compatible = "fsl,b4-pcie", "fsl,qoriq-pcie-v2.4";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ bus-range = <0x0 0xff>;
+ interrupts = <20 2 0 0>;
+ fsl,iommu-parent = <&pamu0>;
+ pcie@0 {
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ reg = <0 0 0 0 0>;
+ interrupts = <20 2 0 0>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0 0 1 &mpic 40 1 0 0
+ 0000 0 0 2 &mpic 1 1 0 0
+ 0000 0 0 3 &mpic 2 1 0 0
+ 0000 0 0 4 &mpic 3 1 0 0
+ >;
+ };
+};
+
+&dcsr {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,dcsr", "simple-bus";
+
+ dcsr-epu@0 {
+ compatible = "fsl,b4-dcsr-epu", "fsl,dcsr-epu";
+ interrupts = <52 2 0 0
+ 84 2 0 0
+ 85 2 0 0
+ 94 2 0 0
+ 95 2 0 0>;
+ reg = <0x0 0x1000>;
+ };
+ dcsr-npc {
+ compatible = "fsl,b4-dcsr-cnpc", "fsl,dcsr-cnpc";
+ reg = <0x1000 0x1000 0x1002000 0x10000>;
+ };
+ dcsr-nxc@2000 {
+ compatible = "fsl,dcsr-nxc";
+ reg = <0x2000 0x1000>;
+ };
+ dcsr-corenet {
+ compatible = "fsl,dcsr-corenet";
+ reg = <0x8000 0x1000 0x1A000 0x1000>;
+ };
+ dcsr-dpaa@9000 {
+ compatible = "fsl,b4-dcsr-dpaa", "fsl,dcsr-dpaa";
+ reg = <0x9000 0x1000>;
+ };
+ dcsr-ocn@11000 {
+ compatible = "fsl,b4-dcsr-ocn", "fsl,dcsr-ocn";
+ reg = <0x11000 0x1000>;
+ };
+ dcsr-ddr@12000 {
+ compatible = "fsl,dcsr-ddr";
+ dev-handle = <&ddr1>;
+ reg = <0x12000 0x1000>;
+ };
+ dcsr-nal@18000 {
+ compatible = "fsl,b4-dcsr-nal", "fsl,dcsr-nal";
+ reg = <0x18000 0x1000>;
+ };
+ dcsr-rcpm@22000 {
+ compatible = "fsl,b4-dcsr-rcpm", "fsl,dcsr-rcpm";
+ reg = <0x22000 0x1000>;
+ };
+ dcsr-snpc@30000 {
+ compatible = "fsl,b4-dcsr-snpc", "fsl,dcsr-snpc";
+ reg = <0x30000 0x1000 0x1022000 0x10000>;
+ };
+ dcsr-snpc@31000 {
+ compatible = "fsl,b4-dcsr-snpc", "fsl,dcsr-snpc";
+ reg = <0x31000 0x1000 0x1042000 0x10000>;
+ };
+ dcsr-cpu-sb-proxy@100000 {
+ compatible = "fsl,dcsr-e6500-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+ cpu-handle = <&cpu0>;
+ reg = <0x100000 0x1000 0x101000 0x1000>;
+ };
+};
+
+&soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ device_type = "soc";
+ compatible = "simple-bus";
+
+ soc-sram-error {
+ compatible = "fsl,soc-sram-error";
+ interrupts = <16 2 1 2>;
+ };
+
+ corenet-law@0 {
+ compatible = "fsl,corenet-law";
+ reg = <0x0 0x1000>;
+ fsl,num-laws = <32>;
+ };
+
+ ddr1: memory-controller@8000 {
+ compatible = "fsl,qoriq-memory-controller-v4.5", "fsl,qoriq-memory-controller";
+ reg = <0x8000 0x1000>;
+ interrupts = <16 2 1 8>;
+ };
+
+ cpc: l3-cache-controller@10000 {
+ compatible = "fsl,b4-l3-cache-controller", "cache";
+ reg = <0x10000 0x1000>;
+ interrupts = <16 2 1 4>;
+ };
+
+ corenet-cf@18000 {
+ compatible = "fsl,b4-corenet-cf";
+ reg = <0x18000 0x1000>;
+ interrupts = <16 2 1 0>;
+ fsl,ccf-num-csdids = <32>;
+ fsl,ccf-num-snoopids = <32>;
+ };
+
+ iommu@20000 {
+ compatible = "fsl,pamu-v1.0", "fsl,pamu";
+ reg = <0x20000 0x4000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ interrupts = <
+ 24 2 0 0
+ 16 2 1 1>;
+
+
+ /* PCIe, DMA, SRIO */
+ pamu0: pamu@0 {
+ reg = <0 0x1000>;
+ fsl,primary-cache-geometry = <8 1>;
+ fsl,secondary-cache-geometry = <32 2>;
+ };
+
+ /* AXI2, Maple */
+ pamu1: pamu@1000 {
+ reg = <0x1000 0x1000>;
+ fsl,primary-cache-geometry = <32 1>;
+ fsl,secondary-cache-geometry = <32 2>;
+ };
+
+ /* Q/BMan */
+ pamu2: pamu@2000 {
+ reg = <0x2000 0x1000>;
+ fsl,primary-cache-geometry = <32 1>;
+ fsl,secondary-cache-geometry = <32 2>;
+ };
+
+ /* AXI1, FMAN */
+ pamu3: pamu@3000 {
+ reg = <0x3000 0x1000>;
+ fsl,primary-cache-geometry = <32 1>;
+ fsl,secondary-cache-geometry = <32 2>;
+ };
+ };
+
+/include/ "qoriq-mpic.dtsi"
+
+ guts: global-utilities@e0000 {
+ compatible = "fsl,b4-device-config";
+ reg = <0xe0000 0xe00>;
+ fsl,has-rstcr;
+ fsl,liodn-bits = <12>;
+ };
+
+ clockgen: global-utilities@e1000 {
+ compatible = "fsl,b4-clockgen", "fsl,qoriq-clockgen-2.0";
+ reg = <0xe1000 0x1000>;
+ };
+
+ rcpm: global-utilities@e2000 {
+ compatible = "fsl,b4-rcpm", "fsl,qoriq-rcpm-2.0";
+ reg = <0xe2000 0x1000>;
+ };
+
+/include/ "qoriq-dma-0.dtsi"
+ dma@100300 {
+ fsl,iommu-parent = <&pamu0>;
+ fsl,liodn-reg = <&guts 0x580>; /* DMA1LIODNR */
+ };
+
+/include/ "qoriq-dma-1.dtsi"
+ dma@101300 {
+ fsl,iommu-parent = <&pamu0>;
+ fsl,liodn-reg = <&guts 0x584>; /* DMA2LIODNR */
+ };
+
+/include/ "qonverge-usb2-dr-0.dtsi"
+ usb0: usb@210000 {
+ compatible = "fsl-usb2-dr-v2.4", "fsl-usb2-dr";
+ fsl,iommu-parent = <&pamu1>;
+ fsl,liodn-reg = <&guts 0x520>; /* USB1LIODNR */
+ };
+
+/include/ "qoriq-espi-0.dtsi"
+ spi@110000 {
+ fsl,espi-num-chipselects = <4>;
+ };
+
+/include/ "qoriq-esdhc-0.dtsi"
+ sdhc@114000 {
+ sdhci,auto-cmd12;
+ fsl,iommu-parent = <&pamu1>;
+ fsl,liodn-reg = <&guts 0x530>; /* eSDHCLIODNR */
+ };
+
+/include/ "qoriq-i2c-0.dtsi"
+/include/ "qoriq-i2c-1.dtsi"
+/include/ "qoriq-duart-0.dtsi"
+/include/ "qoriq-duart-1.dtsi"
+/include/ "qoriq-sec5.3-0.dtsi"
+
+ L2: l2-cache-controller@c20000 {
+ compatible = "fsl,b4-l2-cache-controller";
+ reg = <0xc20000 0x1000>;
+ next-level-cache = <&cpc>;
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/e500mc_power_isa.dtsi b/arch/powerpc/boot/dts/fsl/e500mc_power_isa.dtsi
index 870c6535a053..ea145c91cfbd 100644
--- a/arch/powerpc/boot/dts/fsl/e500mc_power_isa.dtsi
+++ b/arch/powerpc/boot/dts/fsl/e500mc_power_isa.dtsi
@@ -53,6 +53,7 @@
power-isa-mmc; // Memory Coherence
power-isa-scpm; // Store Conditional Page Mobility
power-isa-wt; // Wait
+ fsl,eref-deo; // Data Cache Extended Operations
mmu-type = "power-embedded";
};
};
diff --git a/arch/powerpc/boot/dts/fsl/e5500_power_isa.dtsi b/arch/powerpc/boot/dts/fsl/e5500_power_isa.dtsi
index 3230212f7ad5..c254c981ae87 100644
--- a/arch/powerpc/boot/dts/fsl/e5500_power_isa.dtsi
+++ b/arch/powerpc/boot/dts/fsl/e5500_power_isa.dtsi
@@ -54,6 +54,7 @@
power-isa-scpm; // Store Conditional Page Mobility
power-isa-wt; // Wait
power-isa-64; // 64-bit
+ fsl,eref-deo; // Data Cache Extended Operations
mmu-type = "power-embedded";
};
};
diff --git a/arch/powerpc/boot/dts/fsl/e6500_power_isa.dtsi b/arch/powerpc/boot/dts/fsl/e6500_power_isa.dtsi
new file mode 100644
index 000000000000..a912dbeff359
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/e6500_power_isa.dtsi
@@ -0,0 +1,65 @@
+/*
+ * e6500 Power ISA Device Tree Source (include)
+ *
+ * Copyright 2013 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/ {
+ cpus {
+ power-isa-version = "2.06";
+ power-isa-b; // Base
+ power-isa-e; // Embedded
+ power-isa-atb; // Alternate Time Base
+ power-isa-cs; // Cache Specification
+ power-isa-ds; // Decorated Storage
+ power-isa-e.ed; // Embedded.Enhanced Debug
+ power-isa-e.pd; // Embedded.External PID
+ power-isa-e.hv; // Embedded.Hypervisor
+ power-isa-e.le; // Embedded.Little-Endian
+ power-isa-e.pm; // Embedded.Performance Monitor
+ power-isa-e.pc; // Embedded.Processor Control
+ power-isa-ecl; // Embedded Cache Locking
+ power-isa-exp; // External Proxy
+ power-isa-fp; // Floating Point
+ power-isa-fp.r; // Floating Point.Record
+ power-isa-mmc; // Memory Coherence
+ power-isa-scpm; // Store Conditional Page Mobility
+ power-isa-wt; // Wait
+ power-isa-64; // 64-bit
+ power-isa-e.pt; // Embedded.Page Table
+ power-isa-e.hv.lrat; // Embedded.Hypervisor.LRAT
+ power-isa-e.em; // Embedded Multi-Threading
+ power-isa-v; // Vector (AltiVec)
+ fsl,eref-er; // Enhanced Reservations (Load and Reserve and Store Cond.)
+ fsl,eref-deo; // Data Cache Extended Operations
+ mmu-type = "power-embedded";
+ };
+};
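For context (an illustrative sketch, not part of this patch): an SoC pre-include consumes this fragment by /include/-ing it before declaring its CPU nodes, and dtc merges the ISA category properties into the resulting /cpus node — the same pattern t4240si-pre.dtsi uses further down in this series. The core count and reg values below are hypothetical.

/dts-v1/;

/include/ "e6500_power_isa.dtsi"

/ {
	cpus {
		#address-cells = <1>;
		#size-cells = <0>;

		/* one node per e6500 core; reg lists the core's two hardware threads */
		cpu0: PowerPC,e6500@0 {
			device_type = "cpu";
			reg = <0 1>;
		};
	};
};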
diff --git a/arch/powerpc/boot/dts/fsl/p1023si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1023si-post.dtsi
index 941fa159cefb..f1105bffa915 100644
--- a/arch/powerpc/boot/dts/fsl/p1023si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p1023si-post.dtsi
@@ -148,6 +148,7 @@
crypto: crypto@300000 {
compatible = "fsl,sec-v4.2", "fsl,sec-v4.0";
+ fsl,sec-era = <3>;
#address-cells = <1>;
#size-cells = <1>;
reg = <0x30000 0x10000>;
diff --git a/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi b/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi
index 69ac1acd4349..dc6cc5afd189 100644
--- a/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi
@@ -155,7 +155,7 @@
compatible = "fsl,dcsr", "simple-bus";
dcsr-epu@0 {
- compatible = "fsl,dcsr-epu";
+ compatible = "fsl,p2041-dcsr-epu", "fsl,dcsr-epu";
interrupts = <52 2 0 0
84 2 0 0
85 2 0 0>;
diff --git a/arch/powerpc/boot/dts/fsl/p3041si-post.dtsi b/arch/powerpc/boot/dts/fsl/p3041si-post.dtsi
index 9b5a81a4529c..3fa1e22d544a 100644
--- a/arch/powerpc/boot/dts/fsl/p3041si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p3041si-post.dtsi
@@ -182,7 +182,7 @@
compatible = "fsl,dcsr", "simple-bus";
dcsr-epu@0 {
- compatible = "fsl,dcsr-epu";
+ compatible = "fsl,p3041-dcsr-epu", "fsl,dcsr-epu";
interrupts = <52 2 0 0
84 2 0 0
85 2 0 0>;
diff --git a/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi b/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi
index 19859ad851eb..34769a7eafea 100644
--- a/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi
@@ -156,7 +156,7 @@
compatible = "fsl,dcsr", "simple-bus";
dcsr-epu@0 {
- compatible = "fsl,dcsr-epu";
+ compatible = "fsl,p4080-dcsr-epu", "fsl,dcsr-epu";
interrupts = <52 2 0 0
84 2 0 0
85 2 0 0>;
diff --git a/arch/powerpc/boot/dts/fsl/p5020si-post.dtsi b/arch/powerpc/boot/dts/fsl/p5020si-post.dtsi
index 9ea77c3513f6..bc3ae5a2252f 100644
--- a/arch/powerpc/boot/dts/fsl/p5020si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p5020si-post.dtsi
@@ -184,7 +184,7 @@
compatible = "fsl,dcsr", "simple-bus";
dcsr-epu@0 {
- compatible = "fsl,dcsr-epu";
+ compatible = "fsl,p5020-dcsr-epu", "fsl,dcsr-epu";
interrupts = <52 2 0 0
84 2 0 0
85 2 0 0>;
diff --git a/arch/powerpc/boot/dts/fsl/p5040si-post.dtsi b/arch/powerpc/boot/dts/fsl/p5040si-post.dtsi
index 97f8c26f9709..a91897f6af09 100644
--- a/arch/powerpc/boot/dts/fsl/p5040si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p5040si-post.dtsi
@@ -129,7 +129,7 @@
compatible = "fsl,dcsr", "simple-bus";
dcsr-epu@0 {
- compatible = "fsl,dcsr-epu";
+ compatible = "fsl,p5040-dcsr-epu", "fsl,dcsr-epu";
interrupts = <52 2 0 0
84 2 0 0
85 2 0 0>;
diff --git a/arch/powerpc/boot/dts/fsl/pq3-sec4.4-0.dtsi b/arch/powerpc/boot/dts/fsl/pq3-sec4.4-0.dtsi
index ffadcb563ada..bb3d8266b5ce 100644
--- a/arch/powerpc/boot/dts/fsl/pq3-sec4.4-0.dtsi
+++ b/arch/powerpc/boot/dts/fsl/pq3-sec4.4-0.dtsi
@@ -34,6 +34,7 @@
crypto@30000 {
compatible = "fsl,sec-v4.4", "fsl,sec-v4.0";
+ fsl,sec-era = <3>;
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x0 0x30000 0x10000>;
diff --git a/arch/powerpc/boot/dts/fsl/qonverge-usb2-dr-0.dtsi b/arch/powerpc/boot/dts/fsl/qonverge-usb2-dr-0.dtsi
new file mode 100644
index 000000000000..29dad723091e
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/qonverge-usb2-dr-0.dtsi
@@ -0,0 +1,41 @@
+/*
+ * QorIQ Qonverge USB Host device tree stub [ controller @ offset 0x210000 ]
+ *
+ * Copyright 2013 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+usb@210000 {
+ compatible = "fsl-usb2-dr";
+ reg = <0x210000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <44 0x2 0 0>;
+};
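A note on how such stubs are consumed (illustrative sketch, not part of this patch): an SoC post-include pulls the stub in and then re-opens the node to add SoC- or board-specific properties, as the USB nodes in t4240si-post.dtsi below do. The dr_mode and phy_type values here are placeholders.

/include/ "qonverge-usb2-dr-0.dtsi"
	usb0: usb@210000 {
		dr_mode = "host";	/* placeholder; set per board */
		phy_type = "utmi";	/* placeholder; utmi or ulpi per board */
	};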
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-gpio-1.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-gpio-1.dtsi
new file mode 100644
index 000000000000..c2f9cdadb604
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/qoriq-gpio-1.dtsi
@@ -0,0 +1,41 @@
+/*
+ * QorIQ GPIO device tree stub [ controller @ offset 0x131000 ]
+ *
+ * Copyright 2013 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+gpio1: gpio@131000 {
+ compatible = "fsl,qoriq-gpio";
+ reg = <0x131000 0x1000>;
+ interrupts = <54 2 0 0>;
+ #gpio-cells = <2>;
+ gpio-controller;
+};
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-gpio-2.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-gpio-2.dtsi
new file mode 100644
index 000000000000..33f3ccbac83f
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/qoriq-gpio-2.dtsi
@@ -0,0 +1,41 @@
+/*
+ * QorIQ GPIO device tree stub [ controller @ offset 0x132000 ]
+ *
+ * Copyright 2013 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+gpio2: gpio@132000 {
+ compatible = "fsl,qoriq-gpio";
+ reg = <0x132000 0x1000>;
+ interrupts = <86 2 0 0>;
+ #gpio-cells = <2>;
+ gpio-controller;
+};
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-gpio-3.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-gpio-3.dtsi
new file mode 100644
index 000000000000..86954e95ea02
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/qoriq-gpio-3.dtsi
@@ -0,0 +1,41 @@
+/*
+ * QorIQ GPIO device tree stub [ controller @ offset 0x133000 ]
+ *
+ * Copyright 2013 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+gpio3: gpio@133000 {
+ compatible = "fsl,qoriq-gpio";
+ reg = <0x133000 0x1000>;
+ interrupts = <87 2 0 0>;
+ #gpio-cells = <2>;
+ gpio-controller;
+};
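These four stubs are meant to be stacked from an SoC post-include, as t4240si-post.dtsi does later in this patch; a consumer then references a controller through its label with the two-cell specifier (pin, flags) declared by #gpio-cells. The LED consumer below is a hypothetical sketch.

/* SoC post-include, cf. t4240si-post.dtsi below */
/include/ "qoriq-gpio-0.dtsi"
/include/ "qoriq-gpio-1.dtsi"
/include/ "qoriq-gpio-2.dtsi"
/include/ "qoriq-gpio-3.dtsi"

/* board-level consumer (hypothetical) */
leds {
	compatible = "gpio-leds";
	status-led {
		gpios = <&gpio1 4 0>;	/* pin 4, active-high; values are placeholders */
	};
};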
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-sec4.0-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-sec4.0-0.dtsi
index 0cbbac329539..02bee5fcbb9a 100644
--- a/arch/powerpc/boot/dts/fsl/qoriq-sec4.0-0.dtsi
+++ b/arch/powerpc/boot/dts/fsl/qoriq-sec4.0-0.dtsi
@@ -34,6 +34,7 @@
crypto: crypto@300000 {
compatible = "fsl,sec-v4.0";
+ fsl,sec-era = <1>;
#address-cells = <1>;
#size-cells = <1>;
reg = <0x300000 0x10000>;
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-sec4.2-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-sec4.2-0.dtsi
index 7990e0d3d6f2..7f7574e53323 100644
--- a/arch/powerpc/boot/dts/fsl/qoriq-sec4.2-0.dtsi
+++ b/arch/powerpc/boot/dts/fsl/qoriq-sec4.2-0.dtsi
@@ -34,6 +34,7 @@
crypto: crypto@300000 {
compatible = "fsl,sec-v4.2", "fsl,sec-v4.0";
+ fsl,sec-era = <3>;
#address-cells = <1>;
#size-cells = <1>;
reg = <0x300000 0x10000>;
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-sec4.1-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-sec5.0-0.dtsi
index 3308986bba0d..e298efbb0f3e 100644
--- a/arch/powerpc/boot/dts/fsl/qoriq-sec4.1-0.dtsi
+++ b/arch/powerpc/boot/dts/fsl/qoriq-sec5.0-0.dtsi
@@ -1,7 +1,7 @@
/*
- * QorIQ Sec/Crypto 4.1 device tree stub [ controller @ offset 0x300000 ]
+ * QorIQ Sec/Crypto 5.0 device tree stub [ controller @ offset 0x300000 ]
*
- * Copyright 2011 Freescale Semiconductor Inc.
+ * Copyright 2012 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -33,7 +33,8 @@
*/
crypto: crypto@300000 {
- compatible = "fsl,sec-v4.1", "fsl,sec-v4.0";
+ compatible = "fsl,sec-v5.0", "fsl,sec-v4.0";
+ fsl,sec-era = <5>;
#address-cells = <1>;
#size-cells = <1>;
reg = <0x300000 0x10000>;
@@ -41,35 +42,35 @@ crypto: crypto@300000 {
interrupts = <92 2 0 0>;
sec_jr0: jr@1000 {
- compatible = "fsl,sec-v4.1-job-ring",
+ compatible = "fsl,sec-v5.0-job-ring",
"fsl,sec-v4.0-job-ring";
reg = <0x1000 0x1000>;
interrupts = <88 2 0 0>;
};
sec_jr1: jr@2000 {
- compatible = "fsl,sec-v4.1-job-ring",
+ compatible = "fsl,sec-v5.0-job-ring",
"fsl,sec-v4.0-job-ring";
reg = <0x2000 0x1000>;
interrupts = <89 2 0 0>;
};
sec_jr2: jr@3000 {
- compatible = "fsl,sec-v4.1-job-ring",
+ compatible = "fsl,sec-v5.0-job-ring",
"fsl,sec-v4.0-job-ring";
reg = <0x3000 0x1000>;
interrupts = <90 2 0 0>;
};
sec_jr3: jr@4000 {
- compatible = "fsl,sec-v4.1-job-ring",
+ compatible = "fsl,sec-v5.0-job-ring",
"fsl,sec-v4.0-job-ring";
reg = <0x4000 0x1000>;
interrupts = <91 2 0 0>;
};
rtic@6000 {
- compatible = "fsl,sec-v4.1-rtic",
+ compatible = "fsl,sec-v5.0-rtic",
"fsl,sec-v4.0-rtic";
#address-cells = <1>;
#size-cells = <1>;
@@ -77,25 +78,25 @@ crypto: crypto@300000 {
ranges = <0x0 0x6100 0xe00>;
rtic_a: rtic-a@0 {
- compatible = "fsl,sec-v4.1-rtic-memory",
+ compatible = "fsl,sec-v5.0-rtic-memory",
"fsl,sec-v4.0-rtic-memory";
reg = <0x00 0x20 0x100 0x80>;
};
rtic_b: rtic-b@20 {
- compatible = "fsl,sec-v4.1-rtic-memory",
+ compatible = "fsl,sec-v5.0-rtic-memory",
"fsl,sec-v4.0-rtic-memory";
reg = <0x20 0x20 0x200 0x80>;
};
rtic_c: rtic-c@40 {
- compatible = "fsl,sec-v4.1-rtic-memory",
+ compatible = "fsl,sec-v5.0-rtic-memory",
"fsl,sec-v4.0-rtic-memory";
reg = <0x40 0x20 0x300 0x80>;
};
rtic_d: rtic-d@60 {
- compatible = "fsl,sec-v4.1-rtic-memory",
+ compatible = "fsl,sec-v5.0-rtic-memory",
"fsl,sec-v4.0-rtic-memory";
reg = <0x60 0x20 0x500 0x80>;
};
@@ -103,7 +104,7 @@ crypto: crypto@300000 {
};
sec_mon: sec_mon@314000 {
- compatible = "fsl,sec-v4.1-mon", "fsl,sec-v4.0-mon";
+ compatible = "fsl,sec-v5.0-mon", "fsl,sec-v4.0-mon";
reg = <0x314000 0x1000>;
interrupts = <93 2 0 0>;
};
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-sec5.2-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-sec5.2-0.dtsi
index 7b2ab8a8c1f4..33ff09d52e05 100644
--- a/arch/powerpc/boot/dts/fsl/qoriq-sec5.2-0.dtsi
+++ b/arch/powerpc/boot/dts/fsl/qoriq-sec5.2-0.dtsi
@@ -34,6 +34,7 @@
crypto: crypto@300000 {
compatible = "fsl,sec-v5.2", "fsl,sec-v5.0", "fsl,sec-v4.0";
+ fsl,sec-era = <5>;
#address-cells = <1>;
#size-cells = <1>;
reg = <0x300000 0x10000>;
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-sec5.3-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-sec5.3-0.dtsi
new file mode 100644
index 000000000000..08778221c194
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/qoriq-sec5.3-0.dtsi
@@ -0,0 +1,119 @@
+/*
+ * QorIQ Sec/Crypto 5.3 device tree stub [ controller @ offset 0x300000 ]
+ *
+ * Copyright 2012 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+crypto: crypto@300000 {
+ compatible = "fsl,sec-v5.3", "fsl,sec-v5.0", "fsl,sec-v4.0";
+ fsl,sec-era = <4>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x300000 0x10000>;
+ ranges = <0 0x300000 0x10000>;
+ interrupts = <92 2 0 0>;
+
+ sec_jr0: jr@1000 {
+ compatible = "fsl,sec-v5.3-job-ring",
+ "fsl,sec-v5.0-job-ring",
+ "fsl,sec-v4.0-job-ring";
+ reg = <0x1000 0x1000>;
+ interrupts = <88 2 0 0>;
+ };
+
+ sec_jr1: jr@2000 {
+ compatible = "fsl,sec-v5.3-job-ring",
+ "fsl,sec-v5.0-job-ring",
+ "fsl,sec-v4.0-job-ring";
+ reg = <0x2000 0x1000>;
+ interrupts = <89 2 0 0>;
+ };
+
+ sec_jr2: jr@3000 {
+ compatible = "fsl,sec-v5.3-job-ring",
+ "fsl,sec-v5.0-job-ring",
+ "fsl,sec-v4.0-job-ring";
+ reg = <0x3000 0x1000>;
+ interrupts = <90 2 0 0>;
+ };
+
+ sec_jr3: jr@4000 {
+ compatible = "fsl,sec-v5.3-job-ring",
+ "fsl,sec-v5.0-job-ring",
+ "fsl,sec-v4.0-job-ring";
+ reg = <0x4000 0x1000>;
+ interrupts = <91 2 0 0>;
+ };
+
+ rtic@6000 {
+ compatible = "fsl,sec-v5.3-rtic",
+ "fsl,sec-v5.0-rtic",
+ "fsl,sec-v4.0-rtic";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x6000 0x100>;
+ ranges = <0x0 0x6100 0xe00>;
+
+ rtic_a: rtic-a@0 {
+ compatible = "fsl,sec-v5.3-rtic-memory",
+ "fsl,sec-v5.0-rtic-memory",
+ "fsl,sec-v4.0-rtic-memory";
+ reg = <0x00 0x20 0x100 0x80>;
+ };
+
+ rtic_b: rtic-b@20 {
+ compatible = "fsl,sec-v5.3-rtic-memory",
+ "fsl,sec-v5.0-rtic-memory",
+ "fsl,sec-v4.0-rtic-memory";
+ reg = <0x20 0x20 0x200 0x80>;
+ };
+
+ rtic_c: rtic-c@40 {
+ compatible = "fsl,sec-v5.3-rtic-memory",
+ "fsl,sec-v5.0-rtic-memory",
+ "fsl,sec-v4.0-rtic-memory";
+ reg = <0x40 0x20 0x300 0x80>;
+ };
+
+ rtic_d: rtic-d@60 {
+ compatible = "fsl,sec-v5.3-rtic-memory",
+ "fsl,sec-v5.0-rtic-memory",
+ "fsl,sec-v4.0-rtic-memory";
+ reg = <0x60 0x20 0x500 0x80>;
+ };
+ };
+};
+
+sec_mon: sec_mon@314000 {
+ compatible = "fsl,sec-v5.3-mon", "fsl,sec-v5.0-mon", "fsl,sec-v4.0-mon";
+ reg = <0x314000 0x1000>;
+ interrupts = <93 2 0 0>;
+};
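As with the existing SEC 4.x/5.x stubs, this file is pulled into an SoC post-include (b4si-post.dtsi does so above), and a later include level may re-open the node; fsl,sec-era is a plain u32 recording the SEC hardware implementation era so software can tell apart revisions that share a block version. The override below is an illustrative sketch only.

/include/ "qoriq-sec5.3-0.dtsi"
	crypto@300000 {
		jr@4000 {
			status = "disabled";	/* hypothetical: ring reserved for other software */
		};
	};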
diff --git a/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi b/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi
new file mode 100644
index 000000000000..bd611a9cad32
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi
@@ -0,0 +1,442 @@
+/*
+ * T4240 Silicon/SoC Device Tree Source (post include)
+ *
+ * Copyright 2012 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&ifc {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ compatible = "fsl,ifc", "simple-bus";
+ interrupts = <25 2 0 0>;
+};
+
+/* controller at 0x240000 */
+&pci0 {
+ compatible = "fsl,t4240-pcie", "fsl,qoriq-pcie-v3.0";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ bus-range = <0x0 0xff>;
+ interrupts = <20 2 0 0>;
+ pcie@0 {
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ reg = <0 0 0 0 0>;
+ interrupts = <20 2 0 0>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0 0 1 &mpic 40 1 0 0
+ 0000 0 0 2 &mpic 1 1 0 0
+ 0000 0 0 3 &mpic 2 1 0 0
+ 0000 0 0 4 &mpic 3 1 0 0
+ >;
+ };
+};
+
+/* controller at 0x250000 */
+&pci1 {
+ compatible = "fsl,t4240-pcie", "fsl,qoriq-pcie-v3.0";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ bus-range = <0 0xff>;
+ interrupts = <21 2 0 0>;
+ pcie@0 {
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ reg = <0 0 0 0 0>;
+ interrupts = <21 2 0 0>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0 0 1 &mpic 41 1 0 0
+ 0000 0 0 2 &mpic 5 1 0 0
+ 0000 0 0 3 &mpic 6 1 0 0
+ 0000 0 0 4 &mpic 7 1 0 0
+ >;
+ };
+};
+
+/* controller at 0x260000 */
+&pci2 {
+ compatible = "fsl,t4240-pcie", "fsl,qoriq-pcie-v3.0";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ bus-range = <0x0 0xff>;
+ interrupts = <22 2 0 0>;
+ pcie@0 {
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ reg = <0 0 0 0 0>;
+ interrupts = <22 2 0 0>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0 0 1 &mpic 42 1 0 0
+ 0000 0 0 2 &mpic 9 1 0 0
+ 0000 0 0 3 &mpic 10 1 0 0
+ 0000 0 0 4 &mpic 11 1 0 0
+ >;
+ };
+};
+
+/* controller at 0x270000 */
+&pci3 {
+ compatible = "fsl,t4240-pcie", "fsl,qoriq-pcie-v3.0";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ bus-range = <0x0 0xff>;
+ interrupts = <23 2 0 0>;
+ pcie@0 {
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ reg = <0 0 0 0 0>;
+ interrupts = <23 2 0 0>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0 0 1 &mpic 43 1 0 0
+ 0000 0 0 2 &mpic 0 1 0 0
+ 0000 0 0 3 &mpic 4 1 0 0
+ 0000 0 0 4 &mpic 8 1 0 0
+ >;
+ };
+};
+
+&rio {
+ compatible = "fsl,srio";
+ interrupts = <16 2 1 11>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ port1 {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ cell-index = <1>;
+ };
+
+ port2 {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ cell-index = <2>;
+ };
+};
+
+&dcsr {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,dcsr", "simple-bus";
+
+ dcsr-epu@0 {
+ compatible = "fsl,t4240-dcsr-epu", "fsl,dcsr-epu";
+ interrupts = <52 2 0 0
+ 84 2 0 0
+ 85 2 0 0
+ 94 2 0 0
+ 95 2 0 0>;
+ reg = <0x0 0x1000>;
+ };
+ dcsr-npc {
+ compatible = "fsl,t4240-dcsr-cnpc", "fsl,dcsr-cnpc";
+ reg = <0x1000 0x1000 0x1002000 0x10000>;
+ };
+ dcsr-nxc@2000 {
+ compatible = "fsl,dcsr-nxc";
+ reg = <0x2000 0x1000>;
+ };
+ dcsr-corenet {
+ compatible = "fsl,dcsr-corenet";
+ reg = <0x8000 0x1000 0x1A000 0x1000>;
+ };
+ dcsr-dpaa@9000 {
+ compatible = "fsl,t4240-dcsr-dpaa", "fsl,dcsr-dpaa";
+ reg = <0x9000 0x1000>;
+ };
+ dcsr-ocn@11000 {
+ compatible = "fsl,t4240-dcsr-ocn", "fsl,dcsr-ocn";
+ reg = <0x11000 0x1000>;
+ };
+ dcsr-ddr@12000 {
+ compatible = "fsl,dcsr-ddr";
+ dev-handle = <&ddr1>;
+ reg = <0x12000 0x1000>;
+ };
+ dcsr-ddr@13000 {
+ compatible = "fsl,dcsr-ddr";
+ dev-handle = <&ddr2>;
+ reg = <0x13000 0x1000>;
+ };
+ dcsr-ddr@14000 {
+ compatible = "fsl,dcsr-ddr";
+ dev-handle = <&ddr3>;
+ reg = <0x14000 0x1000>;
+ };
+ dcsr-nal@18000 {
+ compatible = "fsl,t4240-dcsr-nal", "fsl,dcsr-nal";
+ reg = <0x18000 0x1000>;
+ };
+ dcsr-rcpm@22000 {
+ compatible = "fsl,t4240-dcsr-rcpm", "fsl,dcsr-rcpm";
+ reg = <0x22000 0x1000>;
+ };
+ dcsr-snpc@30000 {
+ compatible = "fsl,t4240-dcsr-snpc", "fsl,dcsr-snpc";
+ reg = <0x30000 0x1000 0x1022000 0x10000>;
+ };
+ dcsr-snpc@31000 {
+ compatible = "fsl,t4240-dcsr-snpc", "fsl,dcsr-snpc";
+ reg = <0x31000 0x1000 0x1042000 0x10000>;
+ };
+ dcsr-snpc@32000 {
+ compatible = "fsl,t4240-dcsr-snpc", "fsl,dcsr-snpc";
+ reg = <0x32000 0x1000 0x1062000 0x10000>;
+ };
+ dcsr-cpu-sb-proxy@100000 {
+ compatible = "fsl,dcsr-e6500-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+ cpu-handle = <&cpu0>;
+ reg = <0x100000 0x1000 0x101000 0x1000>;
+ };
+ dcsr-cpu-sb-proxy@108000 {
+ compatible = "fsl,dcsr-e6500-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+ cpu-handle = <&cpu1>;
+ reg = <0x108000 0x1000 0x109000 0x1000>;
+ };
+ dcsr-cpu-sb-proxy@110000 {
+ compatible = "fsl,dcsr-e6500-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+ cpu-handle = <&cpu2>;
+ reg = <0x110000 0x1000 0x111000 0x1000>;
+ };
+ dcsr-cpu-sb-proxy@118000 {
+ compatible = "fsl,dcsr-e6500-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+ cpu-handle = <&cpu3>;
+ reg = <0x118000 0x1000 0x119000 0x1000>;
+ };
+ dcsr-cpu-sb-proxy@120000 {
+ compatible = "fsl,dcsr-e6500-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+ cpu-handle = <&cpu4>;
+ reg = <0x120000 0x1000 0x121000 0x1000>;
+ };
+ dcsr-cpu-sb-proxy@128000 {
+ compatible = "fsl,dcsr-e6500-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+ cpu-handle = <&cpu5>;
+ reg = <0x128000 0x1000 0x129000 0x1000>;
+ };
+ dcsr-cpu-sb-proxy@130000 {
+ compatible = "fsl,dcsr-e6500-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+ cpu-handle = <&cpu6>;
+ reg = <0x130000 0x1000 0x131000 0x1000>;
+ };
+ dcsr-cpu-sb-proxy@138000 {
+ compatible = "fsl,dcsr-e6500-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+ cpu-handle = <&cpu7>;
+ reg = <0x138000 0x1000 0x139000 0x1000>;
+ };
+ dcsr-cpu-sb-proxy@140000 {
+ compatible = "fsl,dcsr-e6500-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+ cpu-handle = <&cpu8>;
+ reg = <0x140000 0x1000 0x141000 0x1000>;
+ };
+ dcsr-cpu-sb-proxy@148000 {
+ compatible = "fsl,dcsr-e6500-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+ cpu-handle = <&cpu9>;
+ reg = <0x148000 0x1000 0x149000 0x1000>;
+ };
+ dcsr-cpu-sb-proxy@150000 {
+ compatible = "fsl,dcsr-e6500-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+ cpu-handle = <&cpu10>;
+ reg = <0x150000 0x1000 0x151000 0x1000>;
+ };
+ dcsr-cpu-sb-proxy@158000 {
+ compatible = "fsl,dcsr-e6500-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+ cpu-handle = <&cpu11>;
+ reg = <0x158000 0x1000 0x159000 0x1000>;
+ };
+};
+
+&soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ device_type = "soc";
+ compatible = "simple-bus";
+
+ soc-sram-error {
+ compatible = "fsl,soc-sram-error";
+ interrupts = <16 2 1 29>;
+ };
+
+ corenet-law@0 {
+ compatible = "fsl,corenet-law";
+ reg = <0x0 0x1000>;
+ fsl,num-laws = <32>;
+ };
+
+ ddr1: memory-controller@8000 {
+ compatible = "fsl,qoriq-memory-controller-v4.7",
+ "fsl,qoriq-memory-controller";
+ reg = <0x8000 0x1000>;
+ interrupts = <16 2 1 23>;
+ };
+
+ ddr2: memory-controller@9000 {
+ compatible = "fsl,qoriq-memory-controller-v4.7",
+ "fsl,qoriq-memory-controller";
+ reg = <0x9000 0x1000>;
+ interrupts = <16 2 1 22>;
+ };
+
+ ddr3: memory-controller@a000 {
+ compatible = "fsl,qoriq-memory-controller-v4.7",
+ "fsl,qoriq-memory-controller";
+ reg = <0xa000 0x1000>;
+ interrupts = <16 2 1 21>;
+ };
+
+ cpc: l3-cache-controller@10000 {
+ compatible = "fsl,t4240-l3-cache-controller", "cache";
+ reg = <0x10000 0x1000
+ 0x11000 0x1000
+ 0x12000 0x1000>;
+ interrupts = <16 2 1 27
+ 16 2 1 26
+ 16 2 1 25>;
+ };
+
+ corenet-cf@18000 {
+ compatible = "fsl,corenet-cf";
+ reg = <0x18000 0x1000>;
+ interrupts = <16 2 1 31>;
+ fsl,ccf-num-csdids = <32>;
+ fsl,ccf-num-snoopids = <32>;
+ };
+
+ iommu@20000 {
+ compatible = "fsl,pamu-v1.0", "fsl,pamu";
+ reg = <0x20000 0x6000>;
+ interrupts = <
+ 24 2 0 0
+ 16 2 1 30>;
+ };
+
+/include/ "qoriq-mpic.dtsi"
+
+ guts: global-utilities@e0000 {
+ compatible = "fsl,t4240-device-config", "fsl,qoriq-device-config-2.0";
+ reg = <0xe0000 0xe00>;
+ fsl,has-rstcr;
+ fsl,liodn-bits = <12>;
+ };
+
+ clockgen: global-utilities@e1000 {
+ compatible = "fsl,t4240-clockgen", "fsl,qoriq-clockgen-2.0";
+ reg = <0xe1000 0x1000>;
+ };
+
+ rcpm: global-utilities@e2000 {
+ compatible = "fsl,t4240-rcpm", "fsl,qoriq-rcpm-2.0";
+ reg = <0xe2000 0x1000>;
+ };
+
+ sfp: sfp@e8000 {
+ compatible = "fsl,t4240-sfp";
+ reg = <0xe8000 0x1000>;
+ };
+
+ serdes: serdes@ea000 {
+ compatible = "fsl,t4240-serdes";
+ reg = <0xea000 0x4000>;
+ };
+
+/include/ "qoriq-dma-0.dtsi"
+/include/ "qoriq-dma-1.dtsi"
+
+/include/ "qoriq-espi-0.dtsi"
+ spi@110000 {
+ fsl,espi-num-chipselects = <4>;
+ };
+
+/include/ "qoriq-esdhc-0.dtsi"
+ sdhc@114000 {
+ compatible = "fsl,t4240-esdhc", "fsl,esdhc";
+ sdhci,auto-cmd12;
+ };
+/include/ "qoriq-i2c-0.dtsi"
+/include/ "qoriq-i2c-1.dtsi"
+/include/ "qoriq-duart-0.dtsi"
+/include/ "qoriq-duart-1.dtsi"
+/include/ "qoriq-gpio-0.dtsi"
+/include/ "qoriq-gpio-1.dtsi"
+/include/ "qoriq-gpio-2.dtsi"
+/include/ "qoriq-gpio-3.dtsi"
+/include/ "qoriq-usb2-mph-0.dtsi"
+ usb0: usb@210000 {
+ compatible = "fsl-usb2-mph-v2.4", "fsl-usb2-mph";
+ phy_type = "utmi";
+ port0;
+ };
+/include/ "qoriq-usb2-dr-0.dtsi"
+ usb1: usb@211000 {
+ compatible = "fsl-usb2-dr-v2.4", "fsl-usb2-dr";
+ dr_mode = "host";
+ phy_type = "utmi";
+ };
+/include/ "qoriq-sata2-0.dtsi"
+/include/ "qoriq-sata2-1.dtsi"
+/include/ "qoriq-sec5.0-0.dtsi"
+
+ L2_1: l2-cache-controller@c20000 {
+ compatible = "fsl,t4240-l2-cache-controller";
+ reg = <0xc20000 0x40000>;
+ next-level-cache = <&cpc>;
+ };
+ L2_2: l2-cache-controller@c60000 {
+ compatible = "fsl,t4240-l2-cache-controller";
+ reg = <0xc60000 0x40000>;
+ next-level-cache = <&cpc>;
+ };
+ L2_3: l2-cache-controller@ca0000 {
+ compatible = "fsl,t4240-l2-cache-controller";
+ reg = <0xca0000 0x40000>;
+ next-level-cache = <&cpc>;
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/t4240si-pre.dtsi b/arch/powerpc/boot/dts/fsl/t4240si-pre.dtsi
new file mode 100644
index 000000000000..a93c55a88560
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/t4240si-pre.dtsi
@@ -0,0 +1,128 @@
+/*
+ * T4240 Silicon/SoC Device Tree Source (pre include)
+ *
+ * Copyright 2012 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/dts-v1/;
+
+/include/ "e6500_power_isa.dtsi"
+
+/ {
+ compatible = "fsl,T4240";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ interrupt-parent = <&mpic>;
+
+ aliases {
+ ccsr = &soc;
+ dcsr = &dcsr;
+
+ serial0 = &serial0;
+ serial1 = &serial1;
+ serial2 = &serial2;
+ serial3 = &serial3;
+ crypto = &crypto;
+ pci0 = &pci0;
+ pci1 = &pci1;
+ pci2 = &pci2;
+ pci3 = &pci3;
+ dma0 = &dma0;
+ dma1 = &dma1;
+ sdhc = &sdhc;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: PowerPC,e6500@0 {
+ device_type = "cpu";
+ reg = <0 1>;
+ next-level-cache = <&L2_1>;
+ };
+ cpu1: PowerPC,e6500@2 {
+ device_type = "cpu";
+ reg = <2 3>;
+ next-level-cache = <&L2_1>;
+ };
+ cpu2: PowerPC,e6500@4 {
+ device_type = "cpu";
+ reg = <4 5>;
+ next-level-cache = <&L2_1>;
+ };
+ cpu3: PowerPC,e6500@6 {
+ device_type = "cpu";
+ reg = <6 7>;
+ next-level-cache = <&L2_1>;
+ };
+ cpu4: PowerPC,e6500@8 {
+ device_type = "cpu";
+ reg = <8 9>;
+ next-level-cache = <&L2_2>;
+ };
+ cpu5: PowerPC,e6500@10 {
+ device_type = "cpu";
+ reg = <10 11>;
+ next-level-cache = <&L2_2>;
+ };
+ cpu6: PowerPC,e6500@12 {
+ device_type = "cpu";
+ reg = <12 13>;
+ next-level-cache = <&L2_2>;
+ };
+ cpu7: PowerPC,e6500@14 {
+ device_type = "cpu";
+ reg = <14 15>;
+ next-level-cache = <&L2_2>;
+ };
+ cpu8: PowerPC,e6500@16 {
+ device_type = "cpu";
+ reg = <16 17>;
+ next-level-cache = <&L2_3>;
+ };
+ cpu9: PowerPC,e6500@18 {
+ device_type = "cpu";
+ reg = <18 19>;
+ next-level-cache = <&L2_3>;
+ };
+ cpu10: PowerPC,e6500@20 {
+ device_type = "cpu";
+ reg = <20 21>;
+ next-level-cache = <&L2_3>;
+ };
+ cpu11: PowerPC,e6500@22 {
+ device_type = "cpu";
+ reg = <22 23>;
+ next-level-cache = <&L2_3>;
+ };
+ };
+};
diff --git a/arch/powerpc/boot/dts/mpc5121.dtsi b/arch/powerpc/boot/dts/mpc5121.dtsi
index 723e292b6b4e..bd14c00e5146 100644
--- a/arch/powerpc/boot/dts/mpc5121.dtsi
+++ b/arch/powerpc/boot/dts/mpc5121.dtsi
@@ -152,6 +152,8 @@
compatible = "fsl,mpc5121-sdhc";
reg = <0x1500 0x100>;
interrupts = <8 0x8>;
+ dmas = <&dma0 30>;
+ dma-names = "rx-tx";
};
i2c@1700 {
@@ -384,7 +386,7 @@
interrupts = <40 0x8>;
};
- dma@14000 {
+ dma0: dma@14000 {
compatible = "fsl,mpc5121-dma";
reg = <0x14000 0x1800>;
interrupts = <65 0x8>;
diff --git a/arch/powerpc/boot/dts/mpc5121ads.dts b/arch/powerpc/boot/dts/mpc5121ads.dts
index f269b1382ef7..7d3cb79185cb 100644
--- a/arch/powerpc/boot/dts/mpc5121ads.dts
+++ b/arch/powerpc/boot/dts/mpc5121ads.dts
@@ -13,7 +13,7 @@
/ {
model = "mpc5121ads";
- compatible = "fsl,mpc5121ads";
+ compatible = "fsl,mpc5121ads", "fsl,mpc5121";
nfc@40000000 {
/*
diff --git a/arch/powerpc/boot/dts/mpc5125twr.dts b/arch/powerpc/boot/dts/mpc5125twr.dts
new file mode 100644
index 000000000000..4177b62240c2
--- /dev/null
+++ b/arch/powerpc/boot/dts/mpc5125twr.dts
@@ -0,0 +1,233 @@
+/*
+ * STx/Freescale ADS5125 MPC5125 silicon
+ *
+ * Copyright (C) 2009 Freescale Semiconductor Inc. All rights reserved.
+ *
+ * Reworked by Matteo Facchinetti (engineering@sirius-es.it)
+ * Copyright (C) 2013 Sirius Electronic Systems
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+/dts-v1/;
+
+/ {
+ model = "mpc5125twr"; // In BSP "mpc5125ads"
+ compatible = "fsl,mpc5125ads", "fsl,mpc5125";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ interrupt-parent = <&ipic>;
+
+ aliases {
+ gpio0 = &gpio0;
+ gpio1 = &gpio1;
+ ethernet0 = &eth0;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ PowerPC,5125@0 {
+ device_type = "cpu";
+ reg = <0>;
+ d-cache-line-size = <0x20>; // 32 bytes
+ i-cache-line-size = <0x20>; // 32 bytes
+ d-cache-size = <0x8000>; // L1, 32K
+ i-cache-size = <0x8000>; // L1, 32K
+ timebase-frequency = <49500000>;// 49.5 MHz (csb/4)
+ bus-frequency = <198000000>; // 198 MHz csb bus
+ clock-frequency = <396000000>; // 396 MHz ppc core
+ };
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x00000000 0x10000000>; // 256MB at 0
+ };
+
+ sram@30000000 {
+ compatible = "fsl,mpc5121-sram";
+ reg = <0x30000000 0x08000>; // 32K at 0x30000000
+ };
+
+ soc@80000000 {
+ compatible = "fsl,mpc5121-immr";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ #interrupt-cells = <2>;
+ ranges = <0x0 0x80000000 0x400000>;
+ reg = <0x80000000 0x400000>;
+ bus-frequency = <66000000>; // 66 MHz ips bus
+
+ // IPIC
+ // interrupts cell = <intr #, sense>
+ // sense values match linux IORESOURCE_IRQ_* defines:
+ // sense == 8: Level, low assertion
+ // sense == 2: Edge, high-to-low change
+ //
+ ipic: interrupt-controller@c00 {
+ compatible = "fsl,mpc5121-ipic", "fsl,ipic";
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <2>;
+ reg = <0xc00 0x100>;
+ };
+
+ rtc@a00 { // Real time clock
+ compatible = "fsl,mpc5121-rtc";
+ reg = <0xa00 0x100>;
+ interrupts = <79 0x8 80 0x8>;
+ };
+
+ reset@e00 { // Reset module
+ compatible = "fsl,mpc5125-reset";
+ reg = <0xe00 0x100>;
+ };
+
+ clock@f00 { // Clock control
+ compatible = "fsl,mpc5121-clock";
+ reg = <0xf00 0x100>;
+ };
+
+ pmc@1000{ // Power Management Controller
+ compatible = "fsl,mpc5121-pmc";
+ reg = <0x1000 0x100>;
+ interrupts = <83 0x2>;
+ };
+
+ gpio0: gpio@1100 {
+ compatible = "fsl,mpc5125-gpio";
+ reg = <0x1100 0x080>;
+ interrupts = <78 0x8>;
+ };
+
+ gpio1: gpio@1180 {
+ compatible = "fsl,mpc5125-gpio";
+ reg = <0x1180 0x080>;
+ interrupts = <86 0x8>;
+ };
+
+ can@1300 { // CAN rev.2
+ compatible = "fsl,mpc5121-mscan";
+ interrupts = <12 0x8>;
+ reg = <0x1300 0x80>;
+ };
+
+ can@1380 {
+ compatible = "fsl,mpc5121-mscan";
+ interrupts = <13 0x8>;
+ reg = <0x1380 0x80>;
+ };
+
+ sdhc@1500 {
+ compatible = "fsl,mpc5121-sdhc";
+ interrupts = <8 0x8>;
+ reg = <0x1500 0x100>;
+ };
+
+ i2c@1700 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,mpc5121-i2c", "fsl-i2c";
+ reg = <0x1700 0x20>;
+ interrupts = <0x9 0x8>;
+ };
+
+ i2c@1720 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,mpc5121-i2c", "fsl-i2c";
+ reg = <0x1720 0x20>;
+ interrupts = <0xa 0x8>;
+ };
+
+ i2c@1740 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,mpc5121-i2c", "fsl-i2c";
+ reg = <0x1740 0x20>;
+ interrupts = <0xb 0x8>;
+ };
+
+ i2ccontrol@1760 {
+ compatible = "fsl,mpc5121-i2c-ctrl";
+ reg = <0x1760 0x8>;
+ };
+
+ diu@2100 {
+ compatible = "fsl,mpc5121-diu";
+ reg = <0x2100 0x100>;
+ interrupts = <64 0x8>;
+ };
+
+ mdio@2800 {
+ compatible = "fsl,mpc5121-fec-mdio";
+ reg = <0x2800 0x800>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ phy0: ethernet-phy@0 {
+ reg = <1>;
+ };
+ };
+
+ eth0: ethernet@2800 {
+ compatible = "fsl,mpc5125-fec";
+ reg = <0x2800 0x800>;
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ interrupts = <4 0x8>;
+ phy-handle = < &phy0 >;
+ phy-connection-type = "rmii";
+ };
+
+ // IO control
+ ioctl@a000 {
+ compatible = "fsl,mpc5125-ioctl";
+ reg = <0xA000 0x1000>;
+ };
+
+ usb@3000 {
+ compatible = "fsl,mpc5121-usb2-dr";
+ reg = <0x3000 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <43 0x8>;
+ dr_mode = "host";
+ phy_type = "ulpi";
+ };
+
+ // 5125 PSCs are not 52xx or 5121 PSC compatible
+ // PSC1 uart0 aka ttyPSC0
+ serial@11100 {
+ compatible = "fsl,mpc5125-psc-uart", "fsl,mpc5125-psc";
+ reg = <0x11100 0x100>;
+ interrupts = <40 0x8>;
+ fsl,rx-fifo-size = <16>;
+ fsl,tx-fifo-size = <16>;
+ };
+
+ // PSC9 uart1 aka ttyPSC1
+ serial@11900 {
+ compatible = "fsl,mpc5125-psc-uart", "fsl,mpc5125-psc";
+ reg = <0x11900 0x100>;
+ interrupts = <40 0x8>;
+ fsl,rx-fifo-size = <16>;
+ fsl,tx-fifo-size = <16>;
+ };
+
+ pscfifo@11f00 {
+ compatible = "fsl,mpc5121-psc-fifo";
+ reg = <0x11f00 0x100>;
+ interrupts = <40 0x8>;
+ };
+
+ dma@14000 {
+ compatible = "fsl,mpc5121-dma"; // BSP name: "mpc512x-dma2"
+ reg = <0x14000 0x1800>;
+ interrupts = <65 0x8>;
+ };
+ };
+};
diff --git a/arch/powerpc/boot/dts/mpc8536ds_36b.dts b/arch/powerpc/boot/dts/mpc8536ds_36b.dts
index f8a3b3413176..6c723ee108cd 100644
--- a/arch/powerpc/boot/dts/mpc8536ds_36b.dts
+++ b/arch/powerpc/boot/dts/mpc8536ds_36b.dts
@@ -32,7 +32,7 @@
reg = <0 0 0 0>; // Filled by U-Boot
};
- lbc: localbus@ffe05000 {
+ lbc: localbus@fffe05000 {
reg = <0xf 0xffe05000 0 0x1000>;
ranges = <0x0 0x0 0xf 0xe8000000 0x08000000
@@ -44,7 +44,7 @@
ranges = <0x0 0xf 0xffe00000 0x100000>;
};
- pci0: pci@ffe08000 {
+ pci0: pci@fffe08000 {
reg = <0xf 0xffe08000 0 0x1000>;
ranges = <0x02000000 0 0xf0000000 0xc 0x00000000 0 0x10000000
0x01000000 0 0x00000000 0xf 0xffc00000 0 0x00010000>;
@@ -59,7 +59,7 @@
0x8800 0 0 4 &mpic 4 1 0 0>;
};
- pci1: pcie@ffe09000 {
+ pci1: pcie@fffe09000 {
reg = <0xf 0xffe09000 0 0x1000>;
ranges = <0x02000000 0 0xf8000000 0xc 0x18000000 0 0x08000000
0x01000000 0 0x00000000 0xf 0xffc20000 0 0x00010000>;
diff --git a/arch/powerpc/boot/dts/p1021rdb-pc.dtsi b/arch/powerpc/boot/dts/p1021rdb-pc.dtsi
index c13abfbbe2e2..d6274c58f496 100644
--- a/arch/powerpc/boot/dts/p1021rdb-pc.dtsi
+++ b/arch/powerpc/boot/dts/p1021rdb-pc.dtsi
@@ -62,11 +62,19 @@
};
partition@400000 {
- /* 11MB for JFFS2 based Root file System */
- reg = <0x00400000 0x00b00000>;
+ /* 10.75MB for JFFS2 based Root file System */
+ reg = <0x00400000 0x00ac0000>;
label = "NOR JFFS2 Root File System";
};
+ partition@ec0000 {
+ /* This location must not be altered */
+ /* 256KB for QE ucode firmware*/
+ reg = <0x00ec0000 0x00040000>;
+ label = "NOR QE microcode firmware";
+ read-only;
+ };
+
partition@f00000 {
/* This location must not be altered */
/* 512KB for u-boot Bootloader Image */
diff --git a/arch/powerpc/boot/dts/p1025rdb_36b.dts b/arch/powerpc/boot/dts/p1025rdb_36b.dts
index 4ce4bfa0eda4..06deb6f341ba 100644
--- a/arch/powerpc/boot/dts/p1025rdb_36b.dts
+++ b/arch/powerpc/boot/dts/p1025rdb_36b.dts
@@ -82,6 +82,11 @@
0x0 0x100000>;
};
};
+
+ qe: qe@fffe80000 {
+ status = "disabled"; /* no firmware loaded */
+ };
+
};
/include/ "p1025rdb.dtsi"
diff --git a/arch/powerpc/boot/dts/pdm360ng.dts b/arch/powerpc/boot/dts/pdm360ng.dts
index 0b069477838a..74337403faee 100644
--- a/arch/powerpc/boot/dts/pdm360ng.dts
+++ b/arch/powerpc/boot/dts/pdm360ng.dts
@@ -17,7 +17,7 @@
/ {
model = "pdm360ng";
- compatible = "ifm,pdm360ng";
+ compatible = "ifm,pdm360ng", "fsl,mpc5121";
#address-cells = <1>;
#size-cells = <1>;
interrupt-parent = <&ipic>;
diff --git a/arch/powerpc/boot/dts/t4240qds.dts b/arch/powerpc/boot/dts/t4240qds.dts
new file mode 100644
index 000000000000..0555976dd0f3
--- /dev/null
+++ b/arch/powerpc/boot/dts/t4240qds.dts
@@ -0,0 +1,224 @@
+/*
+ * T4240QDS Device Tree Source
+ *
+ * Copyright 2012 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/include/ "fsl/t4240si-pre.dtsi"
+
+/ {
+ model = "fsl,T4240QDS";
+ compatible = "fsl,T4240QDS";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ interrupt-parent = <&mpic>;
+
+ ifc: localbus@ffe124000 {
+ reg = <0xf 0xfe124000 0 0x2000>;
+ ranges = <0 0 0xf 0xe8000000 0x08000000
+ 2 0 0xf 0xff800000 0x00010000
+ 3 0 0xf 0xffdf0000 0x00008000>;
+
+ nor@0,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "cfi-flash";
+ reg = <0x0 0x0 0x8000000>;
+
+ bank-width = <2>;
+ device-width = <1>;
+ };
+
+ nand@2,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,ifc-nand";
+ reg = <0x2 0x0 0x10000>;
+
+ partition@0 {
+ /* This location must not be altered */
+ /* 1MB for u-boot Bootloader Image */
+ reg = <0x0 0x00100000>;
+ label = "NAND U-Boot Image";
+ read-only;
+ };
+
+ partition@100000 {
+ /* 1MB for DTB Image */
+ reg = <0x00100000 0x00100000>;
+ label = "NAND DTB Image";
+ };
+
+ partition@200000 {
+ /* 10MB for Linux Kernel Image */
+ reg = <0x00200000 0x00A00000>;
+ label = "NAND Linux Kernel Image";
+ };
+
+ partition@C00000 {
+ /* 500MB for Root file System Image */
+ reg = <0x00c00000 0x1F400000>;
+ label = "NAND RFS Image";
+ };
+ };
+
+ board-control@3,0 {
+ compatible = "fsl,t4240qds-fpga", "fsl,fpga-qixis";
+ reg = <3 0 0x300>;
+ };
+ };
+
+ memory {
+ device_type = "memory";
+ };
+
+ dcsr: dcsr@f00000000 {
+ ranges = <0x00000000 0xf 0x00000000 0x01072000>;
+ };
+
+ soc: soc@ffe000000 {
+ ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
+ reg = <0xf 0xfe000000 0 0x00001000>;
+ spi@110000 {
+ flash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "sst,sst25wf040";
+ reg = <0>;
+ spi-max-frequency = <40000000>; /* input clock */
+ };
+ };
+
+ i2c@118000 {
+ eeprom@51 {
+ compatible = "at24,24c256";
+ reg = <0x51>;
+ };
+ eeprom@52 {
+ compatible = "at24,24c256";
+ reg = <0x52>;
+ };
+ eeprom@53 {
+ compatible = "at24,24c256";
+ reg = <0x53>;
+ };
+ eeprom@54 {
+ compatible = "at24,24c256";
+ reg = <0x54>;
+ };
+ eeprom@55 {
+ compatible = "at24,24c256";
+ reg = <0x55>;
+ };
+ eeprom@56 {
+ compatible = "at24,24c256";
+ reg = <0x56>;
+ };
+ rtc@68 {
+ compatible = "dallas,ds3232";
+ reg = <0x68>;
+ interrupts = <0x1 0x1 0 0>;
+ };
+ };
+ };
+
+ pci0: pcie@ffe240000 {
+ reg = <0xf 0xfe240000 0 0x10000>;
+ ranges = <0x02000000 0 0xe0000000 0xc 0x00000000 0x0 0x20000000
+ 0x01000000 0 0x00000000 0xf 0xf8000000 0x0 0x00010000>;
+ pcie@0 {
+ ranges = <0x02000000 0 0xe0000000
+ 0x02000000 0 0xe0000000
+ 0 0x20000000
+
+ 0x01000000 0 0x00000000
+ 0x01000000 0 0x00000000
+ 0 0x00010000>;
+ };
+ };
+
+ pci1: pcie@ffe250000 {
+ reg = <0xf 0xfe250000 0 0x10000>;
+ ranges = <0x02000000 0x0 0xe0000000 0xc 0x20000000 0x0 0x20000000
+ 0x01000000 0x0 0x00000000 0xf 0xf8010000 0x0 0x00010000>;
+ pcie@0 {
+ ranges = <0x02000000 0 0xe0000000
+ 0x02000000 0 0xe0000000
+ 0 0x20000000
+
+ 0x01000000 0 0x00000000
+ 0x01000000 0 0x00000000
+ 0 0x00010000>;
+ };
+ };
+
+ pci2: pcie@ffe260000 {
+ reg = <0xf 0xfe260000 0 0x1000>;
+ ranges = <0x02000000 0 0xe0000000 0xc 0x40000000 0 0x20000000
+ 0x01000000 0 0x00000000 0xf 0xf8020000 0 0x00010000>;
+ pcie@0 {
+ ranges = <0x02000000 0 0xe0000000
+ 0x02000000 0 0xe0000000
+ 0 0x20000000
+
+ 0x01000000 0 0x00000000
+ 0x01000000 0 0x00000000
+ 0 0x00010000>;
+ };
+ };
+
+ pci3: pcie@ffe270000 {
+ reg = <0xf 0xfe270000 0 0x10000>;
+ ranges = <0x02000000 0 0xe0000000 0xc 0x60000000 0 0x20000000
+ 0x01000000 0 0x00000000 0xf 0xf8030000 0 0x00010000>;
+ pcie@0 {
+ ranges = <0x02000000 0 0xe0000000
+ 0x02000000 0 0xe0000000
+ 0 0x20000000
+
+ 0x01000000 0 0x00000000
+ 0x01000000 0 0x00000000
+ 0 0x00010000>;
+ };
+ };
+ rio: rapidio@ffe0c0000 {
+ reg = <0xf 0xfe0c0000 0 0x11000>;
+
+ port1 {
+ ranges = <0 0 0xc 0x20000000 0 0x10000000>;
+ };
+ port2 {
+ ranges = <0 0 0xc 0x30000000 0 0x10000000>;
+ };
+ };
+};
+
+/include/ "fsl/t4240si-post.dtsi"
diff --git a/arch/powerpc/configs/corenet64_smp_defconfig b/arch/powerpc/configs/corenet64_smp_defconfig
index 3d139fa04050..6c8b020806ff 100644
--- a/arch/powerpc/configs/corenet64_smp_defconfig
+++ b/arch/powerpc/configs/corenet64_smp_defconfig
@@ -1,14 +1,13 @@
CONFIG_PPC64=y
CONFIG_PPC_BOOK3E_64=y
-# CONFIG_VIRT_CPU_ACCOUNTING_NATIVE is not set
+CONFIG_ALTIVEC=y
CONFIG_SMP=y
-CONFIG_NR_CPUS=2
-CONFIG_EXPERIMENTAL=y
+CONFIG_NR_CPUS=24
CONFIG_SYSVIPC=y
-CONFIG_BSD_PROCESS_ACCT=y
CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BSD_PROCESS_ACCT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
@@ -22,10 +21,13 @@ CONFIG_MODVERSIONS=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_PARTITION_ADVANCED=y
CONFIG_MAC_PARTITION=y
+CONFIG_B4_QDS=y
CONFIG_P5020_DS=y
CONFIG_P5040_DS=y
+CONFIG_T4240_QDS=y
# CONFIG_PPC_OF_BOOT_TRAMPOLINE is not set
CONFIG_BINFMT_MISC=m
+CONFIG_FSL_IFC=y
CONFIG_PCIEPORTBUS=y
CONFIG_PCI_MSI=y
CONFIG_RAPIDIO=y
@@ -58,16 +60,33 @@ CONFIG_IP_SCTP=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
CONFIG_MTD=y
+CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_OF_PARTS=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLKDEVS=y
CONFIG_MTD_BLOCK=y
+CONFIG_FTL=y
CONFIG_MTD_CFI=y
+CONFIG_MTD_GEN_PROBE=y
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
CONFIG_MTD_M25P80=y
+CONFIG_MTD_CFI_UTIL=y
+CONFIG_MTD_NAND_ECC=y
CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_IDS=y
CONFIG_MTD_NAND_FSL_ELBC=y
CONFIG_MTD_NAND_FSL_IFC=y
+CONFIG_MTD_UBI=y
+CONFIG_MTD_UBI_WL_THRESHOLD=4096
+CONFIG_MTD_UBI_BEB_RESERVE=1
CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
@@ -78,6 +97,7 @@ CONFIG_SATA_FSL=y
CONFIG_SATA_SIL24=y
CONFIG_NETDEVICES=y
CONFIG_DUMMY=y
+CONFIG_E1000E=y
CONFIG_INPUT_FF_MEMLESS=m
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
@@ -121,7 +141,16 @@ CONFIG_NTFS_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_HUGETLBFS=y
-# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_MISC_FILESYSTEMS=y
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_DEBUG=1
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+CONFIG_JFFS2_ZLIB=y
+CONFIG_JFFS2_RTIME=y
+CONFIG_UBIFS_FS=y
+CONFIG_UBIFS_FS_XATTR=y
+CONFIG_UBIFS_FS_LZO=y
+CONFIG_UBIFS_FS_ZLIB=y
CONFIG_NFS_FS=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
@@ -129,6 +158,12 @@ CONFIG_NFSD=m
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=m
CONFIG_CRC_T10DIF=y
+CONFIG_CRC16=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_LZO_COMPRESS=y
+CONFIG_LZO_DECOMPRESS=y
+CONFIG_CRYPTO_DEFLATE=y
+CONFIG_CRYPTO_LZO=y
CONFIG_FRAME_WARN=1024
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
@@ -140,6 +175,5 @@ CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_SHA512=y
-CONFIG_CRYPTO_AES=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_DEV_FSL_CAAM=y
diff --git a/arch/powerpc/configs/mpc512x_defconfig b/arch/powerpc/configs/mpc512x_defconfig
index 211fcc9ed700..0d0d981442fd 100644
--- a/arch/powerpc/configs/mpc512x_defconfig
+++ b/arch/powerpc/configs/mpc512x_defconfig
@@ -13,7 +13,7 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_PPC_CHRP is not set
CONFIG_PPC_MPC512x=y
CONFIG_MPC5121_ADS=y
-CONFIG_MPC5121_GENERIC=y
+CONFIG_MPC512x_GENERIC=y
CONFIG_PDM360NG=y
# CONFIG_PPC_PMAC is not set
CONFIG_NO_HZ=y
diff --git a/arch/powerpc/configs/mpc85xx_defconfig b/arch/powerpc/configs/mpc85xx_defconfig
index cf815e847cdc..5a58882e351e 100644
--- a/arch/powerpc/configs/mpc85xx_defconfig
+++ b/arch/powerpc/configs/mpc85xx_defconfig
@@ -1,13 +1,12 @@
CONFIG_PPC_85xx=y
CONFIG_PHYS_64BIT=y
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
-CONFIG_BSD_PROCESS_ACCT=y
CONFIG_AUDIT=y
CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BSD_PROCESS_ACCT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
@@ -48,6 +47,7 @@ CONFIG_HIGHMEM=y
CONFIG_BINFMT_MISC=m
CONFIG_MATH_EMULATION=y
CONFIG_FORCE_MAX_ZONEORDER=12
+CONFIG_FSL_IFC=y
CONFIG_PCI=y
CONFIG_PCI_MSI=y
CONFIG_RAPIDIO=y
@@ -79,18 +79,33 @@ CONFIG_IP_SCTP=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
CONFIG_MTD=y
+CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_OF_PARTS=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLKDEVS=y
CONFIG_MTD_BLOCK=y
CONFIG_FTL=y
CONFIG_MTD_CFI=y
+CONFIG_MTD_GEN_PROBE=y
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
CONFIG_MTD_M25P80=y
+CONFIG_MTD_CFI_UTIL=y
+CONFIG_MTD_NAND_ECC=y
CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_IDS=y
CONFIG_MTD_NAND_FSL_ELBC=y
CONFIG_MTD_NAND_FSL_IFC=y
+CONFIG_MTD_UBI=y
+CONFIG_MTD_UBI_WL_THRESHOLD=4096
+CONFIG_MTD_UBI_BEB_RESERVE=1
CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_NBD=y
@@ -106,6 +121,7 @@ CONFIG_SCSI_LOGGING=y
CONFIG_ATA=y
CONFIG_SATA_AHCI=y
CONFIG_SATA_FSL=y
+CONFIG_SATA_SIL24=y
CONFIG_PATA_ALI=y
CONFIG_PATA_VIA=y
CONFIG_NETDEVICES=y
@@ -113,6 +129,9 @@ CONFIG_DUMMY=y
CONFIG_FS_ENET=y
CONFIG_UCC_GETH=y
CONFIG_GIANFAR=y
+CONFIG_E1000=y
+CONFIG_E1000E=y
+CONFIG_IGB=y
CONFIG_MARVELL_PHY=y
CONFIG_DAVICOM_PHY=y
CONFIG_CICADA_PHY=y
@@ -132,7 +151,6 @@ CONFIG_SERIAL_8250_DETECT_IRQ=y
CONFIG_SERIAL_8250_RSA=y
CONFIG_SERIAL_QE=m
CONFIG_NVRAM=y
-CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_CPM=m
CONFIG_I2C_MPC=y
@@ -206,6 +224,15 @@ CONFIG_NTFS_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_HUGETLBFS=y
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_DEBUG=1
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+CONFIG_JFFS2_ZLIB=y
+CONFIG_JFFS2_RTIME=y
+CONFIG_UBIFS_FS=y
+CONFIG_UBIFS_FS_XATTR=y
+CONFIG_UBIFS_FS_LZO=y
+CONFIG_UBIFS_FS_ZLIB=y
CONFIG_ADFS_FS=m
CONFIG_AFFS_FS=m
CONFIG_HFS_FS=m
@@ -224,13 +251,18 @@ CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=y
CONFIG_CRC_T10DIF=y
+CONFIG_CRC16=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_LZO_COMPRESS=y
+CONFIG_LZO_DECOMPRESS=y
+CONFIG_CRYPTO_DEFLATE=y
+CONFIG_CRYPTO_LZO=y
CONFIG_DEBUG_FS=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEBUG_INFO=y
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_SHA512=y
-CONFIG_CRYPTO_AES=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_DEV_FSL_CAAM=y
CONFIG_CRYPTO_DEV_TALITOS=y
diff --git a/arch/powerpc/configs/mpc85xx_smp_defconfig b/arch/powerpc/configs/mpc85xx_smp_defconfig
index 8d00ea5b8a9f..165e6b32baef 100644
--- a/arch/powerpc/configs/mpc85xx_smp_defconfig
+++ b/arch/powerpc/configs/mpc85xx_smp_defconfig
@@ -50,6 +50,7 @@ CONFIG_HIGHMEM=y
CONFIG_BINFMT_MISC=m
CONFIG_MATH_EMULATION=y
CONFIG_FORCE_MAX_ZONEORDER=12
+CONFIG_FSL_IFC=y
CONFIG_PCI=y
CONFIG_PCI_MSI=y
CONFIG_RAPIDIO=y
@@ -81,18 +82,33 @@ CONFIG_IP_SCTP=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
CONFIG_MTD=y
+CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_OF_PARTS=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLKDEVS=y
CONFIG_MTD_BLOCK=y
CONFIG_FTL=y
CONFIG_MTD_CFI=y
+CONFIG_MTD_GEN_PROBE=y
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
CONFIG_MTD_M25P80=y
+CONFIG_MTD_CFI_UTIL=y
+CONFIG_MTD_NAND_ECC=y
CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_IDS=y
CONFIG_MTD_NAND_FSL_ELBC=y
CONFIG_MTD_NAND_FSL_IFC=y
+CONFIG_MTD_UBI=y
+CONFIG_MTD_UBI_WL_THRESHOLD=4096
+CONFIG_MTD_UBI_BEB_RESERVE=1
CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_NBD=y
@@ -108,6 +124,7 @@ CONFIG_SCSI_LOGGING=y
CONFIG_ATA=y
CONFIG_SATA_AHCI=y
CONFIG_SATA_FSL=y
+CONFIG_SATA_SIL24=y
CONFIG_PATA_ALI=y
CONFIG_NETDEVICES=y
CONFIG_DUMMY=y
@@ -207,6 +224,15 @@ CONFIG_NTFS_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_HUGETLBFS=y
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_DEBUG=1
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+CONFIG_JFFS2_ZLIB=y
+CONFIG_JFFS2_RTIME=y
+CONFIG_UBIFS_FS=y
+CONFIG_UBIFS_FS_XATTR=y
+CONFIG_UBIFS_FS_LZO=y
+CONFIG_UBIFS_FS_ZLIB=y
CONFIG_ADFS_FS=m
CONFIG_AFFS_FS=m
CONFIG_HFS_FS=m
@@ -225,6 +251,12 @@ CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=y
CONFIG_CRC_T10DIF=y
+CONFIG_CRC16=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_LZO_COMPRESS=y
+CONFIG_LZO_DECOMPRESS=y
+CONFIG_CRYPTO_DEFLATE=y
+CONFIG_CRYPTO_LZO=y
CONFIG_DEBUG_FS=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEBUG_INFO=y
diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig
index 7a5c15fcc7cf..f79196232917 100644
--- a/arch/powerpc/configs/ps3_defconfig
+++ b/arch/powerpc/configs/ps3_defconfig
@@ -3,11 +3,11 @@ CONFIG_TUNE_CELL=y
CONFIG_ALTIVEC=y
CONFIG_SMP=y
CONFIG_NR_CPUS=2
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_BLK_DEV_INITRD=y
+CONFIG_RD_LZMA=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_EMBEDDED=y
# CONFIG_PERF_EVENTS is not set
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index 08bd299c75b1..910194e9a1e2 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -53,7 +53,7 @@
#define smp_mb__after_clear_bit() smp_mb()
/* Macro for generating the ***_bits() functions */
-#define DEFINE_BITOP(fn, op, prefix, postfix) \
+#define DEFINE_BITOP(fn, op, prefix) \
static __inline__ void fn(unsigned long mask, \
volatile unsigned long *_p) \
{ \
@@ -66,16 +66,15 @@ static __inline__ void fn(unsigned long mask, \
PPC405_ERR77(0,%3) \
PPC_STLCX "%0,0,%3\n" \
"bne- 1b\n" \
- postfix \
: "=&r" (old), "+m" (*p) \
: "r" (mask), "r" (p) \
: "cc", "memory"); \
}
-DEFINE_BITOP(set_bits, or, "", "")
-DEFINE_BITOP(clear_bits, andc, "", "")
-DEFINE_BITOP(clear_bits_unlock, andc, PPC_RELEASE_BARRIER, "")
-DEFINE_BITOP(change_bits, xor, "", "")
+DEFINE_BITOP(set_bits, or, "")
+DEFINE_BITOP(clear_bits, andc, "")
+DEFINE_BITOP(clear_bits_unlock, andc, PPC_RELEASE_BARRIER)
+DEFINE_BITOP(change_bits, xor, "")
static __inline__ void set_bit(int nr, volatile unsigned long *addr)
{
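
Note on the DEFINE_BITOP() hunk above: it only removes the postfix argument, which every user shown in the hunk passes as an empty string, so the generated helpers remain plain atomic read-modify-write loops on one word. A minimal userspace sketch of that behaviour, using GCC's __atomic builtins in place of the larx/stcx. retry loop (illustrative names only, not kernel API):

    #include <stdio.h>

    /* model of the "or" variant (set_bits) */
    static void model_set_bits(unsigned long mask, unsigned long *p)
    {
        __atomic_fetch_or(p, mask, __ATOMIC_RELAXED);
    }

    /* model of the "andc" variant (clear_bits): p &= ~mask */
    static void model_clear_bits(unsigned long mask, unsigned long *p)
    {
        __atomic_fetch_and(p, ~mask, __ATOMIC_RELAXED);
    }

    int main(void)
    {
        unsigned long word = 0;
        model_set_bits(0x5UL, &word);
        model_clear_bits(0x1UL, &word);
        printf("%#lx\n", word);   /* prints 0x4 */
        return 0;
    }
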
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index fb3245e928ea..26807e5aff51 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -52,6 +52,7 @@ struct cpu_spec {
char *cpu_name;
unsigned long cpu_features; /* Kernel features */
unsigned int cpu_user_features; /* Userland features */
+ unsigned int cpu_user_features2; /* Userland features v2 */
unsigned int mmu_features; /* MMU features */
/* cache line sizes */
@@ -151,7 +152,7 @@ extern const char *powerpc_base_platform;
#define CPU_FTR_HVMODE LONG_ASM_CONST(0x0000000100000000)
#define CPU_FTR_ARCH_201 LONG_ASM_CONST(0x0000000200000000)
#define CPU_FTR_ARCH_206 LONG_ASM_CONST(0x0000000400000000)
-#define CPU_FTR_CFAR LONG_ASM_CONST(0x0000000800000000)
+#define CPU_FTR_ARCH_207S LONG_ASM_CONST(0x0000000800000000)
#define CPU_FTR_IABR LONG_ASM_CONST(0x0000001000000000)
#define CPU_FTR_MMCRA LONG_ASM_CONST(0x0000002000000000)
#define CPU_FTR_CTRL LONG_ASM_CONST(0x0000004000000000)
@@ -172,7 +173,7 @@ extern const char *powerpc_base_platform;
#define CPU_FTR_ICSWX LONG_ASM_CONST(0x0020000000000000)
#define CPU_FTR_VMX_COPY LONG_ASM_CONST(0x0040000000000000)
#define CPU_FTR_TM LONG_ASM_CONST(0x0080000000000000)
-#define CPU_FTR_BCTAR LONG_ASM_CONST(0x0100000000000000)
+#define CPU_FTR_CFAR LONG_ASM_CONST(0x0100000000000000)
#define CPU_FTR_HAS_PPR LONG_ASM_CONST(0x0200000000000000)
#define CPU_FTR_DAWR LONG_ASM_CONST(0x0400000000000000)
@@ -223,8 +224,10 @@ extern const char *powerpc_base_platform;
/* We only set the TM feature if the kernel was compiled with TM supprt */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
#define CPU_FTR_TM_COMP CPU_FTR_TM
+#define PPC_FEATURE2_HTM_COMP PPC_FEATURE2_HTM
#else
#define CPU_FTR_TM_COMP 0
+#define PPC_FEATURE2_HTM_COMP 0
#endif
/* We need to mark all pages as being coherent if we're SMP or we have a
@@ -374,7 +377,7 @@ extern const char *powerpc_base_platform;
#define CPU_FTRS_E6500 (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \
CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
- CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV)
+ CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV | CPU_FTR_ALTIVEC_COMP)
#define CPU_FTRS_GENERIC_32 (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN)
/* 64-bit CPUs */
@@ -421,8 +424,8 @@ extern const char *powerpc_base_platform;
CPU_FTR_DSCR | CPU_FTR_SAO | \
CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
- CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | CPU_FTR_BCTAR | \
- CPU_FTR_TM_COMP)
+ CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \
+ CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP)
#define CPU_FTRS_CELL (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
diff --git a/arch/powerpc/include/asm/dma.h b/arch/powerpc/include/asm/dma.h
index f6813e919bb2..a5c6d83b5f60 100644
--- a/arch/powerpc/include/asm/dma.h
+++ b/arch/powerpc/include/asm/dma.h
@@ -16,10 +16,6 @@
*
* None of this really applies for Power Macintoshes. There is
* basically just enough here to get kernel/dma.c to compile.
- *
- * There may be some comments or restrictions made here which are
- * not valid for the PReP platform. Take what you read
- * with a grain of salt.
*/
#include <asm/io.h>
@@ -57,7 +53,6 @@
* - page registers for 5-7 don't use data bit 0, represent 128K pages
* - page registers for 0-3 use bit 0, represent 64K pages
*
- * On PReP, DMA transfers are limited to the lower 16MB of _physical_ memory.
* On CHRP, the W83C553F (and VLSI Tollgate?) support full 32 bit addressing.
* Note that addresses loaded into registers must be _physical_ addresses,
* not logical addresses (which may differ if paging is active).
diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
index ac9790fc3836..cc0655a702a7 100644
--- a/arch/powerpc/include/asm/elf.h
+++ b/arch/powerpc/include/asm/elf.h
@@ -61,6 +61,7 @@ typedef elf_vrregset_t elf_fpxregset_t;
instruction set this cpu supports. This could be done in userspace,
but it's not easy, and we've already done it here. */
# define ELF_HWCAP (cur_cpu_spec->cpu_user_features)
+# define ELF_HWCAP2 (cur_cpu_spec->cpu_user_features2)
/* This yields a string that ld.so will use to load implementation
specific libraries for optimization. This is more specific in
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 05e6d2ee1db9..8e5fae8beaf6 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -414,7 +414,6 @@ label##_relon_hv: \
#define SOFTEN_NOTEST_HV(vec) _SOFTEN_TEST(EXC_HV, vec)
#define __MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra) \
- HMT_MEDIUM_PPR_DISCARD; \
SET_SCRATCH0(r13); /* save r13 */ \
EXCEPTION_PROLOG_0(PACA_EXGEN); \
__EXCEPTION_PROLOG_1(PACA_EXGEN, extra, vec); \
@@ -427,6 +426,7 @@ label##_relon_hv: \
. = loc; \
.globl label##_pSeries; \
label##_pSeries: \
+ HMT_MEDIUM_PPR_DISCARD; \
_MASKABLE_EXCEPTION_PSERIES(vec, label, \
EXC_STD, SOFTEN_TEST_PR)
diff --git a/arch/powerpc/include/asm/firmware.h b/arch/powerpc/include/asm/firmware.h
index 097dee57a7a9..0df54646f968 100644
--- a/arch/powerpc/include/asm/firmware.h
+++ b/arch/powerpc/include/asm/firmware.h
@@ -18,7 +18,6 @@
#include <asm/feature-fixups.h>
/* firmware feature bitmask values */
-#define FIRMWARE_MAX_FEATURES 63
#define FW_FEATURE_PFT ASM_CONST(0x0000000000000001)
#define FW_FEATURE_TCE ASM_CONST(0x0000000000000002)
@@ -51,6 +50,8 @@
#define FW_FEATURE_OPALv2 ASM_CONST(0x0000000020000000)
#define FW_FEATURE_SET_MODE ASM_CONST(0x0000000040000000)
#define FW_FEATURE_BEST_ENERGY ASM_CONST(0x0000000080000000)
+#define FW_FEATURE_TYPE1_AFFINITY ASM_CONST(0x0000000100000000)
+#define FW_FEATURE_PRRN ASM_CONST(0x0000000200000000)
#ifndef __ASSEMBLY__
@@ -65,7 +66,8 @@ enum {
FW_FEATURE_BULK_REMOVE | FW_FEATURE_XDABR |
FW_FEATURE_MULTITCE | FW_FEATURE_SPLPAR | FW_FEATURE_LPAR |
FW_FEATURE_CMO | FW_FEATURE_VPHN | FW_FEATURE_XCMO |
- FW_FEATURE_SET_MODE | FW_FEATURE_BEST_ENERGY,
+ FW_FEATURE_SET_MODE | FW_FEATURE_BEST_ENERGY |
+ FW_FEATURE_TYPE1_AFFINITY | FW_FEATURE_PRRN,
FW_FEATURE_PSERIES_ALWAYS = 0,
FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL | FW_FEATURE_OPALv2,
FW_FEATURE_POWERNV_ALWAYS = 0,
diff --git a/arch/powerpc/include/asm/hardirq.h b/arch/powerpc/include/asm/hardirq.h
index 3147a2970125..3bdcfce2c42a 100644
--- a/arch/powerpc/include/asm/hardirq.h
+++ b/arch/powerpc/include/asm/hardirq.h
@@ -10,6 +10,9 @@ typedef struct {
unsigned int pmu_irqs;
unsigned int mce_exceptions;
unsigned int spurious_irqs;
+#ifdef CONFIG_PPC_DOORBELL
+ unsigned int doorbell_irqs;
+#endif
} ____cacheline_aligned irq_cpustat_t;
DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index 62e11a32c4c2..f2498c8e595d 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -3,9 +3,37 @@
#ifdef CONFIG_HUGETLB_PAGE
#include <asm/page.h>
+#include <asm-generic/hugetlb.h>
extern struct kmem_cache *hugepte_cache;
+#ifdef CONFIG_PPC_BOOK3S_64
+/*
+ * This should work for other subarchs too. But right now we use the
+ * new format only for 64bit book3s
+ */
+static inline pte_t *hugepd_page(hugepd_t hpd)
+{
+ BUG_ON(!hugepd_ok(hpd));
+ /*
+ * We have only four bits to encode, MMU page size
+ */
+ BUILD_BUG_ON((MMU_PAGE_COUNT - 1) > 0xf);
+ return (pte_t *)(hpd.pd & ~HUGEPD_SHIFT_MASK);
+}
+
+static inline unsigned int hugepd_mmu_psize(hugepd_t hpd)
+{
+ return (hpd.pd & HUGEPD_SHIFT_MASK) >> 2;
+}
+
+static inline unsigned int hugepd_shift(hugepd_t hpd)
+{
+ return mmu_psize_to_shift(hugepd_mmu_psize(hpd));
+}
+
+#else
+
static inline pte_t *hugepd_page(hugepd_t hpd)
{
BUG_ON(!hugepd_ok(hpd));
@@ -17,6 +45,9 @@ static inline unsigned int hugepd_shift(hugepd_t hpd)
return hpd.pd & HUGEPD_SHIFT_MASK;
}
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
+
static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr,
unsigned pdshift)
{
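
The new book3s_64 hugepd helpers above treat the pd word as a pointer whose low two bits are clear, with the MMU page-size index (at most four bits, per the BUILD_BUG_ON) stored just above them. A hedged userspace sketch of that pack/unpack; the 0x3f mask below is an assumption standing in for HUGEPD_SHIFT_MASK:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SHIFT_MASK 0x3fUL            /* hypothetical stand-in */

    static uintptr_t pack_hugepd(void *pte_table, unsigned int psize)
    {
        assert(psize <= 0xf);            /* only four bits available */
        return (uintptr_t)pte_table | (psize << 2);
    }

    static void *hugepd_page_model(uintptr_t pd)
    {
        return (void *)(pd & ~SHIFT_MASK);   /* recover the pointer */
    }

    static unsigned int hugepd_psize_model(uintptr_t pd)
    {
        return (pd & SHIFT_MASK) >> 2;       /* recover the size index */
    }

    int main(void)
    {
        static unsigned long table[512] __attribute__((aligned(64)));
        uintptr_t pd = pack_hugepd(table, 7);

        printf("page=%p psize=%u\n",
               hugepd_page_model(pd), hugepd_psize_model(pd));
        return 0;
    }
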
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index 4bc2c3dad6ad..cf4df8e2139a 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -270,6 +270,9 @@
#define H_SET_MODE 0x31C
#define MAX_HCALL_OPCODE H_SET_MODE
+/* Platform specific hcalls, used by KVM */
+#define H_RTAS 0xf000
+
#ifndef __ASSEMBLY__
/**
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index e45c4947a772..d615b28dda82 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -95,15 +95,13 @@ static inline bool arch_irqs_disabled(void)
#define __hard_irq_disable() __mtmsrd(local_paca->kernel_msr, 1)
#endif
-static inline void hard_irq_disable(void)
-{
- __hard_irq_disable();
- get_paca()->soft_enabled = 0;
- get_paca()->irq_happened |= PACA_IRQ_HARD_DIS;
-}
-
-/* include/linux/interrupt.h needs hard_irq_disable to be a macro */
-#define hard_irq_disable hard_irq_disable
+#define hard_irq_disable() do { \
+ __hard_irq_disable(); \
+ if (local_paca->soft_enabled) \
+ trace_hardirqs_off(); \
+ get_paca()->soft_enabled = 0; \
+ get_paca()->irq_happened |= PACA_IRQ_HARD_DIS; \
+} while(0)
static inline bool lazy_irq_pending(void)
{
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index f94ef4213e9d..dd15e5e37d6d 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -15,10 +15,6 @@
extern int check_legacy_ioport(unsigned long base_port);
#define I8042_DATA_REG 0x60
#define FDC_BASE 0x3f0
-/* only relevant for PReP */
-#define _PIDXR 0x279
-#define _PNPWRP 0xa79
-#define PNPBIOS_BASE 0xf000
#if defined(CONFIG_PPC64) && defined(CONFIG_PCI)
extern struct pci_dev *isa_bridge_pcidev;
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index aabcdba8f6b0..b9dd382cb349 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -67,6 +67,10 @@
#define BOOKE_INTERRUPT_HV_SYSCALL 40
#define BOOKE_INTERRUPT_HV_PRIV 41
+/* altivec */
+#define BOOKE_INTERRUPT_ALTIVEC_UNAVAIL 42
+#define BOOKE_INTERRUPT_ALTIVEC_ASSIST 43
+
/* book3s */
#define BOOK3S_INTERRUPT_SYSTEM_RESET 0x100
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 5a56e1c5f851..349ed85c7d61 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -142,6 +142,8 @@ extern int kvmppc_mmu_hv_init(void);
extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec);
+extern void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
+ unsigned int vec);
extern void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags);
extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
bool upper, u32 val);
@@ -156,7 +158,8 @@ void kvmppc_clear_ref_hpte(struct kvm *kvm, unsigned long *hptep,
unsigned long pte_index);
extern void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long addr,
unsigned long *nb_ret);
-extern void kvmppc_unpin_guest_page(struct kvm *kvm, void *addr);
+extern void kvmppc_unpin_guest_page(struct kvm *kvm, void *addr,
+ unsigned long gpa, bool dirty);
extern long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
long pte_index, unsigned long pteh, unsigned long ptel);
extern long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
@@ -458,6 +461,8 @@ static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
#define OSI_SC_MAGIC_R4 0x77810F9B
#define INS_DCBZ 0x7c0007ec
+/* TO = 31 for unconditional trap */
+#define INS_TW 0x7fe00008
/* LPIDs we support with this build -- runtime limit may be lower */
#define KVMPPC_NR_LPIDS (LPID_RSVD + 1)
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index 38bec1dc9928..9c1ff330c805 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -268,4 +268,17 @@ static inline int is_vrma_hpte(unsigned long hpte_v)
(HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)));
}
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+/*
+ * Note modification of an HPTE; set the HPTE modified bit
+ * if anyone is interested.
+ */
+static inline void note_hpte_modification(struct kvm *kvm,
+ struct revmap_entry *rev)
+{
+ if (atomic_read(&kvm->arch.hpte_mod_interest))
+ rev->guest_rpte |= HPTE_GR_MODIFIED;
+}
+#endif /* CONFIG_KVM_BOOK3S_64_HV */
+
#endif /* __ASM_KVM_BOOK3S_64_H__ */
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index cdc3d2717cc6..9039d3c97eec 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -20,6 +20,11 @@
#ifndef __ASM_KVM_BOOK3S_ASM_H__
#define __ASM_KVM_BOOK3S_ASM_H__
+/* XICS ICP register offsets */
+#define XICS_XIRR 4
+#define XICS_MFRR 0xc
+#define XICS_IPI 2 /* interrupt source # for IPIs */
+
#ifdef __ASSEMBLY__
#ifdef CONFIG_KVM_BOOK3S_HANDLER
@@ -81,10 +86,11 @@ struct kvmppc_host_state {
#ifdef CONFIG_KVM_BOOK3S_64_HV
u8 hwthread_req;
u8 hwthread_state;
-
+ u8 host_ipi;
struct kvm_vcpu *kvm_vcpu;
struct kvmppc_vcore *kvm_vcore;
unsigned long xics_phys;
+ u32 saved_xirr;
u64 dabr;
u64 host_mmcr[3];
u32 host_pmc[8];
diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h
index b7cd3356a532..d3c1eb34c986 100644
--- a/arch/powerpc/include/asm/kvm_booke.h
+++ b/arch/powerpc/include/asm/kvm_booke.h
@@ -26,6 +26,8 @@
/* LPIDs we support with this build -- runtime limit may be lower */
#define KVMPPC_NR_LPIDS 64
+#define KVMPPC_INST_EHPRIV 0x7c00021c
+
static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
vcpu->arch.gpr[num] = val;
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index d1bb86074721..af326cde7cb6 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -44,6 +44,10 @@
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
#endif
+/* These values are internal and can be increased later */
+#define KVM_NR_IRQCHIPS 1
+#define KVM_IRQCHIP_NUM_PINS 256
+
#if !defined(CONFIG_KVM_440)
#include <linux/mmu_notifier.h>
@@ -188,6 +192,10 @@ struct kvmppc_linear_info {
int type;
};
+/* XICS components, defined in book3s_xics.c */
+struct kvmppc_xics;
+struct kvmppc_icp;
+
/*
* The reverse mapping array has one entry for each HPTE,
* which stores the guest's view of the second word of the HPTE
@@ -255,6 +263,13 @@ struct kvm_arch {
#endif /* CONFIG_KVM_BOOK3S_64_HV */
#ifdef CONFIG_PPC_BOOK3S_64
struct list_head spapr_tce_tables;
+ struct list_head rtas_tokens;
+#endif
+#ifdef CONFIG_KVM_MPIC
+ struct openpic *mpic;
+#endif
+#ifdef CONFIG_KVM_XICS
+ struct kvmppc_xics *xics;
#endif
};
@@ -301,11 +316,13 @@ struct kvmppc_vcore {
* that a guest can register.
*/
struct kvmppc_vpa {
+ unsigned long gpa; /* Current guest phys addr */
void *pinned_addr; /* Address in kernel linear mapping */
void *pinned_end; /* End of region */
unsigned long next_gpa; /* Guest phys addr for update */
unsigned long len; /* Number of bytes required */
u8 update_pending; /* 1 => update pinned_addr from next_gpa */
+ bool dirty; /* true => area has been modified by kernel */
};
struct kvmppc_pte {
@@ -359,6 +376,11 @@ struct kvmppc_slb {
#define KVMPPC_BOOKE_MAX_IAC 4
#define KVMPPC_BOOKE_MAX_DAC 2
+/* KVMPPC_EPR_USER takes precedence over KVMPPC_EPR_KERNEL */
+#define KVMPPC_EPR_NONE 0 /* EPR not supported */
+#define KVMPPC_EPR_USER 1 /* exit to userspace to fill EPR */
+#define KVMPPC_EPR_KERNEL 2 /* in-kernel irqchip */
+
struct kvmppc_booke_debug_reg {
u32 dbcr0;
u32 dbcr1;
@@ -370,6 +392,12 @@ struct kvmppc_booke_debug_reg {
u64 dac[KVMPPC_BOOKE_MAX_DAC];
};
+#define KVMPPC_IRQ_DEFAULT 0
+#define KVMPPC_IRQ_MPIC 1
+#define KVMPPC_IRQ_XICS 2
+
+struct openpic;
+
struct kvm_vcpu_arch {
ulong host_stack;
u32 host_pid;
@@ -502,8 +530,11 @@ struct kvm_vcpu_arch {
spinlock_t wdt_lock;
struct timer_list wdt_timer;
u32 tlbcfg[4];
+ u32 tlbps[4];
u32 mmucfg;
+ u32 eptcfg;
u32 epr;
+ u32 crit_save;
struct kvmppc_booke_debug_reg dbg_reg;
#endif
gpa_t paddr_accessed;
@@ -521,7 +552,7 @@ struct kvm_vcpu_arch {
u8 sane;
u8 cpu_type;
u8 hcall_needed;
- u8 epr_enabled;
+ u8 epr_flags; /* KVMPPC_EPR_xxx */
u8 epr_needed;
u32 cpr0_cfgaddr; /* holds the last set cpr0_cfgaddr */
@@ -548,6 +579,13 @@ struct kvm_vcpu_arch {
unsigned long magic_page_pa; /* phys addr to map the magic page to */
unsigned long magic_page_ea; /* effect. addr to map the magic page to */
+ int irq_type; /* one of KVM_IRQ_* */
+ int irq_cpu_id;
+ struct openpic *mpic; /* KVM_IRQ_MPIC */
+#ifdef CONFIG_KVM_XICS
+ struct kvmppc_icp *icp; /* XICS presentation controller */
+#endif
+
#ifdef CONFIG_KVM_BOOK3S_64_HV
struct kvm_vcpu_arch_shared shregs;
@@ -588,5 +626,6 @@ struct kvm_vcpu_arch {
#define KVM_MMIO_REG_FQPR 0x0060
#define __KVM_HAVE_ARCH_WQP
+#define __KVM_HAVE_CREATE_DEVICE
#endif /* __POWERPC_KVM_HOST_H__ */
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 44a657adf416..a5287fe03d77 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -44,7 +44,7 @@ enum emulation_result {
EMULATE_DO_DCR, /* kvm_run filled with DCR request */
EMULATE_FAIL, /* can't emulate this instruction */
EMULATE_AGAIN, /* something went wrong. go again */
- EMULATE_DO_PAPR, /* kvm_run filled with PAPR request */
+ EMULATE_EXIT_USER, /* emulation requires exit to user-space */
};
extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
@@ -104,8 +104,7 @@ extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
struct kvm_interrupt *irq);
-extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
- struct kvm_interrupt *irq);
+extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
extern int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
@@ -131,6 +130,7 @@ extern long kvmppc_prepare_vrma(struct kvm *kvm,
extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
struct kvm_memory_slot *memslot, unsigned long porder);
extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);
+
extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
struct kvm_create_spapr_tce *args);
extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
@@ -152,7 +152,7 @@ extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem);
extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
- struct kvm_memory_slot old);
+ const struct kvm_memory_slot *old);
extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
struct kvm_ppc_smmu_info *info);
extern void kvmppc_core_flush_memslot(struct kvm *kvm,
@@ -165,6 +165,18 @@ extern int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu);
extern int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *);
+int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq);
+
+extern int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp);
+extern int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu);
+extern void kvmppc_rtas_tokens_free(struct kvm *kvm);
+extern int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server,
+ u32 priority);
+extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
+ u32 *priority);
+extern int kvmppc_xics_int_on(struct kvm *kvm, u32 irq);
+extern int kvmppc_xics_int_off(struct kvm *kvm, u32 irq);
+
/*
* Cuts out inst bits with ordering according to spec.
* That means the leftmost bit is zero. All given bits are included.
@@ -246,12 +258,29 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);
void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);
+struct openpic;
+
#ifdef CONFIG_KVM_BOOK3S_64_HV
static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{
paca[cpu].kvm_hstate.xics_phys = addr;
}
+static inline u32 kvmppc_get_xics_latch(void)
+{
+ u32 xirr = get_paca()->kvm_hstate.saved_xirr;
+
+ get_paca()->kvm_hstate.saved_xirr = 0;
+
+ return xirr;
+}
+
+static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
+{
+ paca[cpu].kvm_hstate.host_ipi = host_ipi;
+}
+
+extern void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu);
extern void kvm_linear_init(void);
#else
@@ -260,6 +289,46 @@ static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
static inline void kvm_linear_init(void)
{}
+
+static inline u32 kvmppc_get_xics_latch(void)
+{
+ return 0;
+}
+
+static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
+{}
+
+static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
+{
+ kvm_vcpu_kick(vcpu);
+}
+#endif
+
+#ifdef CONFIG_KVM_XICS
+static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.irq_type == KVMPPC_IRQ_XICS;
+}
+extern void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu);
+extern int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu, unsigned long server);
+extern int kvm_vm_ioctl_xics_irq(struct kvm *kvm, struct kvm_irq_level *args);
+extern int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd);
+extern u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu);
+extern int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
+extern int kvmppc_xics_connect_vcpu(struct kvm_device *dev,
+ struct kvm_vcpu *vcpu, u32 cpu);
+#else
+static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
+ { return 0; }
+static inline void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu) { }
+static inline int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu,
+ unsigned long server)
+ { return -EINVAL; }
+static inline int kvm_vm_ioctl_xics_irq(struct kvm *kvm,
+ struct kvm_irq_level *args)
+ { return -ENOTTY; }
+static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
+ { return 0; }
#endif
static inline void kvmppc_set_epr(struct kvm_vcpu *vcpu, u32 epr)
@@ -271,6 +340,32 @@ static inline void kvmppc_set_epr(struct kvm_vcpu *vcpu, u32 epr)
#endif
}
+#ifdef CONFIG_KVM_MPIC
+
+void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu);
+int kvmppc_mpic_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
+ u32 cpu);
+void kvmppc_mpic_disconnect_vcpu(struct openpic *opp, struct kvm_vcpu *vcpu);
+
+#else
+
+static inline void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu)
+{
+}
+
+static inline int kvmppc_mpic_connect_vcpu(struct kvm_device *dev,
+ struct kvm_vcpu *vcpu, u32 cpu)
+{
+ return -EINVAL;
+}
+
+static inline void kvmppc_mpic_disconnect_vcpu(struct openpic *opp,
+ struct kvm_vcpu *vcpu)
+{
+}
+
+#endif /* CONFIG_KVM_MPIC */
+
int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
struct kvm_config_tlb *cfg);
int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
@@ -283,8 +378,15 @@ void kvmppc_init_lpid(unsigned long nr_lpids);
static inline void kvmppc_mmu_flush_icache(pfn_t pfn)
{
- /* Clear i-cache for new pages */
struct page *page;
+ /*
+ * We can only access pages that the kernel maps
+ * as memory. Bail out for unmapped ones.
+ */
+ if (!pfn_valid(pfn))
+ return;
+
+ /* Clear i-cache for new pages */
page = pfn_to_page(pfn);
if (!test_bit(PG_arch_1, &page->flags)) {
flush_dcache_icache_page(page);
@@ -324,4 +426,6 @@ static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb)
return ea;
}
+extern void xics_wake_cpu(int cpu);
+
#endif /* __POWERPC_KVM_PPC_H__ */
diff --git a/arch/powerpc/include/asm/linkage.h b/arch/powerpc/include/asm/linkage.h
new file mode 100644
index 000000000000..b36f650a13ff
--- /dev/null
+++ b/arch/powerpc/include/asm/linkage.h
@@ -0,0 +1,13 @@
+#ifndef _ASM_POWERPC_LINKAGE_H
+#define _ASM_POWERPC_LINKAGE_H
+
+#ifdef CONFIG_PPC64
+#define cond_syscall(x) \
+ asm ("\t.weak " #x "\n\t.set " #x ", sys_ni_syscall\n" \
+ "\t.weak ." #x "\n\t.set ." #x ", .sys_ni_syscall\n")
+#define SYSCALL_ALIAS(alias, name) \
+ asm ("\t.globl " #alias "\n\t.set " #alias ", " #name "\n" \
+ "\t.globl ." #alias "\n\t.set ." #alias ", ." #name)
+#endif
+
+#endif /* _ASM_POWERPC_LINKAGE_H */
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index 3d6b4100dac1..92386fc4e82a 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -29,6 +29,7 @@ struct rtc_time;
struct file;
struct pci_controller;
struct kimage;
+struct pci_host_bridge;
struct machdep_calls {
char *name;
@@ -50,7 +51,8 @@ struct machdep_calls {
unsigned long prpn,
unsigned long rflags,
unsigned long vflags,
- int psize, int ssize);
+ int psize, int apsize,
+ int ssize);
long (*hpte_remove)(unsigned long hpte_group);
void (*hpte_removebolted)(unsigned long ea,
int psize, int ssize);
@@ -107,6 +109,8 @@ struct machdep_calls {
void (*pcibios_fixup)(void);
int (*pci_probe_mode)(struct pci_bus *);
void (*pci_irq_fixup)(struct pci_dev *dev);
+ int (*pcibios_root_bridge_prepare)(struct pci_host_bridge
+ *bridge);
/* To setup PHBs when using automatic OF platform driver for PCI */
int (*pci_setup_phb)(struct pci_controller *host);
diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h
index 99d43e0c1e4a..936db360790a 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -215,6 +215,7 @@
#define TLBILX_T_CLASS3 7
#ifndef __ASSEMBLY__
+#include <asm/bug.h>
extern unsigned int tlbcam_index;
@@ -231,6 +232,10 @@ typedef struct {
u64 high_slices_psize; /* 4 bits per slice for now */
u16 user_psize; /* page size index */
#endif
+#ifdef CONFIG_PPC_64K_PAGES
+ /* for 4K PTE fragment support */
+ void *pte_frag;
+#endif
} mm_context_t;
/* Page size definitions, common between 32 and 64-bit
@@ -250,6 +255,23 @@ struct mmu_psize_def
};
extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
+static inline int shift_to_mmu_psize(unsigned int shift)
+{
+ int psize;
+
+ for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
+ if (mmu_psize_defs[psize].shift == shift)
+ return psize;
+ return -1;
+}
+
+static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
+{
+ if (mmu_psize_defs[mmu_psize].shift)
+ return mmu_psize_defs[mmu_psize].shift;
+ BUG();
+}
+
/* The page sizes use the same names as 64-bit hash but are
* constants
*/
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index b59e06f507ea..2accc9611248 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -21,6 +21,7 @@
* complete pgtable.h but only a portion of it.
*/
#include <asm/pgtable-ppc64.h>
+#include <asm/bug.h>
/*
* Segment table
@@ -154,11 +155,29 @@ extern unsigned long htab_hash_mask;
struct mmu_psize_def
{
unsigned int shift; /* number of bits */
- unsigned int penc; /* HPTE encoding */
+ int penc[MMU_PAGE_COUNT]; /* HPTE encoding */
unsigned int tlbiel; /* tlbiel supported for that page size */
unsigned long avpnm; /* bits to mask out in AVPN in the HPTE */
unsigned long sllp; /* SLB L||LP (exact mask to use in slbmte) */
};
+extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
+
+static inline int shift_to_mmu_psize(unsigned int shift)
+{
+ int psize;
+
+ for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
+ if (mmu_psize_defs[psize].shift == shift)
+ return psize;
+ return -1;
+}
+
+static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
+{
+ if (mmu_psize_defs[mmu_psize].shift)
+ return mmu_psize_defs[mmu_psize].shift;
+ BUG();
+}
#endif /* __ASSEMBLY__ */
@@ -181,6 +200,13 @@ struct mmu_psize_def
*/
#define VPN_SHIFT 12
+/*
+ * HPTE Large Page (LP) details
+ */
+#define LP_SHIFT 12
+#define LP_BITS 8
+#define LP_MASK(i) ((0xFF >> (i)) << LP_SHIFT)
+
#ifndef __ASSEMBLY__
static inline int segment_shift(int ssize)
@@ -193,7 +219,6 @@ static inline int segment_shift(int ssize)
/*
* The current system page and segment sizes
*/
-extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
extern int mmu_linear_psize;
extern int mmu_virtual_psize;
extern int mmu_vmalloc_psize;
@@ -237,14 +262,14 @@ static inline unsigned long hpte_encode_avpn(unsigned long vpn, int psize,
/*
* This function sets the AVPN and L fields of the HPTE appropriately
- * for the page size
+ * using the base page size and actual page size.
*/
-static inline unsigned long hpte_encode_v(unsigned long vpn,
- int psize, int ssize)
+static inline unsigned long hpte_encode_v(unsigned long vpn, int base_psize,
+ int actual_psize, int ssize)
{
unsigned long v;
- v = hpte_encode_avpn(vpn, psize, ssize);
- if (psize != MMU_PAGE_4K)
+ v = hpte_encode_avpn(vpn, base_psize, ssize);
+ if (actual_psize != MMU_PAGE_4K)
v |= HPTE_V_LARGE;
return v;
}
@@ -254,19 +279,17 @@ static inline unsigned long hpte_encode_v(unsigned long vpn,
* for the page size. We assume the pa is already "clean" that is properly
* aligned for the requested page size
*/
-static inline unsigned long hpte_encode_r(unsigned long pa, int psize)
+static inline unsigned long hpte_encode_r(unsigned long pa, int base_psize,
+ int actual_psize)
{
- unsigned long r;
-
/* A 4K page needs no special encoding */
- if (psize == MMU_PAGE_4K)
+ if (actual_psize == MMU_PAGE_4K)
return pa & HPTE_R_RPN;
else {
- unsigned int penc = mmu_psize_defs[psize].penc;
- unsigned int shift = mmu_psize_defs[psize].shift;
- return (pa & ~((1ul << shift) - 1)) | (penc << 12);
+ unsigned int penc = mmu_psize_defs[base_psize].penc[actual_psize];
+ unsigned int shift = mmu_psize_defs[actual_psize].shift;
+ return (pa & ~((1ul << shift) - 1)) | (penc << LP_SHIFT);
}
- return r;
}
/*
@@ -319,7 +342,8 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
unsigned int shift, unsigned int mmu_psize);
extern void hash_failure_debug(unsigned long ea, unsigned long access,
unsigned long vsid, unsigned long trap,
- int ssize, int psize, unsigned long pte);
+ int ssize, int psize, int lpsize,
+ unsigned long pte);
extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
unsigned long pstart, unsigned long prot,
int psize, int ssize);
@@ -498,6 +522,10 @@ typedef struct {
unsigned long acop; /* mask of enabled coprocessor types */
unsigned int cop_pid; /* pid value used with coprocessors */
#endif /* CONFIG_PPC_ICSWX */
+#ifdef CONFIG_PPC_64K_PAGES
+ /* for 4K PTE fragment support */
+ void *pte_frag;
+#endif
} mm_context_t;
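
The central change in the mmu-hash64.h hunks above is that penc becomes a table indexed by the actual page size, and hpte_encode_r() now takes both a base and an actual page size, placing the encoding at LP_SHIFT. A toy model of that lookup; every table value below is invented purely for illustration (real penc values are architecture-defined), and the HPTE_R_RPN masking of the 4K case is omitted:

    #include <stdio.h>

    #define MMU_PAGE_4K    0
    #define MMU_PAGE_64K   1
    #define MMU_PAGE_16M   2
    #define MMU_PAGE_COUNT 3
    #define LP_SHIFT       12

    struct psize_def {
        unsigned int shift;
        int penc[MMU_PAGE_COUNT];        /* indexed by actual page size */
    };

    /* hypothetical values, for illustration only */
    static const struct psize_def defs[MMU_PAGE_COUNT] = {
        [MMU_PAGE_4K]  = { .shift = 12, .penc = {  0, -1, -1 } },
        [MMU_PAGE_64K] = { .shift = 16, .penc = { -1,  1, -1 } },
        [MMU_PAGE_16M] = { .shift = 24, .penc = { -1, -1,  0 } },
    };

    static unsigned long encode_r(unsigned long pa, int base, int actual)
    {
        if (actual == MMU_PAGE_4K)
            return pa;                   /* no LP encoding needed */
        return (pa & ~((1UL << defs[actual].shift) - 1)) |
               ((unsigned long)defs[base].penc[actual] << LP_SHIFT);
    }

    int main(void)
    {
        printf("%#lx\n", encode_r(0x41000000UL, MMU_PAGE_16M, MMU_PAGE_16M));
        return 0;
    }
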
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index a4b28f165b6c..b6c8b58b1d76 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -117,6 +117,7 @@ extern int opal_enter_rtas(struct rtas_args *args,
#define OPAL_SET_SLOT_LED_STATUS 55
#define OPAL_GET_EPOW_STATUS 56
#define OPAL_SET_SYSTEM_ATTENTION_LED 57
+#define OPAL_PCI_MSI_EOI 63
#ifndef __ASSEMBLY__
@@ -506,6 +507,7 @@ int64_t opal_pci_get_xive_reissue(uint64_t phb_id, uint32_t xive_number,
uint8_t *p_bit, uint8_t *q_bit);
int64_t opal_pci_set_xive_reissue(uint64_t phb_id, uint32_t xive_number,
uint8_t p_bit, uint8_t q_bit);
+int64_t opal_pci_msi_eoi(uint64_t phb_id, uint32_t hw_irq);
int64_t opal_pci_set_xive_pe(uint64_t phb_id, uint32_t pe_number,
uint32_t xive_num);
int64_t opal_get_xive_source(uint64_t phb_id, uint32_t xive_num,
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index f072e974f8a2..988c812aab5b 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -249,6 +249,7 @@ extern long long virt_phys_offset;
#define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
#endif
+#ifndef CONFIG_PPC_BOOK3S_64
/*
* Use the top bit of the higher-level page table entries to indicate whether
* the entries we point to contain hugepages. This works because we know that
@@ -260,6 +261,7 @@ extern long long virt_phys_offset;
#else
#define PD_HUGE 0x80000000
#endif
+#endif /* CONFIG_PPC_BOOK3S_64 */
/*
* Some number of bits at the level of the page table that points to
@@ -354,14 +356,27 @@ typedef unsigned long pgprot_t;
typedef struct { signed long pd; } hugepd_t;
#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_PPC_BOOK3S_64
+static inline int hugepd_ok(hugepd_t hpd)
+{
+ /*
+ * hugepd pointer, bottom two bits == 00 and next 4 bits
+ * indicate size of table
+ */
+ return (((hpd.pd & 0x3) == 0x0) && ((hpd.pd & HUGEPD_SHIFT_MASK) != 0));
+}
+#else
static inline int hugepd_ok(hugepd_t hpd)
{
return (hpd.pd > 0);
}
+#endif
#define is_hugepd(pdep) (hugepd_ok(*((hugepd_t *)(pdep))))
+int pgd_huge(pgd_t pgd);
#else /* CONFIG_HUGETLB_PAGE */
#define is_hugepd(pdep) 0
+#define pgd_huge(pgd) 0
#endif /* CONFIG_HUGETLB_PAGE */
struct page;
@@ -378,7 +393,11 @@ void arch_free_page(struct page *page, int order);
struct vm_area_struct;
+#ifdef CONFIG_PPC_64K_PAGES
+typedef pte_t *pgtable_t;
+#else
typedef struct page *pgtable_t;
+#endif
#include <asm-generic/memory_model.h>
#endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
index cd915d6b093d..88693cef4f3d 100644
--- a/arch/powerpc/include/asm/page_64.h
+++ b/arch/powerpc/include/asm/page_64.h
@@ -99,8 +99,7 @@ extern unsigned long slice_get_unmapped_area(unsigned long addr,
unsigned long len,
unsigned long flags,
unsigned int psize,
- int topdown,
- int use_cache);
+ int topdown);
extern unsigned int get_slice_psize(struct mm_struct *mm,
unsigned long addr);
diff --git a/arch/powerpc/include/asm/parport.h b/arch/powerpc/include/asm/parport.h
index 6dc2577932b1..a452968b29ea 100644
--- a/arch/powerpc/include/asm/parport.h
+++ b/arch/powerpc/include/asm/parport.h
@@ -21,9 +21,7 @@ static int parport_pc_find_nonpci_ports (int autoirq, int autodma)
int count = 0;
int virq;
- for (np = NULL; (np = of_find_compatible_node(np,
- "parallel",
- "pnpPNP,400")) != NULL;) {
+ for_each_compatible_node(np, "parallel", "pnpPNP,400") {
prop = of_get_property(np, "reg", &propsize);
if (!prop || propsize > 6*sizeof(u32))
continue;
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
index 025a130729bc..8b11b5bd9938 100644
--- a/arch/powerpc/include/asm/pci-bridge.h
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -39,11 +39,6 @@ struct pci_controller {
resource_size_t io_base_phys;
resource_size_t pci_io_size;
- /* Some machines (PReP) have a non 1:1 mapping of
- * the PCI memory space in the CPU bus space
- */
- resource_size_t pci_mem_offset;
-
/* Some machines have a special region to forward the ISA
* "memory" cycles such as VGA memory regions. Left to 0
* if unsupported
@@ -70,6 +65,8 @@ struct pci_controller {
* BIG_ENDIAN - cfg_addr is a big endian register
* BROKEN_MRM - the 440EPx/GRx chips have an errata that causes hangs on
* the PLB4. Effectively disable MRM commands by setting this.
+ * FSL_CFG_REG_LINK - Freescale controller version in which the PCIe
+ * link status is in a RC PCIe cfg register (vs being a SoC register)
*/
#define PPC_INDIRECT_TYPE_SET_CFG_TYPE 0x00000001
#define PPC_INDIRECT_TYPE_EXT_REG 0x00000002
@@ -77,12 +74,14 @@ struct pci_controller {
#define PPC_INDIRECT_TYPE_NO_PCIE_LINK 0x00000008
#define PPC_INDIRECT_TYPE_BIG_ENDIAN 0x00000010
#define PPC_INDIRECT_TYPE_BROKEN_MRM 0x00000020
+#define PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK 0x00000040
u32 indirect_type;
/* Currently, we limit ourselves to 1 IO range and 3 mem
* ranges since the common pci_bus structure can't handle more
*/
struct resource io_resource;
struct resource mem_resources[3];
+ resource_size_t mem_offset[3];
int global_number; /* PCI domain number */
resource_size_t dma_window_base_cur;
@@ -90,9 +89,9 @@ struct pci_controller {
#ifdef CONFIG_PPC64
unsigned long buid;
+#endif /* CONFIG_PPC64 */
void *private_data;
-#endif /* CONFIG_PPC64 */
};
/* These are used for config access before all the PCI probing
@@ -117,6 +116,12 @@ extern void setup_indirect_pci(struct pci_controller* hose,
resource_size_t cfg_addr,
resource_size_t cfg_data, u32 flags);
+extern int indirect_read_config(struct pci_bus *bus, unsigned int devfn,
+ int offset, int len, u32 *val);
+
+extern int indirect_write_config(struct pci_bus *bus, unsigned int devfn,
+ int offset, int len, u32 val);
+
static inline struct pci_controller *pci_bus_to_host(const struct pci_bus *bus)
{
return bus->sysdata;
@@ -154,6 +159,8 @@ struct pci_dn {
int pci_ext_config_space; /* for pci devices */
+ int force_32bit_msi:1;
+
struct pci_dev *pcidev; /* back-pointer to the pci device */
#ifdef CONFIG_EEH
struct eeh_dev *edev; /* eeh device */
diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h
index d0aec72722e9..f265049dd7d6 100644
--- a/arch/powerpc/include/asm/perf_event_server.h
+++ b/arch/powerpc/include/asm/perf_event_server.h
@@ -33,6 +33,8 @@ struct power_pmu {
unsigned long *valp);
int (*get_alternatives)(u64 event_id, unsigned int flags,
u64 alt[]);
+ u64 (*bhrb_filter_map)(u64 branch_sample_type);
+ void (*config_bhrb)(u64 pmu_bhrb_filter);
void (*disable_pmc)(unsigned int pmc, unsigned long mmcr[]);
int (*limited_pmc_event)(u64 event_id);
u32 flags;
@@ -42,6 +44,9 @@ struct power_pmu {
int (*cache_events)[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX];
+
+ /* BHRB entries in the PMU */
+ int bhrb_nr;
};
/*
@@ -52,6 +57,9 @@ struct power_pmu {
#define PPMU_NO_SIPR 0x00000004 /* no SIPR/HV in MMCRA at all */
#define PPMU_NO_CONT_SAMPLING 0x00000008 /* no continuous sampling */
#define PPMU_SIAR_VALID 0x00000010 /* Processor has SIAR Valid bit */
+#define PPMU_HAS_SSLOT 0x00000020 /* Has sampled slot in MMCRA */
+#define PPMU_HAS_SIER 0x00000040 /* Has SIER */
+#define PPMU_BHRB 0x00000080 /* has BHRB feature enabled */
/*
* Values for flags to get_alternatives()
@@ -65,6 +73,7 @@ extern int register_power_pmu(struct power_pmu *);
struct pt_regs;
extern unsigned long perf_misc_flags(struct pt_regs *regs);
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
+extern unsigned long int read_bhrb(int n);
/*
* Only override the default definitions in include/linux/perf_event.h
diff --git a/arch/powerpc/include/asm/pgalloc-32.h b/arch/powerpc/include/asm/pgalloc-32.h
index 580cf73b96e8..27b2386f738a 100644
--- a/arch/powerpc/include/asm/pgalloc-32.h
+++ b/arch/powerpc/include/asm/pgalloc-32.h
@@ -37,6 +37,17 @@ extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr);
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+ free_page((unsigned long)pte);
+}
+
+static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
+{
+ pgtable_page_dtor(ptepage);
+ __free_page(ptepage);
+}
+
static inline void pgtable_free(void *table, unsigned index_size)
{
BUG_ON(index_size); /* 32-bit doesn't use this */
@@ -45,4 +56,38 @@ static inline void pgtable_free(void *table, unsigned index_size)
#define check_pgt_cache() do { } while (0)
+#ifdef CONFIG_SMP
+static inline void pgtable_free_tlb(struct mmu_gather *tlb,
+ void *table, int shift)
+{
+ unsigned long pgf = (unsigned long)table;
+ BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
+ pgf |= shift;
+ tlb_remove_table(tlb, (void *)pgf);
+}
+
+static inline void __tlb_remove_table(void *_table)
+{
+ void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
+ unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;
+
+ pgtable_free(table, shift);
+}
+#else
+static inline void pgtable_free_tlb(struct mmu_gather *tlb,
+ void *table, int shift)
+{
+ pgtable_free(table, shift);
+}
+#endif
+
+static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
+ unsigned long address)
+{
+ struct page *page = page_address(table);
+
+ tlb_flush_pgtable(tlb, address);
+ pgtable_page_dtor(page);
+ pgtable_free_tlb(tlb, page, 0);
+}
#endif /* _ASM_POWERPC_PGALLOC_32_H */
diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
index 292725cec2e3..91acb12bac92 100644
--- a/arch/powerpc/include/asm/pgalloc-64.h
+++ b/arch/powerpc/include/asm/pgalloc-64.h
@@ -35,7 +35,10 @@ struct vmemmap_backing {
#define MAX_PGTABLE_INDEX_SIZE 0xf
extern struct kmem_cache *pgtable_cache[];
-#define PGT_CACHE(shift) (pgtable_cache[(shift)-1])
+#define PGT_CACHE(shift) ({ \
+ BUG_ON(!(shift)); \
+ pgtable_cache[(shift) - 1]; \
+ })
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
@@ -72,8 +75,100 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
#define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
#define pmd_pgtable(pmd) pmd_page(pmd)
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+ unsigned long address)
+{
+ return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
+}
+
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
+ unsigned long address)
+{
+ struct page *page;
+ pte_t *pte;
+
+ pte = pte_alloc_one_kernel(mm, address);
+ if (!pte)
+ return NULL;
+ page = virt_to_page(pte);
+ pgtable_page_ctor(page);
+ return page;
+}
+
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+ free_page((unsigned long)pte);
+}
+
+static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
+{
+ pgtable_page_dtor(ptepage);
+ __free_page(ptepage);
+}
+
+static inline void pgtable_free(void *table, unsigned index_size)
+{
+ if (!index_size)
+ free_page((unsigned long)table);
+ else {
+ BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE);
+ kmem_cache_free(PGT_CACHE(index_size), table);
+ }
+}
+
+#ifdef CONFIG_SMP
+static inline void pgtable_free_tlb(struct mmu_gather *tlb,
+ void *table, int shift)
+{
+ unsigned long pgf = (unsigned long)table;
+ BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
+ pgf |= shift;
+ tlb_remove_table(tlb, (void *)pgf);
+}
+
+static inline void __tlb_remove_table(void *_table)
+{
+ void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
+ unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;
+
+ pgtable_free(table, shift);
+}
+#else /* !CONFIG_SMP */
+static inline void pgtable_free_tlb(struct mmu_gather *tlb,
+ void *table, int shift)
+{
+ pgtable_free(table, shift);
+}
+#endif /* CONFIG_SMP */
+
+static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
+ unsigned long address)
+{
+ struct page *page = page_address(table);
+
+ tlb_flush_pgtable(tlb, address);
+ pgtable_page_dtor(page);
+ pgtable_free_tlb(tlb, page, 0);
+}
+
+#else /* if CONFIG_PPC_64K_PAGES */
+/*
+ * we support 16 fragments per PTE page.
+ */
+#define PTE_FRAG_NR 16
+/*
+ * We use a 2K PTE page fragment and another 2K for storing
+ * real_pte_t hash index
+ */
+#define PTE_FRAG_SIZE_SHIFT 12
+#define PTE_FRAG_SIZE (2 * PTRS_PER_PTE * sizeof(pte_t))
-#else /* CONFIG_PPC_64K_PAGES */
+extern pte_t *page_table_alloc(struct mm_struct *, unsigned long, int);
+extern void page_table_free(struct mm_struct *, unsigned long *, int);
+extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
+#ifdef CONFIG_SMP
+extern void __tlb_remove_table(void *_table);
+#endif
#define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
@@ -83,51 +178,56 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
pmd_set(pmd, (unsigned long)pte);
}
-#define pmd_populate(mm, pmd, pte_page) \
- pmd_populate_kernel(mm, pmd, page_address(pte_page))
-#define pmd_pgtable(pmd) pmd_page(pmd)
-
-#endif /* CONFIG_PPC_64K_PAGES */
-
-static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
+ pgtable_t pte_page)
{
- return kmem_cache_alloc(PGT_CACHE(PMD_INDEX_SIZE),
- GFP_KERNEL|__GFP_REPEAT);
+ pmd_set(pmd, (unsigned long)pte_page);
}
-static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+static inline pgtable_t pmd_pgtable(pmd_t pmd)
{
- kmem_cache_free(PGT_CACHE(PMD_INDEX_SIZE), pmd);
+ return (pgtable_t)(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE);
}
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
{
- return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
+ return (pte_t *)page_table_alloc(mm, address, 1);
}
static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
unsigned long address)
{
- struct page *page;
- pte_t *pte;
+ return (pgtable_t)page_table_alloc(mm, address, 0);
+}
- pte = pte_alloc_one_kernel(mm, address);
- if (!pte)
- return NULL;
- page = virt_to_page(pte);
- pgtable_page_ctor(page);
- return page;
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+ page_table_free(mm, (unsigned long *)pte, 1);
}
-static inline void pgtable_free(void *table, unsigned index_size)
+static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
{
- if (!index_size)
- free_page((unsigned long)table);
- else {
- BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE);
- kmem_cache_free(PGT_CACHE(index_size), table);
- }
+ page_table_free(mm, (unsigned long *)ptepage, 0);
+}
+
+static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
+ unsigned long address)
+{
+ tlb_flush_pgtable(tlb, address);
+ pgtable_free_tlb(tlb, table, 0);
+}
+#endif /* CONFIG_PPC_64K_PAGES */
+
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+ return kmem_cache_alloc(PGT_CACHE(PMD_INDEX_SIZE),
+ GFP_KERNEL|__GFP_REPEAT);
+}
+
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+{
+ kmem_cache_free(PGT_CACHE(PMD_INDEX_SIZE), pmd);
}
#define __pmd_free_tlb(tlb, pmd, addr) \
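
The fragment constants in the 64K-page branch above are easy to sanity-check: assuming 8-byte PTEs and the PTE_INDEX_SIZE of 8 from the pgtable-ppc64-64k.h hunk further down, a fragment is a 2K PTE half plus a 2K hash-index half, i.e. 4K (matching PTE_FRAG_SIZE_SHIFT of 12), and sixteen such fragments fill one 64K page (matching PTE_FRAG_NR). A quick standalone check:

    #include <stdio.h>

    int main(void)
    {
        unsigned long ptrs_per_pte   = 1UL << 8;          /* PTE_INDEX_SIZE = 8   */
        unsigned long frag_size      = 2 * ptrs_per_pte * 8; /* PTE half + hash half */
        unsigned long frags_per_page = (64 * 1024) / frag_size;

        printf("frag=%lu bytes, matches 1<<12: %d, frags/page=%lu\n",
               frag_size, frag_size == (1UL << 12), frags_per_page);
        /* prints: frag=4096 bytes, matches 1<<12: 1, frags/page=16 */
        return 0;
    }
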
diff --git a/arch/powerpc/include/asm/pgalloc.h b/arch/powerpc/include/asm/pgalloc.h
index bf301ac62f35..e9a9f60e596d 100644
--- a/arch/powerpc/include/asm/pgalloc.h
+++ b/arch/powerpc/include/asm/pgalloc.h
@@ -3,6 +3,7 @@
#ifdef __KERNEL__
#include <linux/mm.h>
+#include <asm-generic/tlb.h>
#ifdef CONFIG_PPC_BOOK3E
extern void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address);
@@ -13,56 +14,11 @@ static inline void tlb_flush_pgtable(struct mmu_gather *tlb,
}
#endif /* !CONFIG_PPC_BOOK3E */
-static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
-{
- free_page((unsigned long)pte);
-}
-
-static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
-{
- pgtable_page_dtor(ptepage);
- __free_page(ptepage);
-}
-
#ifdef CONFIG_PPC64
#include <asm/pgalloc-64.h>
#else
#include <asm/pgalloc-32.h>
#endif
-#ifdef CONFIG_SMP
-struct mmu_gather;
-extern void tlb_remove_table(struct mmu_gather *, void *);
-
-static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
-{
- unsigned long pgf = (unsigned long)table;
- BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
- pgf |= shift;
- tlb_remove_table(tlb, (void *)pgf);
-}
-
-static inline void __tlb_remove_table(void *_table)
-{
- void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
- unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;
-
- pgtable_free(table, shift);
-}
-#else /* CONFIG_SMP */
-static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, unsigned shift)
-{
- pgtable_free(table, shift);
-}
-#endif /* !CONFIG_SMP */
-
-static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *ptepage,
- unsigned long address)
-{
- tlb_flush_pgtable(tlb, address);
- pgtable_page_dtor(ptepage);
- pgtable_free_tlb(tlb, page_address(ptepage), 0);
-}
-
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_PGALLOC_H */
diff --git a/arch/powerpc/include/asm/pgtable-ppc64-64k.h b/arch/powerpc/include/asm/pgtable-ppc64-64k.h
index be4e2878fbc0..45142d640720 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64-64k.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64-64k.h
@@ -4,10 +4,10 @@
#include <asm-generic/pgtable-nopud.h>
-#define PTE_INDEX_SIZE 12
-#define PMD_INDEX_SIZE 12
+#define PTE_INDEX_SIZE 8
+#define PMD_INDEX_SIZE 10
#define PUD_INDEX_SIZE 0
-#define PGD_INDEX_SIZE 6
+#define PGD_INDEX_SIZE 12
#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE (sizeof(real_pte_t) << PTE_INDEX_SIZE)
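
The new index sizes above imply the following shift arithmetic for 64K pages, assuming the usual folded PUD (PUD_INDEX_SIZE of 0) and a 16-bit page offset:

    #include <stdio.h>

    int main(void)
    {
        int page_shift = 16;              /* 64K pages           */
        int pmd_shift  = page_shift + 8;  /* PTE_INDEX_SIZE = 8  */
        int pgd_shift  = pmd_shift + 10;  /* PMD_INDEX_SIZE = 10 */
        int va_bits    = pgd_shift + 12;  /* PGD_INDEX_SIZE = 12 */

        printf("PMD_SHIFT=%d PGDIR_SHIFT=%d va_bits=%d (%lu TB)\n",
               pmd_shift, pgd_shift, va_bits, 1UL << (va_bits - 40));
        /* prints: PMD_SHIFT=24 PGDIR_SHIFT=34 va_bits=46 (64 TB) */
        return 0;
    }
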
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index 0182c203e411..e3d55f6f24fe 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -167,8 +167,7 @@
* Find an entry in a page-table-directory. We combine the address region
* (the high order N bits) and the pgd portion of the address.
*/
-/* to avoid overflow in free_pgtables we don't use PTRS_PER_PGD here */
-#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & 0x1ff)
+#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & (PTRS_PER_PGD - 1))
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index a9cbd3ba5c33..7aeb9555f6ea 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -17,6 +17,12 @@ struct mm_struct;
# include <asm/pgtable-ppc32.h>
#endif
+/*
+ * We save the slot number & secondary bit in the second half of the
+ * PTE page. We use the 8 bytes per each pte entry.
+ */
+#define PTE_PAGE_HIDX_OFFSET (PTRS_PER_PTE * 8)
+
#ifndef __ASSEMBLY__
#include <asm/tlbflush.h>
@@ -212,6 +218,8 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
extern int gup_hugepd(hugepd_t *hugepd, unsigned pdshift, unsigned long addr,
unsigned long end, int write, struct page **pages, int *nr);
+extern int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
+ unsigned long end, int write, struct page **pages, int *nr);
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 8752bc8e34a3..eccfc161e58e 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -82,6 +82,8 @@
#define __REGA0_R31 31
/* sorted alphabetically */
+#define PPC_INST_BHRBE 0x7c00025c
+#define PPC_INST_CLRBHRB 0x7c00035c
#define PPC_INST_DCBA 0x7c0005ec
#define PPC_INST_DCBA_MASK 0xfc0007fe
#define PPC_INST_DCBAL 0x7c2005ec
@@ -113,6 +115,10 @@
#define PPC_INST_MFSPR_DSCR_MASK 0xfc1fffff
#define PPC_INST_MTSPR_DSCR 0x7c1103a6
#define PPC_INST_MTSPR_DSCR_MASK 0xfc1fffff
+#define PPC_INST_MFSPR_DSCR_USER 0x7c0302a6
+#define PPC_INST_MFSPR_DSCR_USER_MASK 0xfc1fffff
+#define PPC_INST_MTSPR_DSCR_USER 0x7c0303a6
+#define PPC_INST_MTSPR_DSCR_USER_MASK 0xfc1fffff
#define PPC_INST_SLBFEE 0x7c0007a7
#define PPC_INST_STRING 0x7c00042a
@@ -297,6 +303,12 @@
#define PPC_NAP stringify_in_c(.long PPC_INST_NAP)
#define PPC_SLEEP stringify_in_c(.long PPC_INST_SLEEP)
+/* BHRB instructions */
+#define PPC_CLRBHRB stringify_in_c(.long PPC_INST_CLRBHRB)
+#define PPC_MFBHRBE(r, n) stringify_in_c(.long PPC_INST_BHRBE | \
+ __PPC_RT(r) | \
+ (((n) & 0x3ff) << 11))
+
/* Transactional memory instructions */
#define TRECHKPT stringify_in_c(.long PPC_INST_TRECHKPT)
#define TRECLAIM(r) stringify_in_c(.long PPC_INST_TRECLAIM \
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 7ff9eaa3ea6c..d7e67ca8b4a6 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -40,7 +40,7 @@
* -- BenH.
*/
-/* PREP sub-platform types see residual.h for these */
+/* PREP sub-platform types. Unused */
#define _PREP_Motorola 0x01 /* motorola prep */
#define _PREP_Firm 0x02 /* firmworks prep */
#define _PREP_IBM 0x00 /* ibm prep */
@@ -56,13 +56,6 @@
extern int _chrp_type;
-#ifdef CONFIG_PPC_PREP
-
-/* what kind of prep workstation we are */
-extern int _prep_type;
-
-#endif /* CONFIG_PPC_PREP */
-
#endif /* defined(__KERNEL__) && defined(CONFIG_PPC32) */
/*
@@ -288,6 +281,9 @@ struct thread_struct {
#endif
#ifdef CONFIG_PPC_BOOK3S_64
unsigned long tar;
+ unsigned long ebbrr;
+ unsigned long ebbhr;
+ unsigned long bescr;
#endif
};
diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h
index 99c92d5363e4..bc2da154f68b 100644
--- a/arch/powerpc/include/asm/prom.h
+++ b/arch/powerpc/include/asm/prom.h
@@ -74,6 +74,75 @@ struct of_drconf_cell {
#define DRCONF_MEM_AI_INVALID 0x00000040
#define DRCONF_MEM_RESERVED 0x00000080
+/*
+ * There are two methods for telling firmware what our capabilities are.
+ * Newer machines have an "ibm,client-architecture-support" method on the
+ * root node. For older machines, we have to call the "process-elf-header"
+ * method in the /packages/elf-loader node, passing it a fake 32-bit
+ * ELF header containing a couple of PT_NOTE sections that hold
+ * structures describing our capabilities.
+ */
+
+/* New method - extensible architecture description vector. */
+
+/* Option vector bits - generic bits in byte 1 */
+#define OV_IGNORE 0x80 /* ignore this vector */
+#define OV_CESSATION_POLICY 0x40 /* halt if unsupported option present */
+
+/* Option vector 1: processor architectures supported */
+#define OV1_PPC_2_00 0x80 /* set if we support PowerPC 2.00 */
+#define OV1_PPC_2_01 0x40 /* set if we support PowerPC 2.01 */
+#define OV1_PPC_2_02 0x20 /* set if we support PowerPC 2.02 */
+#define OV1_PPC_2_03 0x10 /* set if we support PowerPC 2.03 */
+#define OV1_PPC_2_04 0x08 /* set if we support PowerPC 2.04 */
+#define OV1_PPC_2_05 0x04 /* set if we support PowerPC 2.05 */
+#define OV1_PPC_2_06 0x02 /* set if we support PowerPC 2.06 */
+#define OV1_PPC_2_07 0x01 /* set if we support PowerPC 2.07 */
+
+/* Option vector 2: Open Firmware options supported */
+#define OV2_REAL_MODE 0x20 /* set if we want OF in real mode */
+
+/* Option vector 3: processor options supported */
+#define OV3_FP 0x80 /* floating point */
+#define OV3_VMX 0x40 /* VMX/Altivec */
+#define OV3_DFP 0x20 /* decimal FP */
+
+/* Option vector 4: IBM PAPR implementation */
+#define OV4_MIN_ENT_CAP 0x01 /* minimum VP entitled capacity */
+
+/* Option vector 5: PAPR/OF options supported
+ * These bits are also used in firmware_has_feature() to validate
+ * the capabilities reported for vector 5 in the device tree so we
+ * encode the vector index in the define and use the OV5_FEAT()
+ * and OV5_INDX() macros to extract the desired information.
+ */
+#define OV5_FEAT(x) ((x) & 0xff)
+#define OV5_INDX(x) ((x) >> 8)
+#define OV5_LPAR 0x0280 /* logical partitioning supported */
+#define OV5_SPLPAR 0x0240 /* shared-processor LPAR supported */
+/* ibm,dynamic-reconfiguration-memory property supported */
+#define OV5_DRCONF_MEMORY 0x0220
+#define OV5_LARGE_PAGES 0x0210 /* large pages supported */
+#define OV5_DONATE_DEDICATE_CPU 0x0202 /* donate dedicated CPU support */
+#define OV5_MSI 0x0201 /* PCIe/MSI support */
+#define OV5_CMO 0x0480 /* Cooperative Memory Overcommitment */
+#define OV5_XCMO 0x0440 /* Page Coalescing */
+#define OV5_TYPE1_AFFINITY 0x0580 /* Type 1 NUMA affinity */
+#define OV5_PRRN 0x0540 /* Platform Resource Reassignment */
+#define OV5_PFO_HW_RNG 0x0E80 /* PFO Random Number Generator */
+#define OV5_PFO_HW_842 0x0E40 /* PFO Compression Accelerator */
+#define OV5_PFO_HW_ENCR 0x0E20 /* PFO Encryption Accelerator */
+#define OV5_SUB_PROCESSORS 0x0F01 /* 1, 2, or 4 Sub-Processors supported */
+
+/* Option Vector 6: IBM PAPR hints */
+#define OV6_LINUX 0x02 /* Linux is our OS */
+
+/*
+ * The architecture vector has an array of PVR mask/value pairs,
+ * followed by # option vectors - 1, followed by the option vectors.
+ */
+extern unsigned char ibm_architecture_vec[];
+
/* These includes are put at the bottom because they may contain things
* that are overridden by this file. Ideally they shouldn't be included
* by this file, but there are a bunch of .c files that currently depend
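
For reference, a small sketch of how the OV5_* encoding above decodes: the upper byte of each constant is the byte index within option vector 5 and the lower byte is the feature mask within that byte, which is what the OV5_INDX()/OV5_FEAT() lookup in firmware_has_feature() relies on.

#include <stdio.h>

#define OV5_FEAT(x)	((x) & 0xff)
#define OV5_INDX(x)	((x) >> 8)
#define OV5_LPAR	0x0280
#define OV5_PFO_HW_RNG	0x0E80

int main(void)
{
	/* OV5_LPAR: byte 2 of vector 5, feature mask 0x80 */
	printf("LPAR:   byte %d, mask 0x%02x\n",
	       OV5_INDX(OV5_LPAR), OV5_FEAT(OV5_LPAR));
	/* OV5_PFO_HW_RNG: byte 14 of vector 5, feature mask 0x80 */
	printf("HW_RNG: byte %d, mask 0x%02x\n",
	       OV5_INDX(OV5_PFO_HW_RNG), OV5_FEAT(OV5_PFO_HW_RNG));
	return 0;
}
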
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
index 5f995681bc1d..becc08e6a65c 100644
--- a/arch/powerpc/include/asm/ptrace.h
+++ b/arch/powerpc/include/asm/ptrace.h
@@ -92,7 +92,8 @@ static inline long regs_return_value(struct pt_regs *regs)
} while(0)
struct task_struct;
-extern unsigned long ptrace_get_reg(struct task_struct *task, int regno);
+extern int ptrace_get_reg(struct task_struct *task, int regno,
+ unsigned long *data);
extern int ptrace_put_reg(struct task_struct *task, int regno,
unsigned long data);
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index c9c67fc888c9..a6136515c7f2 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -267,7 +267,17 @@
#define SPRN_HSRR1 0x13B /* Hypervisor Save/Restore 1 */
#define SPRN_FSCR 0x099 /* Facility Status & Control Register */
#define FSCR_TAR (1 << (63-55)) /* Enable Target Address Register */
+#define FSCR_EBB (1 << (63-56)) /* Enable Event Based Branching */
#define FSCR_DSCR (1 << (63-61)) /* Enable Data Stream Control Register */
+#define SPRN_HFSCR 0xbe /* HV=1 Facility Status & Control Register */
+#define HFSCR_TAR (1 << (63-55)) /* Enable Target Address Register */
+#define HFSCR_EBB (1 << (63-56)) /* Enable Event Based Branching */
+#define HFSCR_TM (1 << (63-58)) /* Enable Transactional Memory */
+#define HFSCR_PM (1 << (63-60)) /* Enable prob/priv access to PMU SPRs */
+#define HFSCR_BHRB (1 << (63-59)) /* Enable Branch History Rolling Buffer */
+#define HFSCR_DSCR (1 << (63-61)) /* Enable Data Stream Control Register */
+#define HFSCR_VECVSX (1 << (63-62)) /* Enable VMX/VSX */
+#define HFSCR_FP (1 << (63-63)) /* Enable Floating Point */
#define SPRN_TAR 0x32f /* Target Address Register */
#define SPRN_LPCR 0x13E /* LPAR Control Register */
#define LPCR_VPM0 (1ul << (63-0))
@@ -290,6 +300,7 @@
#define LPCR_PECE1 0x00002000 /* decrementer can cause exit */
#define LPCR_PECE2 0x00001000 /* machine check etc can cause exit */
#define LPCR_MER 0x00000800 /* Mediated External Exception */
+#define LPCR_MER_SH 11
#define LPCR_LPES 0x0000000c
#define LPCR_LPES0 0x00000008 /* LPAR Env selector 0 */
#define LPCR_LPES1 0x00000004 /* LPAR Env selector 1 */
@@ -631,6 +642,7 @@
#define MMCR0_FCWAIT 0x00000002UL /* freeze counter in WAIT state */
#define MMCR0_FCHV 0x00000001UL /* freeze conditions in hypervisor mode */
#define SPRN_MMCR1 798
+#define SPRN_MMCR2 769
#define SPRN_MMCRA 0x312
#define MMCRA_SDSYNC 0x80000000UL /* SDAR synced with SIAR */
#define MMCRA_SDAR_DCACHE_MISS 0x40000000UL
@@ -649,6 +661,13 @@
#define POWER7P_MMCRA_SIAR_VALID 0x10000000 /* P7+ SIAR contents valid */
#define POWER7P_MMCRA_SDAR_VALID 0x08000000 /* P7+ SDAR contents valid */
+#define SPRN_MMCRH 316 /* Hypervisor monitor mode control register */
+#define SPRN_MMCRS 894 /* Supervisor monitor mode control register */
+#define SPRN_MMCRC 851 /* Core monitor mode control register */
+#define SPRN_EBBHR 804 /* Event based branch handler register */
+#define SPRN_EBBRR 805 /* Event based branch return register */
+#define SPRN_BESCR 806 /* Branch event status and control register */
+
#define SPRN_PMC1 787
#define SPRN_PMC2 788
#define SPRN_PMC3 789
@@ -659,6 +678,11 @@
#define SPRN_PMC8 794
#define SPRN_SIAR 780
#define SPRN_SDAR 781
+#define SPRN_SIER 784
+#define SIER_SIPR 0x2000000 /* Sampled MSR_PR */
+#define SIER_SIHV 0x1000000 /* Sampled MSR_HV */
+#define SIER_SIAR_VALID 0x0400000 /* SIAR contents valid */
+#define SIER_SDAR_VALID 0x0200000 /* SDAR contents valid */
#define SPRN_PA6T_MMCR0 795
#define PA6T_MMCR0_EN0 0x0000000000000001UL
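
For reference, the (1 << (63-n)) expressions in the FSCR/HFSCR definitions above follow IBM bit numbering, where bit 0 is the most significant bit of the 64-bit SPR. A small sketch of the resulting masks:

#include <stdio.h>

#define PPC_BIT(n)	(1ULL << (63 - (n)))	/* IBM bit n of a 64-bit register */

int main(void)
{
	printf("FSCR_TAR  (bit 55) = 0x%016llx\n", PPC_BIT(55));	/* 0x100 */
	printf("FSCR_EBB  (bit 56) = 0x%016llx\n", PPC_BIT(56));	/* 0x080 */
	printf("FSCR_DSCR (bit 61) = 0x%016llx\n", PPC_BIT(61));	/* 0x004 */
	return 0;
}
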
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index aef00c675905..a8bc2bb4adc9 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -143,6 +143,8 @@ struct rtas_suspend_me_data {
#define RTAS_TYPE_PMGM_TIME_ALARM 0x6f
#define RTAS_TYPE_PMGM_CONFIG_CHANGE 0x70
#define RTAS_TYPE_PMGM_SERVICE_PROC 0x71
+/* Platform Resource Reassignment Notification */
+#define RTAS_TYPE_PRRN 0xA0
/* RTAS check-exception vector offset */
#define RTAS_VECTOR_EXTERNAL_INTERRUPT 0x500
@@ -277,6 +279,10 @@ extern int early_init_dt_scan_rtas(unsigned long node,
extern void pSeries_log_error(char *buf, unsigned int err_type, int fatal);
+#ifdef CONFIG_PPC_PSERIES
+extern int pseries_devicetree_update(s32 scope);
+#endif
+
#ifdef CONFIG_PPC_RTAS_DAEMON
extern void rtas_cancel_event_scan(void);
#else
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index 195ce2ac5691..ffbaabebcdca 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -143,6 +143,8 @@ extern void __cpu_die(unsigned int cpu);
/* for UP */
#define hard_smp_processor_id() get_hard_smp_processor_id(0)
#define smp_setup_cpu_maps()
+static inline void inhibit_secondary_onlining(void) {}
+static inline void uninhibit_secondary_onlining(void) {}
#endif /* CONFIG_SMP */
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index ebbec52d21bd..43523fe0d8b4 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -190,7 +190,7 @@ SYSCALL_SPU(getcwd)
SYSCALL_SPU(capget)
SYSCALL_SPU(capset)
COMPAT_SYS(sigaltstack)
-SYSX_SPU(sys_sendfile,compat_sys_sendfile_wrapper,sys_sendfile)
+COMPAT_SYS_SPU(sendfile)
SYSCALL(ni_syscall)
SYSCALL(ni_syscall)
PPC_SYS(vfork)
@@ -230,7 +230,7 @@ COMPAT_SYS_SPU(sched_setaffinity)
COMPAT_SYS_SPU(sched_getaffinity)
SYSCALL(ni_syscall)
SYSCALL(ni_syscall)
-SYSX(sys_ni_syscall,compat_sys_sendfile64_wrapper,sys_sendfile64)
+SYS32ONLY(sendfile64)
COMPAT_SYS_SPU(io_setup)
SYSCALL_SPU(io_destroy)
COMPAT_SYS_SPU(io_getevents)
@@ -239,7 +239,7 @@ SYSCALL_SPU(io_cancel)
SYSCALL(set_tid_address)
SYSX_SPU(sys_fadvise64,ppc32_fadvise64,sys_fadvise64)
SYSCALL(exit_group)
-SYSX(sys_lookup_dcookie,ppc32_lookup_dcookie,sys_lookup_dcookie)
+COMPAT_SYS(lookup_dcookie)
SYSCALL_SPU(epoll_create)
SYSCALL_SPU(epoll_ctl)
SYSCALL_SPU(epoll_wait)
@@ -273,8 +273,8 @@ COMPAT_SYS(mq_timedreceive)
COMPAT_SYS(mq_notify)
COMPAT_SYS(mq_getsetattr)
COMPAT_SYS(kexec_load)
-COMPAT_SYS(add_key)
-COMPAT_SYS(request_key)
+SYSCALL(add_key)
+SYSCALL(request_key)
COMPAT_SYS(keyctl)
COMPAT_SYS(waitid)
SYSCALL(ioprio_set)
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index 406b7b9a1341..8ceea14d6fe4 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -182,8 +182,6 @@ static inline bool test_thread_local_flags(unsigned int flags)
#define is_32bit_task() (1)
#endif
-#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
-
#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index 852ed1b384f6..161ab662843b 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -71,6 +71,7 @@ static inline void sysfs_remove_device_from_node(struct device *dev,
#if defined(CONFIG_NUMA) && defined(CONFIG_PPC_SPLPAR)
extern int start_topology_update(void);
extern int stop_topology_update(void);
+extern int prrn_is_enabled(void);
#else
static inline int start_topology_update(void)
{
@@ -80,6 +81,10 @@ static inline int stop_topology_update(void)
{
return 0;
}
+static inline int prrn_is_enabled(void)
+{
+ return 0;
+}
#endif /* CONFIG_NUMA && CONFIG_PPC_SPLPAR */
#include <asm-generic/topology.h>
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index 1487f0f12293..3ca819f541bf 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -56,11 +56,5 @@
#define __ARCH_WANT_SYS_VFORK
#define __ARCH_WANT_SYS_CLONE
-/*
- * "Conditional" syscalls
- */
-#define cond_syscall(x) \
- asmlinkage long x (void) __attribute__((weak,alias("sys_ni_syscall")))
-
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_UNISTD_H_ */
diff --git a/arch/powerpc/include/asm/uprobes.h b/arch/powerpc/include/asm/uprobes.h
index b532060d0916..23016020915e 100644
--- a/arch/powerpc/include/asm/uprobes.h
+++ b/arch/powerpc/include/asm/uprobes.h
@@ -51,4 +51,5 @@ extern int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs);
extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data);
extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs);
+extern unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs);
#endif /* _ASM_UPROBES_H */
diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h
index 4ae9a09c3b89..282d43a0c855 100644
--- a/arch/powerpc/include/asm/xics.h
+++ b/arch/powerpc/include/asm/xics.h
@@ -150,6 +150,7 @@ extern void xics_register_ics(struct ics *ics);
extern void xics_teardown_cpu(void);
extern void xics_kexec_teardown_cpu(int secondary);
extern void xics_migrate_irqs_away(void);
+extern void icp_native_eoi(struct irq_data *d);
#ifdef CONFIG_SMP
extern int xics_get_irq_server(unsigned int virq, const struct cpumask *cpumask,
unsigned int strict_check);
diff --git a/arch/powerpc/include/uapi/asm/cputable.h b/arch/powerpc/include/uapi/asm/cputable.h
index ed9dd8156962..5b7657959faa 100644
--- a/arch/powerpc/include/uapi/asm/cputable.h
+++ b/arch/powerpc/include/uapi/asm/cputable.h
@@ -1,6 +1,7 @@
#ifndef _UAPI__ASM_POWERPC_CPUTABLE_H
#define _UAPI__ASM_POWERPC_CPUTABLE_H
+/* in AT_HWCAP */
#define PPC_FEATURE_32 0x80000000
#define PPC_FEATURE_64 0x40000000
#define PPC_FEATURE_601_INSTR 0x20000000
@@ -33,4 +34,12 @@
#define PPC_FEATURE_TRUE_LE 0x00000002
#define PPC_FEATURE_PPC_LE 0x00000001
+/* in AT_HWCAP2 */
+#define PPC_FEATURE2_ARCH_2_07 0x80000000
+#define PPC_FEATURE2_HTM 0x40000000
+#define PPC_FEATURE2_DSCR 0x20000000
+#define PPC_FEATURE2_EBB 0x10000000
+#define PPC_FEATURE2_ISEL 0x08000000
+#define PPC_FEATURE2_TAR 0x04000000
+
#endif /* _UAPI__ASM_POWERPC_CPUTABLE_H */
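
For reference, a minimal userspace sketch of testing the new AT_HWCAP2 bits defined above; it assumes the C library exposes getauxval() and AT_HWCAP2 (recent glibc does).

#include <stdio.h>
#include <sys/auxv.h>

#define PPC_FEATURE2_ARCH_2_07	0x80000000
#define PPC_FEATURE2_TAR	0x04000000

int main(void)
{
	unsigned long hwcap2 = getauxval(AT_HWCAP2);

	if (hwcap2 & PPC_FEATURE2_ARCH_2_07)
		printf("running on an ISA 2.07 CPU\n");
	if (hwcap2 & PPC_FEATURE2_TAR)
		printf("Target Address Register available to userspace\n");
	return 0;
}
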
diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
index 16064d00adb9..0fb1a6e9ff90 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -25,6 +25,8 @@
/* Select powerpc specific features in <linux/kvm.h> */
#define __KVM_HAVE_SPAPR_TCE
#define __KVM_HAVE_PPC_SMT
+#define __KVM_HAVE_IRQCHIP
+#define __KVM_HAVE_IRQ_LINE
struct kvm_regs {
__u64 pc;
@@ -272,8 +274,31 @@ struct kvm_debug_exit_arch {
/* for KVM_SET_GUEST_DEBUG */
struct kvm_guest_debug_arch {
+ struct {
+ /* H/W breakpoint/watchpoint address */
+ __u64 addr;
+ /*
+ * Type denotes h/w breakpoint, read watchpoint, write
+ * watchpoint or watchpoint (both read and write).
+ */
+#define KVMPPC_DEBUG_NONE 0x0
+#define KVMPPC_DEBUG_BREAKPOINT (1UL << 1)
+#define KVMPPC_DEBUG_WATCH_WRITE (1UL << 2)
+#define KVMPPC_DEBUG_WATCH_READ (1UL << 3)
+ __u32 type;
+ __u32 reserved;
+ } bp[16];
};
+/* Debug related defines */
+/*
+ * kvm_guest_debug->control is a 32-bit field. The lower 16 bits are generic
+ * and the upper 16 bits are architecture specific. The architecture-specific
+ * bits select whether this ioctl sets a hardware or a software breakpoint.
+ */
+#define KVM_GUESTDBG_USE_SW_BP 0x00010000
+#define KVM_GUESTDBG_USE_HW_BP 0x00020000
+
/* definition of registers in kvm_run */
struct kvm_sync_regs {
};
@@ -299,6 +324,12 @@ struct kvm_allocate_rma {
__u64 rma_size;
};
+/* for KVM_CAP_PPC_RTAS */
+struct kvm_rtas_token_args {
+ char name[120];
+ __u64 token; /* Use a token of 0 to undefine a mapping */
+};
+
struct kvm_book3e_206_tlb_entry {
__u32 mas8;
__u32 mas1;
@@ -359,6 +390,26 @@ struct kvm_get_htab_header {
__u16 n_invalid;
};
+/* Per-vcpu XICS interrupt controller state */
+#define KVM_REG_PPC_ICP_STATE (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x8c)
+
+#define KVM_REG_PPC_ICP_CPPR_SHIFT 56 /* current proc priority */
+#define KVM_REG_PPC_ICP_CPPR_MASK 0xff
+#define KVM_REG_PPC_ICP_XISR_SHIFT 32 /* interrupt status field */
+#define KVM_REG_PPC_ICP_XISR_MASK 0xffffff
+#define KVM_REG_PPC_ICP_MFRR_SHIFT 24 /* pending IPI priority */
+#define KVM_REG_PPC_ICP_MFRR_MASK 0xff
+#define KVM_REG_PPC_ICP_PPRI_SHIFT 16 /* pending irq priority */
+#define KVM_REG_PPC_ICP_PPRI_MASK 0xff
+
+/* Device control API: PPC-specific devices */
+#define KVM_DEV_MPIC_GRP_MISC 1
+#define KVM_DEV_MPIC_BASE_ADDR 0 /* 64-bit */
+
+#define KVM_DEV_MPIC_GRP_REGISTER 2 /* 32-bit */
+#define KVM_DEV_MPIC_GRP_IRQ_ACTIVE 3 /* 32-bit */
+
+/* One-Reg API: PPC-specific registers */
#define KVM_REG_PPC_HIOR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x1)
#define KVM_REG_PPC_IAC1 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x2)
#define KVM_REG_PPC_IAC2 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x3)
@@ -417,4 +468,47 @@ struct kvm_get_htab_header {
#define KVM_REG_PPC_EPCR (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x85)
#define KVM_REG_PPC_EPR (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x86)
+/* Timer Status Register OR/CLEAR interface */
+#define KVM_REG_PPC_OR_TSR (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x87)
+#define KVM_REG_PPC_CLEAR_TSR (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x88)
+#define KVM_REG_PPC_TCR (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x89)
+#define KVM_REG_PPC_TSR (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x8a)
+
+/* Debugging: Special instruction for software breakpoint */
+#define KVM_REG_PPC_DEBUG_INST (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x8b)
+
+/* MMU registers */
+#define KVM_REG_PPC_MAS0 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x8c)
+#define KVM_REG_PPC_MAS1 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x8d)
+#define KVM_REG_PPC_MAS2 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x8e)
+#define KVM_REG_PPC_MAS7_3 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x8f)
+#define KVM_REG_PPC_MAS4 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x90)
+#define KVM_REG_PPC_MAS6 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x91)
+#define KVM_REG_PPC_MMUCFG (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x92)
+/*
+ * The TLBnCFG fields TLBnCFG_N_ENTRY and TLBnCFG_ASSOC can be changed only
+ * through the KVM_CAP_SW_TLB ioctl
+ */
+#define KVM_REG_PPC_TLB0CFG (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x93)
+#define KVM_REG_PPC_TLB1CFG (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x94)
+#define KVM_REG_PPC_TLB2CFG (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x95)
+#define KVM_REG_PPC_TLB3CFG (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x96)
+#define KVM_REG_PPC_TLB0PS (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x97)
+#define KVM_REG_PPC_TLB1PS (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x98)
+#define KVM_REG_PPC_TLB2PS (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x99)
+#define KVM_REG_PPC_TLB3PS (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x9a)
+#define KVM_REG_PPC_EPTCFG (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x9b)
+
+/* PPC64 eXternal Interrupt Controller Specification */
+#define KVM_DEV_XICS_GRP_SOURCES 1 /* 64-bit source attributes */
+
+/* Layout of 64-bit source attribute values */
+#define KVM_XICS_DESTINATION_SHIFT 0
+#define KVM_XICS_DESTINATION_MASK 0xffffffffULL
+#define KVM_XICS_PRIORITY_SHIFT 32
+#define KVM_XICS_PRIORITY_MASK 0xff
+#define KVM_XICS_LEVEL_SENSITIVE (1ULL << 40)
+#define KVM_XICS_MASKED (1ULL << 41)
+#define KVM_XICS_PENDING (1ULL << 42)
+
#endif /* __LINUX_KVM_POWERPC_H */
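
For reference, a small sketch of packing and unpacking the 64-bit ICP state word accessed through KVM_REG_PPC_ICP_STATE, using the shift/mask definitions above; the field values are arbitrary examples.

#include <stdio.h>
#include <stdint.h>

#define KVM_REG_PPC_ICP_CPPR_SHIFT	56	/* current proc priority */
#define KVM_REG_PPC_ICP_CPPR_MASK	0xffULL
#define KVM_REG_PPC_ICP_XISR_SHIFT	32	/* interrupt status field */
#define KVM_REG_PPC_ICP_XISR_MASK	0xffffffULL
#define KVM_REG_PPC_ICP_MFRR_SHIFT	24	/* pending IPI priority */
#define KVM_REG_PPC_ICP_MFRR_MASK	0xffULL

int main(void)
{
	/* example values: CPPR 5, XISR 0x1001, MFRR 0xff (idle) */
	uint64_t state = (5ULL << KVM_REG_PPC_ICP_CPPR_SHIFT) |
			 (0x1001ULL << KVM_REG_PPC_ICP_XISR_SHIFT) |
			 (0xffULL << KVM_REG_PPC_ICP_MFRR_SHIFT);

	printf("cppr=%llu xisr=0x%llx mfrr=0x%llx\n",
	       (unsigned long long)((state >> KVM_REG_PPC_ICP_CPPR_SHIFT) & KVM_REG_PPC_ICP_CPPR_MASK),
	       (unsigned long long)((state >> KVM_REG_PPC_ICP_XISR_SHIFT) & KVM_REG_PPC_ICP_XISR_MASK),
	       (unsigned long long)((state >> KVM_REG_PPC_ICP_MFRR_SHIFT) & KVM_REG_PPC_ICP_MFRR_MASK));
	return 0;
}
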
diff --git a/arch/powerpc/include/uapi/asm/linkage.h b/arch/powerpc/include/uapi/asm/linkage.h
deleted file mode 100644
index e1c4ac1cc4ba..000000000000
--- a/arch/powerpc/include/uapi/asm/linkage.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_POWERPC_LINKAGE_H
-#define _ASM_POWERPC_LINKAGE_H
-
-/* Nothing to see here... */
-
-#endif /* _ASM_POWERPC_LINKAGE_H */
diff --git a/arch/powerpc/include/uapi/asm/ptrace.h b/arch/powerpc/include/uapi/asm/ptrace.h
index 66b9ca4ee94a..77d2ed35b111 100644
--- a/arch/powerpc/include/uapi/asm/ptrace.h
+++ b/arch/powerpc/include/uapi/asm/ptrace.h
@@ -211,6 +211,7 @@ struct ppc_debug_info {
#define PPC_DEBUG_FEATURE_INSN_BP_MASK 0x0000000000000002
#define PPC_DEBUG_FEATURE_DATA_BP_RANGE 0x0000000000000004
#define PPC_DEBUG_FEATURE_DATA_BP_MASK 0x0000000000000008
+#define PPC_DEBUG_FEATURE_DATA_BP_DAWR 0x0000000000000010
#ifndef __ASSEMBLY__
diff --git a/arch/powerpc/include/uapi/asm/socket.h b/arch/powerpc/include/uapi/asm/socket.h
index a26dcaece509..a36daf3c6f9a 100644
--- a/arch/powerpc/include/uapi/asm/socket.h
+++ b/arch/powerpc/include/uapi/asm/socket.h
@@ -79,4 +79,6 @@
#define SO_LOCK_FILTER 44
+#define SO_SELECT_ERR_QUEUE 45
+
#endif /* _ASM_POWERPC_SOCKET_H */
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index b6c17ec9b169..b51a97cfedf8 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -124,6 +124,9 @@ int main(void)
#ifdef CONFIG_PPC_BOOK3S_64
DEFINE(THREAD_TAR, offsetof(struct thread_struct, tar));
+ DEFINE(THREAD_BESCR, offsetof(struct thread_struct, bescr));
+ DEFINE(THREAD_EBBHR, offsetof(struct thread_struct, ebbhr));
+ DEFINE(THREAD_EBBRR, offsetof(struct thread_struct, ebbrr));
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
DEFINE(PACATMSCRATCH, offsetof(struct paca_struct, tm_scratch));
@@ -477,6 +480,7 @@ int main(void)
DEFINE(VCPU_DSISR, offsetof(struct kvm_vcpu, arch.shregs.dsisr));
DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar));
DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa.pinned_addr));
+ DEFINE(VCPU_VPA_DIRTY, offsetof(struct kvm_vcpu, arch.vpa.dirty));
#endif
#ifdef CONFIG_PPC_BOOK3S
DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id));
@@ -573,6 +577,8 @@ int main(void)
HSTATE_FIELD(HSTATE_KVM_VCPU, kvm_vcpu);
HSTATE_FIELD(HSTATE_KVM_VCORE, kvm_vcore);
HSTATE_FIELD(HSTATE_XICS_PHYS, xics_phys);
+ HSTATE_FIELD(HSTATE_SAVED_XIRR, saved_xirr);
+ HSTATE_FIELD(HSTATE_HOST_IPI, host_ipi);
HSTATE_FIELD(HSTATE_MMCR, host_mmcr);
HSTATE_FIELD(HSTATE_PMC, host_pmc);
HSTATE_FIELD(HSTATE_PURR, host_purr);
@@ -596,6 +602,7 @@ int main(void)
DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear));
DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr));
+ DEFINE(VCPU_CRIT_SAVE, offsetof(struct kvm_vcpu, arch.crit_save));
#endif /* CONFIG_PPC_BOOK3S */
#endif /* CONFIG_KVM */
diff --git a/arch/powerpc/kernel/cpu_setup_fsl_booke.S b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
index dcd881937f7a..0b9af015bedc 100644
--- a/arch/powerpc/kernel/cpu_setup_fsl_booke.S
+++ b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
@@ -53,6 +53,15 @@ _GLOBAL(__e500_dcache_setup)
isync
blr
+_GLOBAL(__setup_cpu_e6500)
+ mflr r6
+#ifdef CONFIG_PPC64
+ bl .setup_altivec_ivors
+#endif
+ bl __setup_cpu_e5500
+ mtlr r6
+ blr
+
#ifdef CONFIG_PPC32
_GLOBAL(__setup_cpu_e200)
/* enable dedicated debug exception handling resources (Debug APU) */
@@ -107,6 +116,13 @@ _GLOBAL(__setup_cpu_e5500)
#endif
#ifdef CONFIG_PPC_BOOK3E_64
+_GLOBAL(__restore_cpu_e6500)
+ mflr r5
+ bl .setup_altivec_ivors
+ bl __restore_cpu_e5500
+ mtlr r5
+ blr
+
_GLOBAL(__restore_cpu_e5500)
mflr r4
bl __e500_icache_setup
diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S
index ea847abb0d0a..a283b6442b26 100644
--- a/arch/powerpc/kernel/cpu_setup_power.S
+++ b/arch/powerpc/kernel/cpu_setup_power.S
@@ -49,6 +49,7 @@ _GLOBAL(__restore_cpu_power7)
_GLOBAL(__setup_cpu_power8)
mflr r11
bl __init_FSCR
+ bl __init_PMU
bl __init_hvmode_206
mtlr r11
beqlr
@@ -57,22 +58,28 @@ _GLOBAL(__setup_cpu_power8)
mfspr r3,SPRN_LPCR
oris r3, r3, LPCR_AIL_3@h
bl __init_LPCR
+ bl __init_HFSCR
bl __init_TLB
+ bl __init_PMU_HV
mtlr r11
blr
_GLOBAL(__restore_cpu_power8)
mflr r11
bl __init_FSCR
+ bl __init_PMU
mfmsr r3
rldicl. r0,r3,4,63
+ mtlr r11
beqlr
li r0,0
mtspr SPRN_LPID,r0
mfspr r3,SPRN_LPCR
oris r3, r3, LPCR_AIL_3@h
bl __init_LPCR
+ bl __init_HFSCR
bl __init_TLB
+ bl __init_PMU_HV
mtlr r11
blr
@@ -116,10 +123,17 @@ __init_LPCR:
__init_FSCR:
mfspr r3,SPRN_FSCR
- ori r3,r3,FSCR_TAR|FSCR_DSCR
+ ori r3,r3,FSCR_TAR|FSCR_DSCR|FSCR_EBB
mtspr SPRN_FSCR,r3
blr
+__init_HFSCR:
+ mfspr r3,SPRN_HFSCR
+ ori r3,r3,HFSCR_TAR|HFSCR_TM|HFSCR_BHRB|HFSCR_PM|\
+ HFSCR_DSCR|HFSCR_VECVSX|HFSCR_FP|HFSCR_EBB
+ mtspr SPRN_HFSCR,r3
+ blr
+
__init_TLB:
/* Clear the TLB */
li r6,128
@@ -131,3 +145,18 @@ __init_TLB:
bdnz 2b
ptesync
1: blr
+
+__init_PMU_HV:
+ li r5,0
+ mtspr SPRN_MMCRC,r5
+ mtspr SPRN_MMCRH,r5
+ blr
+
+__init_PMU:
+ li r5,0
+ mtspr SPRN_MMCRS,r5
+ mtspr SPRN_MMCRA,r5
+ mtspr SPRN_MMCR0,r5
+ mtspr SPRN_MMCR1,r5
+ mtspr SPRN_MMCR2,r5
+ blr
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 19599ef352bc..c60bbec25c1f 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -74,7 +74,9 @@ extern void __restore_cpu_a2(void);
#endif /* CONFIG_PPC64 */
#if defined(CONFIG_E500)
extern void __setup_cpu_e5500(unsigned long offset, struct cpu_spec* spec);
+extern void __setup_cpu_e6500(unsigned long offset, struct cpu_spec* spec);
extern void __restore_cpu_e5500(void);
+extern void __restore_cpu_e6500(void);
#endif /* CONFIG_E500 */
/* This table only contains "desktop" CPUs, it need to be filled with embedded
@@ -96,10 +98,14 @@ extern void __restore_cpu_e5500(void);
PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP | \
PPC_FEATURE_TRUE_LE | \
PPC_FEATURE_PSERIES_PERFMON_COMPAT)
+#define COMMON_USER2_POWER7 (PPC_FEATURE2_DSCR)
#define COMMON_USER_POWER8 (COMMON_USER_PPC64 | PPC_FEATURE_ARCH_2_06 |\
PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP | \
PPC_FEATURE_TRUE_LE | \
PPC_FEATURE_PSERIES_PERFMON_COMPAT)
+#define COMMON_USER2_POWER8 (PPC_FEATURE2_ARCH_2_07 | \
+ PPC_FEATURE2_HTM_COMP | PPC_FEATURE2_DSCR | \
+ PPC_FEATURE2_ISEL | PPC_FEATURE2_TAR)
#define COMMON_USER_PA6T (COMMON_USER_PPC64 | PPC_FEATURE_PA6T |\
PPC_FEATURE_TRUE_LE | \
PPC_FEATURE_HAS_ALTIVEC_COMP)
@@ -426,6 +432,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER7 (architected)",
.cpu_features = CPU_FTRS_POWER7,
.cpu_user_features = COMMON_USER_POWER7,
+ .cpu_user_features2 = COMMON_USER2_POWER7,
.mmu_features = MMU_FTRS_POWER7,
.icache_bsize = 128,
.dcache_bsize = 128,
@@ -441,6 +448,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER8 (architected)",
.cpu_features = CPU_FTRS_POWER8,
.cpu_user_features = COMMON_USER_POWER8,
+ .cpu_user_features2 = COMMON_USER2_POWER8,
.mmu_features = MMU_FTRS_POWER8,
.icache_bsize = 128,
.dcache_bsize = 128,
@@ -456,6 +464,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER7 (raw)",
.cpu_features = CPU_FTRS_POWER7,
.cpu_user_features = COMMON_USER_POWER7,
+ .cpu_user_features2 = COMMON_USER2_POWER7,
.mmu_features = MMU_FTRS_POWER7,
.icache_bsize = 128,
.dcache_bsize = 128,
@@ -473,6 +482,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER7+ (raw)",
.cpu_features = CPU_FTRS_POWER7,
.cpu_user_features = COMMON_USER_POWER7,
+ .cpu_user_features2 = COMMON_USER2_POWER7,
.mmu_features = MMU_FTRS_POWER7,
.icache_bsize = 128,
.dcache_bsize = 128,
@@ -490,6 +500,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER8 (raw)",
.cpu_features = CPU_FTRS_POWER8,
.cpu_user_features = COMMON_USER_POWER8,
+ .cpu_user_features2 = COMMON_USER2_POWER8,
.mmu_features = MMU_FTRS_POWER8,
.icache_bsize = 128,
.dcache_bsize = 128,
@@ -1993,6 +2004,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_user_features = COMMON_USER_BOOKE |
PPC_FEATURE_HAS_SPE_COMP |
PPC_FEATURE_HAS_EFP_SINGLE_COMP,
+ .cpu_user_features2 = PPC_FEATURE2_ISEL,
.mmu_features = MMU_FTR_TYPE_FSL_E,
.icache_bsize = 32,
.dcache_bsize = 32,
@@ -2012,6 +2024,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
PPC_FEATURE_HAS_SPE_COMP |
PPC_FEATURE_HAS_EFP_SINGLE_COMP |
PPC_FEATURE_HAS_EFP_DOUBLE_COMP,
+ .cpu_user_features2 = PPC_FEATURE2_ISEL,
.mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS,
.icache_bsize = 32,
.dcache_bsize = 32,
@@ -2028,6 +2041,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "e500mc",
.cpu_features = CPU_FTRS_E500MC,
.cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
+ .cpu_user_features2 = PPC_FEATURE2_ISEL,
.mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS |
MMU_FTR_USE_TLBILX,
.icache_bsize = 64,
@@ -2046,6 +2060,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "e5500",
.cpu_features = CPU_FTRS_E5500,
.cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
+ .cpu_user_features2 = PPC_FEATURE2_ISEL,
.mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS |
MMU_FTR_USE_TLBILX,
.icache_bsize = 64,
@@ -2065,7 +2080,9 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pvr_value = 0x80400000,
.cpu_name = "e6500",
.cpu_features = CPU_FTRS_E6500,
- .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
+ .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU |
+ PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .cpu_user_features2 = PPC_FEATURE2_ISEL,
.mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS |
MMU_FTR_USE_TLBILX,
.icache_bsize = 64,
@@ -2073,9 +2090,9 @@ static struct cpu_spec __initdata cpu_specs[] = {
.num_pmcs = 4,
.oprofile_cpu_type = "ppc/e6500",
.oprofile_type = PPC_OPROFILE_FSL_EMB,
- .cpu_setup = __setup_cpu_e5500,
+ .cpu_setup = __setup_cpu_e6500,
#ifndef CONFIG_PPC32
- .cpu_restore = __restore_cpu_e5500,
+ .cpu_restore = __restore_cpu_e6500,
#endif
.machine_check = machine_check_e500mc,
.platform = "ppce6500",
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index b3ba5163eae2..9ec3fe174cba 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -150,10 +150,7 @@ void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
if (addr <= rtas_end && ((addr + PAGE_SIZE) > rtas_start))
continue;
- ClearPageReserved(pfn_to_page(addr >> PAGE_SHIFT));
- init_page_count(pfn_to_page(addr >> PAGE_SHIFT));
- free_page((unsigned long)__va(addr));
- totalram_pages++;
+ free_reserved_page(pfn_to_page(addr >> PAGE_SHIFT));
}
}
#endif
diff --git a/arch/powerpc/kernel/dbell.c b/arch/powerpc/kernel/dbell.c
index 9ebbc24bb23c..d55c76c571f3 100644
--- a/arch/powerpc/kernel/dbell.c
+++ b/arch/powerpc/kernel/dbell.c
@@ -41,6 +41,8 @@ void doorbell_exception(struct pt_regs *regs)
may_hard_irq_enable();
+ __get_cpu_var(irq_stat).doorbell_irqs++;
+
smp_ipi_demux();
irq_exit();
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 256c5bf0adb7..3fe5259e2fea 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -304,7 +304,7 @@ syscall_exit_work:
subi r12,r12,TI_FLAGS
4: /* Anything else left to do? */
- SET_DEFAULT_THREAD_PPR(r3, r9) /* Set thread.ppr = 3 */
+ SET_DEFAULT_THREAD_PPR(r3, r10) /* Set thread.ppr = 3 */
andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
beq .ret_from_except_lite
@@ -458,7 +458,15 @@ BEGIN_FTR_SECTION
*/
mfspr r0,SPRN_TAR
std r0,THREAD_TAR(r3)
-END_FTR_SECTION_IFSET(CPU_FTR_BCTAR)
+
+ /* Event based branch registers */
+ mfspr r0, SPRN_BESCR
+ std r0, THREAD_BESCR(r3)
+ mfspr r0, SPRN_EBBHR
+ std r0, THREAD_EBBHR(r3)
+ mfspr r0, SPRN_EBBRR
+ std r0, THREAD_EBBRR(r3)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
#endif
#ifdef CONFIG_SMP
@@ -545,9 +553,17 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
#ifdef CONFIG_PPC_BOOK3S_64
BEGIN_FTR_SECTION
+ /* Event based branch registers */
+ ld r0, THREAD_BESCR(r4)
+ mtspr SPRN_BESCR, r0
+ ld r0, THREAD_EBBHR(r4)
+ mtspr SPRN_EBBHR, r0
+ ld r0, THREAD_EBBRR(r4)
+ mtspr SPRN_EBBRR, r0
+
ld r0,THREAD_TAR(r4)
mtspr SPRN_TAR,r0
-END_FTR_SECTION_IFSET(CPU_FTR_BCTAR)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
#endif
#ifdef CONFIG_ALTIVEC
@@ -657,7 +673,7 @@ resume_kernel:
/* Clear _TIF_EMULATE_STACK_STORE flag */
lis r11,_TIF_EMULATE_STACK_STORE@h
addi r5,r9,TI_FLAGS
- ldarx r4,0,r5
+0: ldarx r4,0,r5
andc r4,r4,r11
stdcx. r4,0,r5
bne- 0b
diff --git a/arch/powerpc/kernel/epapr_hcalls.S b/arch/powerpc/kernel/epapr_hcalls.S
index 62c0dc237826..9f1ebf7338f1 100644
--- a/arch/powerpc/kernel/epapr_hcalls.S
+++ b/arch/powerpc/kernel/epapr_hcalls.S
@@ -17,6 +17,7 @@
#include <asm/asm-compat.h>
#include <asm/asm-offsets.h>
+#ifndef CONFIG_PPC64
/* epapr_ev_idle() was derived from e500_idle() */
_GLOBAL(epapr_ev_idle)
CURRENT_THREAD_INFO(r3, r1)
@@ -42,6 +43,7 @@ epapr_ev_idle_start:
* _TLF_NAPPING.
*/
b idle_loop
+#endif
/* Hypercall entry point. Will be patched with device tree instructions. */
.global epapr_hypercall_start
diff --git a/arch/powerpc/kernel/epapr_paravirt.c b/arch/powerpc/kernel/epapr_paravirt.c
index f3eab8594d9f..d44a571e45a7 100644
--- a/arch/powerpc/kernel/epapr_paravirt.c
+++ b/arch/powerpc/kernel/epapr_paravirt.c
@@ -23,8 +23,10 @@
#include <asm/code-patching.h>
#include <asm/machdep.h>
+#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64)
extern void epapr_ev_idle(void);
extern u32 epapr_ev_idle_start[];
+#endif
bool epapr_paravirt_enabled;
@@ -47,11 +49,15 @@ static int __init epapr_paravirt_init(void)
for (i = 0; i < (len / 4); i++) {
patch_instruction(epapr_hypercall_start + i, insts[i]);
+#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64)
patch_instruction(epapr_ev_idle_start + i, insts[i]);
+#endif
}
+#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64)
if (of_get_property(hyper_node, "has-idle", NULL))
ppc_md.power_save = epapr_ev_idle;
+#endif
epapr_paravirt_enabled = true;
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index ae54553eacd9..42a756eec9ff 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -299,6 +299,8 @@ interrupt_base_book3e: /* fake trap */
EXCEPTION_STUB(0x1a0, watchdog) /* 0x09f0 */
EXCEPTION_STUB(0x1c0, data_tlb_miss)
EXCEPTION_STUB(0x1e0, instruction_tlb_miss)
+ EXCEPTION_STUB(0x200, altivec_unavailable) /* 0x0f20 */
+ EXCEPTION_STUB(0x220, altivec_assist) /* 0x1700 */
EXCEPTION_STUB(0x260, perfmon)
EXCEPTION_STUB(0x280, doorbell)
EXCEPTION_STUB(0x2a0, doorbell_crit)
@@ -395,6 +397,45 @@ interrupt_end_book3e:
bl .kernel_fp_unavailable_exception
b .ret_from_except
+/* Altivec Unavailable Interrupt */
+ START_EXCEPTION(altivec_unavailable);
+ NORMAL_EXCEPTION_PROLOG(0x200, BOOKE_INTERRUPT_ALTIVEC_UNAVAIL,
+ PROLOG_ADDITION_NONE)
+ /* we can probably do a shorter exception entry for that one... */
+ EXCEPTION_COMMON(0x200, PACA_EXGEN, INTS_KEEP)
+#ifdef CONFIG_ALTIVEC
+BEGIN_FTR_SECTION
+ ld r12,_MSR(r1)
+ andi. r0,r12,MSR_PR;
+ beq- 1f
+ bl .load_up_altivec
+ b fast_exception_return
+1:
+END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+#endif
+ INTS_DISABLE
+ bl .save_nvgprs
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl .altivec_unavailable_exception
+ b .ret_from_except
+
+/* AltiVec Assist */
+ START_EXCEPTION(altivec_assist);
+ NORMAL_EXCEPTION_PROLOG(0x220, BOOKE_INTERRUPT_ALTIVEC_ASSIST,
+ PROLOG_ADDITION_NONE)
+ EXCEPTION_COMMON(0x220, PACA_EXGEN, INTS_DISABLE)
+ bl .save_nvgprs
+ addi r3,r1,STACK_FRAME_OVERHEAD
+#ifdef CONFIG_ALTIVEC
+BEGIN_FTR_SECTION
+ bl .altivec_assist_exception
+END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+#else
+ bl .unknown_exception
+#endif
+ b .ret_from_except
+
+
/* Decrementer Interrupt */
MASKABLE_EXCEPTION(0x900, BOOKE_INTERRUPT_DECREMENTER,
decrementer, .timer_interrupt, ACK_DEC)
@@ -807,6 +848,7 @@ fast_exception_return:
BAD_STACK_TRAMPOLINE(0x000)
BAD_STACK_TRAMPOLINE(0x100)
BAD_STACK_TRAMPOLINE(0x200)
+BAD_STACK_TRAMPOLINE(0x220)
BAD_STACK_TRAMPOLINE(0x260)
BAD_STACK_TRAMPOLINE(0x280)
BAD_STACK_TRAMPOLINE(0x2a0)
@@ -1350,6 +1392,11 @@ _GLOBAL(__setup_base_ivors)
blr
+_GLOBAL(setup_altivec_ivors)
+ SET_IVOR(32, 0x200) /* AltiVec Unavailable */
+ SET_IVOR(33, 0x220) /* AltiVec Assist */
+ blr
+
_GLOBAL(setup_perfmon_ivor)
SET_IVOR(35, 0x260) /* Performance Monitor */
blr
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 200afa5bcfb7..e6eba1bf61ad 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -235,6 +235,7 @@ instruction_access_slb_pSeries:
.globl hardware_interrupt_hv;
hardware_interrupt_pSeries:
hardware_interrupt_hv:
+ HMT_MEDIUM_PPR_DISCARD
BEGIN_FTR_SECTION
_MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt,
EXC_HV, SOFTEN_TEST_HV)
@@ -254,7 +255,11 @@ hardware_interrupt_hv:
STD_EXCEPTION_PSERIES(0x800, 0x800, fp_unavailable)
KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x800)
- MASKABLE_EXCEPTION_PSERIES(0x900, 0x900, decrementer)
+ . = 0x900
+ .globl decrementer_pSeries
+decrementer_pSeries:
+ _MASKABLE_EXCEPTION_PSERIES(0x900, decrementer, EXC_STD, SOFTEN_TEST_PR)
+
STD_EXCEPTION_HV(0x980, 0x982, hdecrementer)
MASKABLE_EXCEPTION_PSERIES(0xa00, 0xa00, doorbell_super)
@@ -688,9 +693,18 @@ slb_miss_user_pseries:
.align 7
.globl machine_check_common
machine_check_common:
+
+ mfspr r10,SPRN_DAR
+ std r10,PACA_EXGEN+EX_DAR(r13)
+ mfspr r10,SPRN_DSISR
+ stw r10,PACA_EXGEN+EX_DSISR(r13)
EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
FINISH_NAP
DISABLE_INTS
+ ld r3,PACA_EXGEN+EX_DAR(r13)
+ lwz r4,PACA_EXGEN+EX_DSISR(r13)
+ std r3,_DAR(r1)
+ std r4,_DSISR(r1)
bl .save_nvgprs
addi r3,r1,STACK_FRAME_OVERHEAD
bl .machine_check_exception
@@ -797,7 +811,7 @@ hardware_interrupt_relon_hv:
_MASKABLE_RELON_EXCEPTION_PSERIES(0x502, hardware_interrupt, EXC_HV, SOFTEN_TEST_HV)
FTR_SECTION_ELSE
_MASKABLE_RELON_EXCEPTION_PSERIES(0x500, hardware_interrupt, EXC_STD, SOFTEN_TEST_PR)
- ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_206)
+ ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
STD_RELON_EXCEPTION_PSERIES(0x4600, 0x600, alignment)
STD_RELON_EXCEPTION_PSERIES(0x4700, 0x700, program_check)
STD_RELON_EXCEPTION_PSERIES(0x4800, 0x800, fp_unavailable)
@@ -870,10 +884,6 @@ tm_unavailable_relon_pSeries_1:
. = 0x5500
b denorm_exception_hv
#endif
-#ifdef CONFIG_HVC_SCOM
- STD_RELON_EXCEPTION_HV(0x5600, 0x1600, maintence_interrupt)
- KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1600)
-#endif /* CONFIG_HVC_SCOM */
STD_RELON_EXCEPTION_PSERIES(0x5700, 0x1700, altivec_assist)
/* Other future vectors */
@@ -1066,78 +1076,6 @@ unrecov_user_slb:
#endif /* __DISABLED__ */
-/*
- * r13 points to the PACA, r9 contains the saved CR,
- * r12 contain the saved SRR1, SRR0 is still ready for return
- * r3 has the faulting address
- * r9 - r13 are saved in paca->exslb.
- * r3 is saved in paca->slb_r3
- * We assume we aren't going to take any exceptions during this procedure.
- */
-_GLOBAL(slb_miss_realmode)
- mflr r10
-#ifdef CONFIG_RELOCATABLE
- mtctr r11
-#endif
-
- stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
- std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
-
- bl .slb_allocate_realmode
-
- /* All done -- return from exception. */
-
- ld r10,PACA_EXSLB+EX_LR(r13)
- ld r3,PACA_EXSLB+EX_R3(r13)
- lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
-
- mtlr r10
-
- andi. r10,r12,MSR_RI /* check for unrecoverable exception */
- beq- 2f
-
-.machine push
-.machine "power4"
- mtcrf 0x80,r9
- mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
-.machine pop
-
- RESTORE_PPR_PACA(PACA_EXSLB, r9)
- ld r9,PACA_EXSLB+EX_R9(r13)
- ld r10,PACA_EXSLB+EX_R10(r13)
- ld r11,PACA_EXSLB+EX_R11(r13)
- ld r12,PACA_EXSLB+EX_R12(r13)
- ld r13,PACA_EXSLB+EX_R13(r13)
- rfid
- b . /* prevent speculative execution */
-
-2: mfspr r11,SPRN_SRR0
- ld r10,PACAKBASE(r13)
- LOAD_HANDLER(r10,unrecov_slb)
- mtspr SPRN_SRR0,r10
- ld r10,PACAKMSR(r13)
- mtspr SPRN_SRR1,r10
- rfid
- b .
-
-unrecov_slb:
- EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
- DISABLE_INTS
- bl .save_nvgprs
-1: addi r3,r1,STACK_FRAME_OVERHEAD
- bl .unrecoverable_exception
- b 1b
-
-
-#ifdef CONFIG_PPC_970_NAP
-power4_fixup_nap:
- andc r9,r9,r10
- std r9,TI_LOCAL_FLAGS(r11)
- ld r10,_LINK(r1) /* make idle task do the */
- std r10,_NIP(r1) /* equivalent of a blr */
- blr
-#endif
-
.align 7
.globl alignment_common
alignment_common:
@@ -1336,6 +1274,78 @@ _GLOBAL(opal_mc_secondary_handler)
/*
+ * r13 points to the PACA, r9 contains the saved CR,
+ * r12 contain the saved SRR1, SRR0 is still ready for return
+ * r3 has the faulting address
+ * r9 - r13 are saved in paca->exslb.
+ * r3 is saved in paca->slb_r3
+ * We assume we aren't going to take any exceptions during this procedure.
+ */
+_GLOBAL(slb_miss_realmode)
+ mflr r10
+#ifdef CONFIG_RELOCATABLE
+ mtctr r11
+#endif
+
+ stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
+ std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
+
+ bl .slb_allocate_realmode
+
+ /* All done -- return from exception. */
+
+ ld r10,PACA_EXSLB+EX_LR(r13)
+ ld r3,PACA_EXSLB+EX_R3(r13)
+ lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
+
+ mtlr r10
+
+ andi. r10,r12,MSR_RI /* check for unrecoverable exception */
+ beq- 2f
+
+.machine push
+.machine "power4"
+ mtcrf 0x80,r9
+ mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
+.machine pop
+
+ RESTORE_PPR_PACA(PACA_EXSLB, r9)
+ ld r9,PACA_EXSLB+EX_R9(r13)
+ ld r10,PACA_EXSLB+EX_R10(r13)
+ ld r11,PACA_EXSLB+EX_R11(r13)
+ ld r12,PACA_EXSLB+EX_R12(r13)
+ ld r13,PACA_EXSLB+EX_R13(r13)
+ rfid
+ b . /* prevent speculative execution */
+
+2: mfspr r11,SPRN_SRR0
+ ld r10,PACAKBASE(r13)
+ LOAD_HANDLER(r10,unrecov_slb)
+ mtspr SPRN_SRR0,r10
+ ld r10,PACAKMSR(r13)
+ mtspr SPRN_SRR1,r10
+ rfid
+ b .
+
+unrecov_slb:
+ EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
+ DISABLE_INTS
+ bl .save_nvgprs
+1: addi r3,r1,STACK_FRAME_OVERHEAD
+ bl .unrecoverable_exception
+ b 1b
+
+
+#ifdef CONFIG_PPC_970_NAP
+power4_fixup_nap:
+ andc r9,r9,r10
+ std r9,TI_LOCAL_FLAGS(r11)
+ ld r10,_LINK(r1) /* make idle task do the */
+ std r10,_NIP(r1) /* equivalent of a blr */
+ blr
+#endif
+
+/*
* Hash table stuff
*/
.align 7
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index 06c8202a69cf..2230fd0ca3e4 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -1045,10 +1045,7 @@ static void fadump_release_memory(unsigned long begin, unsigned long end)
if (addr <= ra_end && ((addr + PAGE_SIZE) > ra_start))
continue;
- ClearPageReserved(pfn_to_page(addr >> PAGE_SHIFT));
- init_page_count(pfn_to_page(addr >> PAGE_SHIFT));
- free_page((unsigned long)__va(addr));
- totalram_pages++;
+ free_reserved_page(pfn_to_page(addr >> PAGE_SHIFT));
}
}
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index 7a2e5e421abf..97e2671cde7f 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -769,6 +769,8 @@ finish_tlb_load_47x:
*/
DEBUG_CRIT_EXCEPTION
+interrupt_end:
+
/*
* Global functions
*/
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 0886ae6dd5be..b61363d557b5 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -509,6 +509,7 @@ _GLOBAL(copy_and_flush)
sync
addi r5,r5,8
addi r6,r6,8
+ isync
blr
.align 8
diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h
index 5f051eeb93a2..a620203f7de3 100644
--- a/arch/powerpc/kernel/head_booke.h
+++ b/arch/powerpc/kernel/head_booke.h
@@ -199,11 +199,6 @@
.align 5; \
label:
-#define FINISH_EXCEPTION(func) \
- bl transfer_to_handler_full; \
- .long func; \
- .long ret_from_except_full
-
#define EXCEPTION(n, intno, label, hdlr, xfer) \
START_EXCEPTION(label); \
NORMAL_EXCEPTION_PROLOG(intno); \
@@ -286,13 +281,13 @@ label:
andis. r10,r10,(DBSR_IC|DBSR_BT)@h; \
beq+ 2f; \
\
- lis r10,KERNELBASE@h; /* check if exception in vectors */ \
- ori r10,r10,KERNELBASE@l; \
+ lis r10,interrupt_base@h; /* check if exception in vectors */ \
+ ori r10,r10,interrupt_base@l; \
cmplw r12,r10; \
blt+ 2f; /* addr below exception vectors */ \
\
- lis r10,DebugDebug@h; \
- ori r10,r10,DebugDebug@l; \
+ lis r10,interrupt_end@h; \
+ ori r10,r10,interrupt_end@l; \
cmplw r12,r10; \
bgt+ 2f; /* addr above exception vectors */ \
\
@@ -339,13 +334,13 @@ label:
andis. r10,r10,(DBSR_IC|DBSR_BT)@h; \
beq+ 2f; \
\
- lis r10,KERNELBASE@h; /* check if exception in vectors */ \
- ori r10,r10,KERNELBASE@l; \
+ lis r10,interrupt_base@h; /* check if exception in vectors */ \
+ ori r10,r10,interrupt_base@l; \
cmplw r12,r10; \
blt+ 2f; /* addr below exception vectors */ \
\
- lis r10,DebugCrit@h; \
- ori r10,r10,DebugCrit@l; \
+ lis r10,interrupt_end@h; \
+ ori r10,r10,interrupt_end@l; \
cmplw r12,r10; \
bgt+ 2f; /* addr above exception vectors */ \
\
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 6f62a737f607..d10a7cacccd2 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -605,6 +605,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
/* Embedded Hypervisor Privilege */
EXCEPTION(0, HV_PRIV, Ehvpriv, unknown_exception, EXC_XFER_EE)
+interrupt_end:
+
/*
* Local functions
*/
diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c
index ea78761aa169..939ea7ef0dc8 100644
--- a/arch/powerpc/kernel/idle.c
+++ b/arch/powerpc/kernel/idle.c
@@ -33,11 +33,6 @@
#include <asm/runlatch.h>
#include <asm/smp.h>
-#ifdef CONFIG_HOTPLUG_CPU
-#define cpu_should_die() cpu_is_offline(smp_processor_id())
-#else
-#define cpu_should_die() 0
-#endif
unsigned long cpuidle_disable = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(cpuidle_disable);
@@ -50,64 +45,38 @@ static int __init powersave_off(char *arg)
}
__setup("powersave=off", powersave_off);
-/*
- * The body of the idle task.
- */
-void cpu_idle(void)
+#ifdef CONFIG_HOTPLUG_CPU
+void arch_cpu_idle_dead(void)
{
- set_thread_flag(TIF_POLLING_NRFLAG);
- while (1) {
- tick_nohz_idle_enter();
- rcu_idle_enter();
-
- while (!need_resched() && !cpu_should_die()) {
- ppc64_runlatch_off();
-
- if (ppc_md.power_save) {
- clear_thread_flag(TIF_POLLING_NRFLAG);
- /*
- * smp_mb is so clearing of TIF_POLLING_NRFLAG
- * is ordered w.r.t. need_resched() test.
- */
- smp_mb();
- local_irq_disable();
-
- /* Don't trace irqs off for idle */
- stop_critical_timings();
-
- /* check again after disabling irqs */
- if (!need_resched() && !cpu_should_die())
- ppc_md.power_save();
-
- start_critical_timings();
-
- /* Some power_save functions return with
- * interrupts enabled, some don't.
- */
- if (irqs_disabled())
- local_irq_enable();
- set_thread_flag(TIF_POLLING_NRFLAG);
-
- } else {
- /*
- * Go into low thread priority and possibly
- * low power mode.
- */
- HMT_low();
- HMT_very_low();
- }
- }
+ sched_preempt_enable_no_resched();
+ cpu_die();
+}
+#endif
- HMT_medium();
- ppc64_runlatch_on();
- rcu_idle_exit();
- tick_nohz_idle_exit();
- if (cpu_should_die()) {
- sched_preempt_enable_no_resched();
- cpu_die();
- }
- schedule_preempt_disabled();
+void arch_cpu_idle(void)
+{
+ ppc64_runlatch_off();
+
+ if (ppc_md.power_save) {
+ ppc_md.power_save();
+ /*
+ * Some power_save functions return with
+ * interrupts enabled, some don't.
+ */
+ if (irqs_disabled())
+ local_irq_enable();
+ } else {
+ local_irq_enable();
+ /*
+ * Go into low thread priority and possibly
+ * low power mode.
+ */
+ HMT_low();
+ HMT_very_low();
}
+
+ HMT_medium();
+ ppc64_runlatch_on();
}
int powersave_nap;
diff --git a/arch/powerpc/kernel/idle_book3e.S b/arch/powerpc/kernel/idle_book3e.S
index 4c7cb4008585..bfb73cc209ce 100644
--- a/arch/powerpc/kernel/idle_book3e.S
+++ b/arch/powerpc/kernel/idle_book3e.S
@@ -16,11 +16,13 @@
#include <asm/ppc-opcode.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
+#include <asm/epapr_hcalls.h>
/* 64-bit version only for now */
#ifdef CONFIG_PPC64
-_GLOBAL(book3e_idle)
+.macro BOOK3E_IDLE name loop
+_GLOBAL(\name)
/* Save LR for later */
mflr r0
std r0,16(r1)
@@ -67,7 +69,33 @@ _GLOBAL(book3e_idle)
/* We can now re-enable hard interrupts and go to sleep */
wrteei 1
-1: PPC_WAIT(0)
+ \loop
+
+.endm
+
+.macro BOOK3E_IDLE_LOOP
+1:
+ PPC_WAIT(0)
b 1b
+.endm
+
+/* epapr_ev_idle_start below is patched with the proper hcall
+ opcodes during kernel initialization */
+.macro EPAPR_EV_IDLE_LOOP
+idle_loop:
+ LOAD_REG_IMMEDIATE(r11, EV_HCALL_TOKEN(EV_IDLE))
+
+.global epapr_ev_idle_start
+epapr_ev_idle_start:
+ li r3, -1
+ nop
+ nop
+ nop
+ b idle_loop
+.endm
+
+BOOK3E_IDLE epapr_ev_idle EPAPR_EV_IDLE_LOOP
+
+BOOK3E_IDLE book3e_idle BOOK3E_IDLE_LOOP
#endif /* CONFIG_PPC64 */
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 31c4fdc6859c..c0d0dbddfba1 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -102,7 +102,7 @@ static int __init fail_iommu_debugfs(void)
struct dentry *dir = fault_create_debugfs_attr("fail_iommu",
NULL, &fail_iommu);
- return IS_ERR(dir) ? PTR_ERR(dir) : 0;
+ return PTR_RET(dir);
}
late_initcall(fail_iommu_debugfs);
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 4f97fe345526..5cbcf4d5a808 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -374,6 +374,15 @@ int arch_show_interrupts(struct seq_file *p, int prec)
seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
seq_printf(p, " Machine check exceptions\n");
+#ifdef CONFIG_PPC_DOORBELL
+ if (cpu_has_feature(CPU_FTR_DBELL)) {
+ seq_printf(p, "%*s: ", prec, "DBL");
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ", per_cpu(irq_stat, j).doorbell_irqs);
+ seq_printf(p, " Doorbell interrupts\n");
+ }
+#endif
+
return 0;
}
@@ -387,6 +396,9 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
sum += per_cpu(irq_stat, cpu).pmu_irqs;
sum += per_cpu(irq_stat, cpu).mce_exceptions;
sum += per_cpu(irq_stat, cpu).spurious_irqs;
+#ifdef CONFIG_PPC_DOORBELL
+ sum += per_cpu(irq_stat, cpu).doorbell_irqs;
+#endif
return sum;
}
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index 5ca82cd4a374..c1eef241017a 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -159,7 +159,7 @@ static int kgdb_singlestep(struct pt_regs *regs)
if (user_mode(regs))
return 0;
- backup_current_thread_info = (struct thread_info *)kmalloc(sizeof(struct thread_info), GFP_KERNEL);
+ backup_current_thread_info = kmalloc(sizeof(struct thread_info), GFP_KERNEL);
/*
* On Book E and perhaps other processors, singlestep is handled on
* the critical exception stack. This causes current_thread_info()
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c
index a61b133c4f99..6782221d49bd 100644
--- a/arch/powerpc/kernel/kvm.c
+++ b/arch/powerpc/kernel/kvm.c
@@ -756,12 +756,7 @@ static __init void kvm_free_tmp(void)
end = (ulong)&kvm_tmp[ARRAY_SIZE(kvm_tmp)] & PAGE_MASK;
/* Free the tmp space we don't need */
- for (; start < end; start += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(start));
- init_page_count(virt_to_page(start));
- free_page(start);
- totalram_pages++;
- }
+ free_reserved_area(start, end, 0, NULL);
}
static int __init kvm_guest_init(void)
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index f5725bce9ed2..d92f3871e9cf 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -41,8 +41,6 @@
/* #define LPARCFG_DEBUG */
-static struct proc_dir_entry *proc_ppc64_lparcfg;
-
/*
* Track sum of all purrs across all processors. This is used to further
* calculate usage values by different applications
@@ -301,6 +299,7 @@ static void parse_system_parameter_string(struct seq_file *m)
__pa(rtas_data_buf),
RTAS_DATA_BUF_SIZE);
memcpy(local_buffer, rtas_data_buf, SPLPAR_MAXLENGTH);
+ local_buffer[SPLPAR_MAXLENGTH - 1] = '\0';
spin_unlock(&rtas_data_buf_lock);
if (call_status != 0) {
@@ -688,27 +687,22 @@ static const struct file_operations lparcfg_fops = {
static int __init lparcfg_init(void)
{
- struct proc_dir_entry *ent;
umode_t mode = S_IRUSR | S_IRGRP | S_IROTH;
/* Allow writing if we have FW_FEATURE_SPLPAR */
if (firmware_has_feature(FW_FEATURE_SPLPAR))
mode |= S_IWUSR;
- ent = proc_create("powerpc/lparcfg", mode, NULL, &lparcfg_fops);
- if (!ent) {
+ if (!proc_create("powerpc/lparcfg", mode, NULL, &lparcfg_fops)) {
printk(KERN_ERR "Failed to create powerpc/lparcfg\n");
return -EIO;
}
-
- proc_ppc64_lparcfg = ent;
return 0;
}
static void __exit lparcfg_cleanup(void)
{
- if (proc_ppc64_lparcfg)
- remove_proc_entry("lparcfg", proc_ppc64_lparcfg->parent);
+ remove_proc_subtree("powerpc/lparcfg", NULL);
}
module_init(lparcfg_init);
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
index bec1e930ed73..48fbc2b97e95 100644
--- a/arch/powerpc/kernel/nvram_64.c
+++ b/arch/powerpc/kernel/nvram_64.c
@@ -511,8 +511,7 @@ int __init nvram_scan_partitions(void)
"detected: 0-length partition\n");
goto out;
}
- tmp_part = (struct nvram_partition *)
- kmalloc(sizeof(struct nvram_partition), GFP_KERNEL);
+ tmp_part = kmalloc(sizeof(struct nvram_partition), GFP_KERNEL);
err = -ENOMEM;
if (!tmp_part) {
printk(KERN_ERR "nvram_scan_partitions: kmalloc failed\n");
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index fa12ae42d98c..f5c5c90799a7 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -30,6 +30,7 @@
#include <linux/irq.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
+#include <linux/vgaarb.h>
#include <asm/processor.h>
#include <asm/io.h>
@@ -785,22 +786,8 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose,
hose->isa_mem_size = size;
}
- /* We get the PCI/Mem offset from the first range or
- * the, current one if the offset came from an ISA
- * hole. If they don't match, bugger.
- */
- if (memno == 0 ||
- (isa_hole >= 0 && pci_addr != 0 &&
- hose->pci_mem_offset == isa_mb))
- hose->pci_mem_offset = cpu_addr - pci_addr;
- else if (pci_addr != 0 &&
- hose->pci_mem_offset != cpu_addr - pci_addr) {
- printk(KERN_INFO
- " \\--> Skipped (offset mismatch) !\n");
- continue;
- }
-
/* Build resource */
+ hose->mem_offset[memno] = cpu_addr - pci_addr;
res = &hose->mem_resources[memno++];
res->flags = IORESOURCE_MEM;
if (pci_space & 0x40000000)
@@ -816,20 +803,6 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose,
res->child = NULL;
}
}
-
- /* If there's an ISA hole and the pci_mem_offset is -not- matching
- * the ISA hole offset, then we need to remove the ISA hole from
- * the resource list for that brige
- */
- if (isa_hole >= 0 && hose->pci_mem_offset != isa_mb) {
- unsigned int next = isa_hole + 1;
- printk(KERN_INFO " Removing ISA hole at 0x%016llx\n", isa_mb);
- if (next < memno)
- memmove(&hose->mem_resources[isa_hole],
- &hose->mem_resources[next],
- sizeof(struct resource) * (memno - next));
- hose->mem_resources[--memno].flags = 0;
- }
}
/* Decide whether to display the domain number in /proc */
@@ -844,6 +817,14 @@ int pci_proc_domain(struct pci_bus *bus)
return 1;
}
+int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
+{
+ if (ppc_md.pcibios_root_bridge_prepare)
+ return ppc_md.pcibios_root_bridge_prepare(bridge);
+
+ return 0;
+}
+
/* This header fixup will do the resource fixup for all devices as they are
* probed, but not for bridge ranges
*/
@@ -907,6 +888,7 @@ static int pcibios_uninitialized_bridge_resource(struct pci_bus *bus,
struct pci_controller *hose = pci_bus_to_host(bus);
struct pci_dev *dev = bus->self;
resource_size_t offset;
+ struct pci_bus_region region;
u16 command;
int i;
@@ -916,10 +898,10 @@ static int pcibios_uninitialized_bridge_resource(struct pci_bus *bus,
/* Job is a bit different between memory and IO */
if (res->flags & IORESOURCE_MEM) {
- /* If the BAR is non-0 (res != pci_mem_offset) then it's probably been
- * initialized by somebody
- */
- if (res->start != hose->pci_mem_offset)
+ pcibios_resource_to_bus(dev, &region, res);
+
+ /* If the BAR is non-0 then it's probably been initialized */
+ if (region.start != 0)
return 0;
/* The BAR is 0, let's check if memory decoding is enabled on
@@ -931,11 +913,11 @@ static int pcibios_uninitialized_bridge_resource(struct pci_bus *bus,
/* Memory decoding is enabled and the BAR is 0. If any of the bridge
* resources covers that starting address (0 then it's good enough for
- * us for memory
+ * us for memory space)
*/
for (i = 0; i < 3; i++) {
if ((hose->mem_resources[i].flags & IORESOURCE_MEM) &&
- hose->mem_resources[i].start == hose->pci_mem_offset)
+ hose->mem_resources[i].start == hose->mem_offset[i])
return 0;
}
@@ -1023,6 +1005,27 @@ void pcibios_setup_bus_self(struct pci_bus *bus)
ppc_md.pci_dma_bus_setup(bus);
}
+void pcibios_setup_device(struct pci_dev *dev)
+{
+ /* Fixup NUMA node as it may not be setup yet by the generic
+ * code and is needed by the DMA init
+ */
+ set_dev_node(&dev->dev, pcibus_to_node(dev->bus));
+
+ /* Hook up default DMA ops */
+ set_dma_ops(&dev->dev, pci_dma_ops);
+ set_dma_offset(&dev->dev, PCI_DRAM_OFFSET);
+
+ /* Additional platform DMA/iommu setup */
+ if (ppc_md.pci_dma_dev_setup)
+ ppc_md.pci_dma_dev_setup(dev);
+
+ /* Read default IRQs and fixup if necessary */
+ pci_read_irq_line(dev);
+ if (ppc_md.pci_irq_fixup)
+ ppc_md.pci_irq_fixup(dev);
+}
+
void pcibios_setup_bus_devices(struct pci_bus *bus)
{
struct pci_dev *dev;
@@ -1037,23 +1040,7 @@ void pcibios_setup_bus_devices(struct pci_bus *bus)
if (dev->is_added)
continue;
- /* Fixup NUMA node as it may not be setup yet by the generic
- * code and is needed by the DMA init
- */
- set_dev_node(&dev->dev, pcibus_to_node(dev->bus));
-
- /* Hook up default DMA ops */
- set_dma_ops(&dev->dev, pci_dma_ops);
- set_dma_offset(&dev->dev, PCI_DRAM_OFFSET);
-
- /* Additional platform DMA/iommu setup */
- if (ppc_md.pci_dma_dev_setup)
- ppc_md.pci_dma_dev_setup(dev);
-
- /* Read default IRQs and fixup if necessary */
- pci_read_irq_line(dev);
- if (ppc_md.pci_irq_fixup)
- ppc_md.pci_irq_fixup(dev);
+ pcibios_setup_device(dev);
}
}
@@ -1367,10 +1354,9 @@ static void __init pcibios_reserve_legacy_regions(struct pci_bus *bus)
no_io:
/* Check for memory */
- offset = hose->pci_mem_offset;
- pr_debug("hose mem offset: %016llx\n", (unsigned long long)offset);
for (i = 0; i < 3; i++) {
pres = &hose->mem_resources[i];
+ offset = hose->mem_offset[i];
if (!(pres->flags & IORESOURCE_MEM))
continue;
pr_debug("hose mem res: %pR\n", pres);
@@ -1494,6 +1480,10 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
if (ppc_md.pcibios_enable_device_hook(dev))
return -EINVAL;
+	/* Avoid the PCIe IRQ fixup impacting CardBus bridges */
+ if (dev->hdr_type != PCI_HEADER_TYPE_CARDBUS)
+ pcibios_setup_device(dev);
+
return pci_enable_resources(dev, mask);
}
@@ -1506,6 +1496,7 @@ static void pcibios_setup_phb_resources(struct pci_controller *hose,
struct list_head *resources)
{
struct resource *res;
+ resource_size_t offset;
int i;
/* Hookup PHB IO resource */
@@ -1515,49 +1506,37 @@ static void pcibios_setup_phb_resources(struct pci_controller *hose,
printk(KERN_WARNING "PCI: I/O resource not set for host"
" bridge %s (domain %d)\n",
hose->dn->full_name, hose->global_number);
-#ifdef CONFIG_PPC32
- /* Workaround for lack of IO resource only on 32-bit */
- res->start = (unsigned long)hose->io_base_virt - isa_io_base;
- res->end = res->start + IO_SPACE_LIMIT;
- res->flags = IORESOURCE_IO;
-#endif /* CONFIG_PPC32 */
- }
+ } else {
+ offset = pcibios_io_space_offset(hose);
- pr_debug("PCI: PHB IO resource = %016llx-%016llx [%lx]\n",
- (unsigned long long)res->start,
- (unsigned long long)res->end,
- (unsigned long)res->flags);
- pci_add_resource_offset(resources, res, pcibios_io_space_offset(hose));
+ pr_debug("PCI: PHB IO resource = %08llx-%08llx [%lx] off 0x%08llx\n",
+ (unsigned long long)res->start,
+ (unsigned long long)res->end,
+ (unsigned long)res->flags,
+ (unsigned long long)offset);
+ pci_add_resource_offset(resources, res, offset);
+ }
/* Hookup PHB Memory resources */
for (i = 0; i < 3; ++i) {
res = &hose->mem_resources[i];
if (!res->flags) {
- if (i > 0)
- continue;
printk(KERN_ERR "PCI: Memory resource 0 not set for "
"host bridge %s (domain %d)\n",
hose->dn->full_name, hose->global_number);
-#ifdef CONFIG_PPC32
- /* Workaround for lack of MEM resource only on 32-bit */
- res->start = hose->pci_mem_offset;
- res->end = (resource_size_t)-1LL;
- res->flags = IORESOURCE_MEM;
-#endif /* CONFIG_PPC32 */
+ continue;
}
+ offset = hose->mem_offset[i];
- pr_debug("PCI: PHB MEM resource %d = %016llx-%016llx [%lx]\n", i,
+
+ pr_debug("PCI: PHB MEM resource %d = %08llx-%08llx [%lx] off 0x%08llx\n", i,
(unsigned long long)res->start,
(unsigned long long)res->end,
- (unsigned long)res->flags);
- pci_add_resource_offset(resources, res, hose->pci_mem_offset);
- }
-
- pr_debug("PCI: PHB MEM offset = %016llx\n",
- (unsigned long long)hose->pci_mem_offset);
- pr_debug("PCI: PHB IO offset = %08lx\n",
- (unsigned long)hose->io_base_virt - _IO_BASE);
+ (unsigned long)res->flags,
+ (unsigned long long)offset);
+ pci_add_resource_offset(resources, res, offset);
+ }
}
/*
@@ -1725,3 +1704,15 @@ static void fixup_hide_host_resource_fsl(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MOTOROLA, PCI_ANY_ID, fixup_hide_host_resource_fsl);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, fixup_hide_host_resource_fsl);
+
+static void fixup_vga(struct pci_dev *pdev)
+{
+ u16 cmd;
+
+ pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+ if ((cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) || !vga_default_device())
+ vga_set_default_device(pdev);
+
+}
+DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_DISPLAY_VGA, 8, fixup_vga);
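
The pci-common.c hunks above do two things: they replace the single hose->pci_mem_offset with a per-window hose->mem_offset[i], and they split the per-device NUMA/DMA/IRQ fixups out into pcibios_setup_device() so the same fixups can also run at pci_enable_device() time (except for CardBus, per the comment). A minimal stand-alone sketch of the per-window offset idea (hypothetical names, not kernel API):

#include <stdio.h>
#include <stdint.h>

/* Hypothetical sketch: each host-bridge memory window carries its own
 * CPU->PCI offset instead of one global pci_mem_offset for every window. */
struct mem_window {
	uint64_t cpu_start;	/* window start as seen by the CPU        */
	uint64_t bus_start;	/* the same window as seen on the PCI bus */
	uint64_t size;
};

/* Translate a CPU address to a bus address using the matching window. */
static int cpu_to_bus(const struct mem_window *w, int nr_windows,
		      uint64_t cpu_addr, uint64_t *bus_addr)
{
	int i;

	for (i = 0; i < nr_windows; i++) {
		if (cpu_addr < w[i].cpu_start ||
		    cpu_addr >= w[i].cpu_start + w[i].size)
			continue;
		/* the offset is per window, not per host bridge */
		*bus_addr = cpu_addr - (w[i].cpu_start - w[i].bus_start);
		return 0;
	}
	return -1;
}

int main(void)
{
	struct mem_window win[2] = {
		{ 0xc0000000ULL, 0x80000000ULL, 0x10000000ULL },
		{ 0xd0000000ULL, 0x00000000ULL, 0x10000000ULL },
	};
	uint64_t bus;

	if (cpu_to_bus(win, 2, 0xd0001000ULL, &bus) == 0)
		printf("bus address 0x%llx\n", (unsigned long long)bus);
	return 0;
}
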
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index e37c2152acf4..432459c817fa 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -295,7 +295,7 @@ long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn)
case IOBASE_BRIDGE_NUMBER:
return (long)hose->first_busno;
case IOBASE_MEMORY:
- return (long)hose->pci_mem_offset;
+ return (long)hose->mem_offset[0];
case IOBASE_IO:
return (long)hose->io_base_phys;
case IOBASE_ISA_IO:
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index 51a133a78a09..873050d26840 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -246,7 +246,7 @@ long sys_pciconfig_iobase(long which, unsigned long in_bus,
case IOBASE_BRIDGE_NUMBER:
return (long)hose->first_busno;
case IOBASE_MEMORY:
- return (long)hose->pci_mem_offset;
+ return (long)hose->mem_offset[0];
case IOBASE_IO:
return (long)hose->io_base_phys;
case IOBASE_ISA_IO:
diff --git a/arch/powerpc/kernel/proc_powerpc.c b/arch/powerpc/kernel/proc_powerpc.c
index f19d0bdc3241..feb8580fdc84 100644
--- a/arch/powerpc/kernel/proc_powerpc.c
+++ b/arch/powerpc/kernel/proc_powerpc.c
@@ -32,8 +32,6 @@
static loff_t page_map_seek( struct file *file, loff_t off, int whence)
{
loff_t new;
- struct proc_dir_entry *dp = PDE(file_inode(file));
-
switch(whence) {
case 0:
new = off;
@@ -42,12 +40,12 @@ static loff_t page_map_seek( struct file *file, loff_t off, int whence)
new = file->f_pos + off;
break;
case 2:
- new = dp->size + off;
+ new = PAGE_SIZE + off;
break;
default:
return -EINVAL;
}
- if ( new < 0 || new > dp->size )
+ if ( new < 0 || new > PAGE_SIZE )
return -EINVAL;
return (file->f_pos = new);
}
@@ -55,19 +53,18 @@ static loff_t page_map_seek( struct file *file, loff_t off, int whence)
static ssize_t page_map_read( struct file *file, char __user *buf, size_t nbytes,
loff_t *ppos)
{
- struct proc_dir_entry *dp = PDE(file_inode(file));
- return simple_read_from_buffer(buf, nbytes, ppos, dp->data, dp->size);
+ return simple_read_from_buffer(buf, nbytes, ppos,
+ PDE_DATA(file_inode(file)), PAGE_SIZE);
}
static int page_map_mmap( struct file *file, struct vm_area_struct *vma )
{
- struct proc_dir_entry *dp = PDE(file_inode(file));
-
- if ((vma->vm_end - vma->vm_start) > dp->size)
+ if ((vma->vm_end - vma->vm_start) > PAGE_SIZE)
return -EINVAL;
- remap_pfn_range(vma, vma->vm_start, __pa(dp->data) >> PAGE_SHIFT,
- dp->size, vma->vm_page_prot);
+ remap_pfn_range(vma, vma->vm_start,
+ __pa(PDE_DATA(file_inode(file))) >> PAGE_SHIFT,
+ PAGE_SIZE, vma->vm_page_prot);
return 0;
}
@@ -86,7 +83,7 @@ static int __init proc_ppc64_init(void)
&page_map_fops, vdso_data);
if (!pde)
return 1;
- pde->size = PAGE_SIZE;
+ proc_set_size(pde, PAGE_SIZE);
return 0;
}
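
The proc_powerpc.c hunk is part of the proc API cleanup of this era: instead of poking at struct proc_dir_entry fields (dp->data, dp->size), the code reads its private data back with PDE_DATA(inode) and publishes the size with proc_set_size(). A hedged, module-style sketch of that pattern (all example_* names are invented, not part of this patch):

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/fs.h>
#include <linux/mm.h>

/* Sketch only: "example_page" and the example_* symbols are made up. */
static char example_page[PAGE_SIZE];

static ssize_t example_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	/* private data comes back via PDE_DATA(), not PDE(...)->data */
	return simple_read_from_buffer(buf, count, ppos,
				       PDE_DATA(file_inode(file)), PAGE_SIZE);
}

static const struct file_operations example_fops = {
	.owner = THIS_MODULE,
	.read  = example_read,
};

static int __init example_init(void)
{
	struct proc_dir_entry *pde;

	pde = proc_create_data("example_page", 0444, NULL,
			       &example_fops, example_page);
	if (!pde)
		return -ENOMEM;
	proc_set_size(pde, PAGE_SIZE);	/* replaces pde->size = PAGE_SIZE */
	return 0;
}
module_init(example_init);
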
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 59dd545fdde1..ceb4e7b62cf4 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -555,10 +555,12 @@ static inline void tm_recheckpoint_new_task(struct task_struct *new)
new->thread.regs->msr |=
(MSR_FP | new->thread.fpexc_mode);
}
+#ifdef CONFIG_ALTIVEC
if (msr & MSR_VEC) {
do_load_up_transact_altivec(&new->thread);
new->thread.regs->msr |= MSR_VEC;
}
+#endif
/* We may as well turn on VSX too since all the state is restored now */
if (msr & MSR_VSX)
new->thread.regs->msr |= MSR_VSX;
@@ -829,6 +831,8 @@ void show_regs(struct pt_regs * regs)
{
int i, trap;
+ show_regs_print_info(KERN_DEFAULT);
+
printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
regs->nip, regs->link, regs->ctr);
printk("REGS: %p TRAP: %04lx %s (%s)\n",
@@ -848,12 +852,6 @@ void show_regs(struct pt_regs * regs)
#else
printk("DAR: "REG", DSISR: %08lx\n", regs->dar, regs->dsisr);
#endif
- printk("TASK = %p[%d] '%s' THREAD: %p",
- current, task_pid_nr(current), current->comm, task_thread_info(current));
-
-#ifdef CONFIG_SMP
- printk(" CPU: %d", raw_smp_processor_id());
-#endif /* CONFIG_SMP */
for (i = 0; i < 32; i++) {
if ((i % REGS_PER_LINE) == 0)
@@ -910,10 +908,6 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
flush_altivec_to_thread(src);
flush_vsx_to_thread(src);
flush_spe_to_thread(src);
-#ifdef CONFIG_HAVE_HW_BREAKPOINT
- flush_ptrace_hw_breakpoint(src);
-#endif /* CONFIG_HAVE_HW_BREAKPOINT */
-
*dst = *src;
return 0;
}
@@ -984,6 +978,10 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
_ALIGN_UP(sizeof(struct thread_info), 16);
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ p->thread.ptrace_bps[0] = NULL;
+#endif
+
#ifdef CONFIG_PPC_STD_MMU_64
if (mmu_has_feature(MMU_FTR_SLB)) {
unsigned long sp_vsid;
@@ -1360,12 +1358,6 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
} while (count++ < kstack_depth_to_print);
}
-void dump_stack(void)
-{
- show_stack(current, NULL);
-}
-EXPORT_SYMBOL(dump_stack);
-
#ifdef CONFIG_PPC64
/* Called with hard IRQs off */
void __ppc64_runlatch_on(void)
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 13f8d168b3f1..5eccda9fd33f 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -627,16 +627,11 @@ static void __init early_cmdline_parse(void)
#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
/*
- * There are two methods for telling firmware what our capabilities are.
- * Newer machines have an "ibm,client-architecture-support" method on the
- * root node. For older machines, we have to call the "process-elf-header"
- * method in the /packages/elf-loader node, passing it a fake 32-bit
- * ELF header containing a couple of PT_NOTE sections that contain
- * structures that contain various information.
- */
-
-/*
- * New method - extensible architecture description vector.
+ * The architecture vector has an array of PVR mask/value pairs,
+ * followed by # option vectors - 1, followed by the option vectors.
+ *
+ * See prom.h for the definition of the bits specified in the
+ * architecture vector.
*
* Because the description vector contains a mix of byte and word
* values, we declare it as an unsigned char array, and use this
@@ -645,65 +640,7 @@ static void __init early_cmdline_parse(void)
#define W(x) ((x) >> 24) & 0xff, ((x) >> 16) & 0xff, \
((x) >> 8) & 0xff, (x) & 0xff
-/* Option vector bits - generic bits in byte 1 */
-#define OV_IGNORE 0x80 /* ignore this vector */
-#define OV_CESSATION_POLICY 0x40 /* halt if unsupported option present*/
-
-/* Option vector 1: processor architectures supported */
-#define OV1_PPC_2_00 0x80 /* set if we support PowerPC 2.00 */
-#define OV1_PPC_2_01 0x40 /* set if we support PowerPC 2.01 */
-#define OV1_PPC_2_02 0x20 /* set if we support PowerPC 2.02 */
-#define OV1_PPC_2_03 0x10 /* set if we support PowerPC 2.03 */
-#define OV1_PPC_2_04 0x08 /* set if we support PowerPC 2.04 */
-#define OV1_PPC_2_05 0x04 /* set if we support PowerPC 2.05 */
-#define OV1_PPC_2_06 0x02 /* set if we support PowerPC 2.06 */
-#define OV1_PPC_2_07 0x01 /* set if we support PowerPC 2.07 */
-
-/* Option vector 2: Open Firmware options supported */
-#define OV2_REAL_MODE 0x20 /* set if we want OF in real mode */
-
-/* Option vector 3: processor options supported */
-#define OV3_FP 0x80 /* floating point */
-#define OV3_VMX 0x40 /* VMX/Altivec */
-#define OV3_DFP 0x20 /* decimal FP */
-
-/* Option vector 4: IBM PAPR implementation */
-#define OV4_MIN_ENT_CAP 0x01 /* minimum VP entitled capacity */
-
-/* Option vector 5: PAPR/OF options supported */
-#define OV5_LPAR 0x80 /* logical partitioning supported */
-#define OV5_SPLPAR 0x40 /* shared-processor LPAR supported */
-/* ibm,dynamic-reconfiguration-memory property supported */
-#define OV5_DRCONF_MEMORY 0x20
-#define OV5_LARGE_PAGES 0x10 /* large pages supported */
-#define OV5_DONATE_DEDICATE_CPU 0x02 /* donate dedicated CPU support */
-/* PCIe/MSI support. Without MSI full PCIe is not supported */
-#ifdef CONFIG_PCI_MSI
-#define OV5_MSI 0x01 /* PCIe/MSI support */
-#else
-#define OV5_MSI 0x00
-#endif /* CONFIG_PCI_MSI */
-#ifdef CONFIG_PPC_SMLPAR
-#define OV5_CMO 0x80 /* Cooperative Memory Overcommitment */
-#define OV5_XCMO 0x40 /* Page Coalescing */
-#else
-#define OV5_CMO 0x00
-#define OV5_XCMO 0x00
-#endif
-#define OV5_TYPE1_AFFINITY 0x80 /* Type 1 NUMA affinity */
-#define OV5_PFO_HW_RNG 0x80 /* PFO Random Number Generator */
-#define OV5_PFO_HW_842 0x40 /* PFO Compression Accelerator */
-#define OV5_PFO_HW_ENCR 0x20 /* PFO Encryption Accelerator */
-#define OV5_SUB_PROCESSORS 0x01 /* 1,2,or 4 Sub-Processors supported */
-
-/* Option Vector 6: IBM PAPR hints */
-#define OV6_LINUX 0x02 /* Linux is our OS */
-
-/*
- * The architecture vector has an array of PVR mask/value pairs,
- * followed by # option vectors - 1, followed by the option vectors.
- */
-static unsigned char ibm_architecture_vec[] = {
+unsigned char ibm_architecture_vec[] = {
W(0xfffe0000), W(0x003a0000), /* POWER5/POWER5+ */
W(0xffff0000), W(0x003e0000), /* POWER6 */
W(0xffff0000), W(0x003f0000), /* POWER7 */
@@ -747,11 +684,21 @@ static unsigned char ibm_architecture_vec[] = {
/* option vector 5: PAPR/OF options */
19 - 2, /* length */
0, /* don't ignore, don't halt */
- OV5_LPAR | OV5_SPLPAR | OV5_LARGE_PAGES | OV5_DRCONF_MEMORY |
- OV5_DONATE_DEDICATE_CPU | OV5_MSI,
+ OV5_FEAT(OV5_LPAR) | OV5_FEAT(OV5_SPLPAR) | OV5_FEAT(OV5_LARGE_PAGES) |
+ OV5_FEAT(OV5_DRCONF_MEMORY) | OV5_FEAT(OV5_DONATE_DEDICATE_CPU) |
+#ifdef CONFIG_PCI_MSI
+ /* PCIe/MSI support. Without MSI full PCIe is not supported */
+ OV5_FEAT(OV5_MSI),
+#else
0,
- OV5_CMO | OV5_XCMO,
- OV5_TYPE1_AFFINITY,
+#endif
+ 0,
+#ifdef CONFIG_PPC_SMLPAR
+ OV5_FEAT(OV5_CMO) | OV5_FEAT(OV5_XCMO),
+#else
+ 0,
+#endif
+ OV5_FEAT(OV5_TYPE1_AFFINITY) | OV5_FEAT(OV5_PRRN),
0,
0,
0,
@@ -765,8 +712,9 @@ static unsigned char ibm_architecture_vec[] = {
0,
0,
0,
- OV5_PFO_HW_RNG | OV5_PFO_HW_ENCR | OV5_PFO_HW_842,
- OV5_SUB_PROCESSORS,
+ OV5_FEAT(OV5_PFO_HW_RNG) | OV5_FEAT(OV5_PFO_HW_ENCR) |
+ OV5_FEAT(OV5_PFO_HW_842),
+ OV5_FEAT(OV5_SUB_PROCESSORS),
/* option vector 6: IBM PAPR hints */
4 - 2, /* length */
0,
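
The prom_init.c changes keep the byte-packed ibm_architecture_vec but wrap each feature bit in OV5_FEAT() and move the OV* definitions out to a header. The W() macro shown in the context is what lets 32-bit PVR mask/value words live inside an unsigned char array; a small stand-alone illustration of the same packing:

#include <stdio.h>

/* Same trick as the W() macro in the context above: emit one 32-bit
 * value as four big-endian initializer bytes of an unsigned char array. */
#define W(x)	((x) >> 24) & 0xff, ((x) >> 16) & 0xff, \
		((x) >> 8) & 0xff, (x) & 0xff

static unsigned char vec[] = {
	W(0xffff0000),		/* PVR mask           */
	W(0x003f0000),		/* PVR value (POWER7) */
};

int main(void)
{
	unsigned int i;

	for (i = 0; i < sizeof(vec); i++)
		printf("%02x ", vec[i]);
	printf("\n");		/* prints: ff ff 00 00 00 3f 00 00 */
	return 0;
}
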
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index f9b30c68ba47..3b14d320e69f 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -180,9 +180,10 @@ static int set_user_msr(struct task_struct *task, unsigned long msr)
}
#ifdef CONFIG_PPC64
-static unsigned long get_user_dscr(struct task_struct *task)
+static int get_user_dscr(struct task_struct *task, unsigned long *data)
{
- return task->thread.dscr;
+ *data = task->thread.dscr;
+ return 0;
}
static int set_user_dscr(struct task_struct *task, unsigned long dscr)
@@ -192,7 +193,7 @@ static int set_user_dscr(struct task_struct *task, unsigned long dscr)
return 0;
}
#else
-static unsigned long get_user_dscr(struct task_struct *task)
+static int get_user_dscr(struct task_struct *task, unsigned long *data)
{
return -EIO;
}
@@ -216,19 +217,23 @@ static int set_user_trap(struct task_struct *task, unsigned long trap)
/*
* Get contents of register REGNO in task TASK.
*/
-unsigned long ptrace_get_reg(struct task_struct *task, int regno)
+int ptrace_get_reg(struct task_struct *task, int regno, unsigned long *data)
{
- if (task->thread.regs == NULL)
+ if ((task->thread.regs == NULL) || !data)
return -EIO;
- if (regno == PT_MSR)
- return get_user_msr(task);
+ if (regno == PT_MSR) {
+ *data = get_user_msr(task);
+ return 0;
+ }
if (regno == PT_DSCR)
- return get_user_dscr(task);
+ return get_user_dscr(task, data);
- if (regno < (sizeof(struct pt_regs) / sizeof(unsigned long)))
- return ((unsigned long *)task->thread.regs)[regno];
+ if (regno < (sizeof(struct pt_regs) / sizeof(unsigned long))) {
+ *data = ((unsigned long *)task->thread.regs)[regno];
+ return 0;
+ }
return -EIO;
}
@@ -1560,7 +1565,9 @@ long arch_ptrace(struct task_struct *child, long request,
CHECK_FULL_REGS(child->thread.regs);
if (index < PT_FPR0) {
- tmp = ptrace_get_reg(child, (int) index);
+ ret = ptrace_get_reg(child, (int) index, &tmp);
+ if (ret)
+ break;
} else {
unsigned int fpidx = index - PT_FPR0;
@@ -1637,6 +1644,8 @@ long arch_ptrace(struct task_struct *child, long request,
dbginfo.sizeof_condition = 0;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
dbginfo.features = PPC_DEBUG_FEATURE_DATA_BP_RANGE;
+ if (cpu_has_feature(CPU_FTR_DAWR))
+ dbginfo.features |= PPC_DEBUG_FEATURE_DATA_BP_DAWR;
#else
dbginfo.features = 0;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
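
The ptrace.c change gives ptrace_get_reg() an error-returning signature: the register value now comes back through a pointer, so a register that happens to contain a value equal to -EIO can no longer be mistaken for a failure. A self-contained sketch of the same refactoring pattern (not kernel code):

#include <stdio.h>
#include <errno.h>

/* Old style: the return value is both data and error code, so a register
 * that happens to hold -EIO is indistinguishable from a failure.
 * New style: return a status and pass the data back via an out-parameter. */
static int get_reg(const unsigned long *regs, int nregs, int regno,
		   unsigned long *data)
{
	if (!regs || !data || regno < 0 || regno >= nregs)
		return -EIO;
	*data = regs[regno];
	return 0;
}

int main(void)
{
	unsigned long regs[4] = { 1, 2, 3, (unsigned long)-EIO };
	unsigned long val;

	if (get_reg(regs, 4, 3, &val) == 0)
		printf("reg 3 = %#lx, no ambiguity with -EIO\n", val);
	return 0;
}
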
diff --git a/arch/powerpc/kernel/ptrace32.c b/arch/powerpc/kernel/ptrace32.c
index c0244e766834..f51599e941c7 100644
--- a/arch/powerpc/kernel/ptrace32.c
+++ b/arch/powerpc/kernel/ptrace32.c
@@ -95,7 +95,9 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
CHECK_FULL_REGS(child->thread.regs);
if (index < PT_FPR0) {
- tmp = ptrace_get_reg(child, index);
+ ret = ptrace_get_reg(child, index, &tmp);
+ if (ret)
+ break;
} else {
flush_fp_to_thread(child);
/*
@@ -148,7 +150,11 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
tmp = ((u64 *)child->thread.fpr)
[FPRINDEX_3264(numReg)];
} else { /* register within PT_REGS struct */
- tmp = ptrace_get_reg(child, numReg);
+ unsigned long tmp2;
+ ret = ptrace_get_reg(child, numReg, &tmp2);
+ if (ret)
+ break;
+ tmp = tmp2;
}
reg32bits = ((u32*)&tmp)[part];
ret = put_user(reg32bits, (u32 __user *)data);
@@ -232,7 +238,10 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
break;
CHECK_FULL_REGS(child->thread.regs);
if (numReg < PT_FPR0) {
- unsigned long freg = ptrace_get_reg(child, numReg);
+ unsigned long freg;
+ ret = ptrace_get_reg(child, numReg, &freg);
+ if (ret)
+ break;
if (index % 2)
freg = (freg & ~0xfffffffful) | (data & 0xfffffffful);
else
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c
index c642f0132988..5b3022470126 100644
--- a/arch/powerpc/kernel/rtas_flash.c
+++ b/arch/powerpc/kernel/rtas_flash.c
@@ -57,13 +57,31 @@
#define VALIDATE_READY -1001 /* Firmware image ready for validation */
#define VALIDATE_PARAM_ERR -3 /* RTAS Parameter Error */
#define VALIDATE_HW_ERR -1 /* RTAS Hardware Error */
-#define VALIDATE_TMP_UPDATE 0 /* Validate Return Status */
-#define VALIDATE_FLASH_AUTH 1 /* Validate Return Status */
-#define VALIDATE_INVALID_IMG 2 /* Validate Return Status */
-#define VALIDATE_CUR_UNKNOWN 3 /* Validate Return Status */
-#define VALIDATE_TMP_COMMIT_DL 4 /* Validate Return Status */
-#define VALIDATE_TMP_COMMIT 5 /* Validate Return Status */
-#define VALIDATE_TMP_UPDATE_DL 6 /* Validate Return Status */
+
+/* ibm,validate-flash-image update result tokens */
+#define VALIDATE_TMP_UPDATE 0 /* T side will be updated */
+#define VALIDATE_FLASH_AUTH 1 /* Partition does not have authority */
+#define VALIDATE_INVALID_IMG 2 /* Candidate image is not valid */
+#define VALIDATE_CUR_UNKNOWN 3 /* Current fixpack level is unknown */
+/*
+ * Current T side will be committed to P side before being replaced with new
+ * image, and the new image is downlevel from the current image
+ */
+#define VALIDATE_TMP_COMMIT_DL 4
+/*
+ * Current T side will be committed to P side before being replaced with new
+ * image
+ */
+#define VALIDATE_TMP_COMMIT 5
+/*
+ * T side will be updated with a downlevel image
+ */
+#define VALIDATE_TMP_UPDATE_DL 6
+/*
+ * The candidate image's release date is later than the system's firmware
+ * service entitlement date - service warranty period has expired
+ */
+#define VALIDATE_OUT_OF_WRNTY 7
/* ibm,manage-flash-image operation tokens */
#define RTAS_REJECT_TMP_IMG 0
@@ -102,9 +120,10 @@ static struct kmem_cache *flash_block_cache = NULL;
#define FLASH_BLOCK_LIST_VERSION (1UL)
-/* Local copy of the flash block list.
- * We only allow one open of the flash proc file and create this
- * list as we go. The rtas_firmware_flash_list varable will be
+/*
+ * Local copy of the flash block list.
+ *
+ * The rtas_firmware_flash_list variable will be
* set once the data is fully read.
*
* For convenience as we build the list we use virtual addrs,
@@ -125,23 +144,23 @@ struct rtas_update_flash_t
struct rtas_manage_flash_t
{
int status; /* Returned status */
- unsigned int op; /* Reject or commit image */
};
/* Status int must be first member of struct */
struct rtas_validate_flash_t
{
int status; /* Returned status */
- char buf[VALIDATE_BUF_SIZE]; /* Candidate image buffer */
+ char *buf; /* Candidate image buffer */
unsigned int buf_size; /* Size of image buf */
unsigned int update_results; /* Update results token */
};
-static DEFINE_SPINLOCK(flash_file_open_lock);
-static struct proc_dir_entry *firmware_flash_pde;
-static struct proc_dir_entry *firmware_update_pde;
-static struct proc_dir_entry *validate_pde;
-static struct proc_dir_entry *manage_pde;
+static struct rtas_update_flash_t rtas_update_flash_data;
+static struct rtas_manage_flash_t rtas_manage_flash_data;
+static struct rtas_validate_flash_t rtas_validate_flash_data;
+static DEFINE_MUTEX(rtas_update_flash_mutex);
+static DEFINE_MUTEX(rtas_manage_flash_mutex);
+static DEFINE_MUTEX(rtas_validate_flash_mutex);
/* Do simple sanity checks on the flash image. */
static int flash_list_valid(struct flash_block_list *flist)
@@ -191,10 +210,10 @@ static void free_flash_list(struct flash_block_list *f)
static int rtas_flash_release(struct inode *inode, struct file *file)
{
- struct proc_dir_entry *dp = PDE(file_inode(file));
- struct rtas_update_flash_t *uf;
-
- uf = (struct rtas_update_flash_t *) dp->data;
+ struct rtas_update_flash_t *const uf = &rtas_update_flash_data;
+
+ mutex_lock(&rtas_update_flash_mutex);
+
if (uf->flist) {
/* File was opened in write mode for a new flash attempt */
/* Clear saved list */
@@ -214,13 +233,14 @@ static int rtas_flash_release(struct inode *inode, struct file *file)
uf->flist = NULL;
}
- atomic_dec(&dp->count);
+ mutex_unlock(&rtas_update_flash_mutex);
return 0;
}
-static void get_flash_status_msg(int status, char *buf)
+static size_t get_flash_status_msg(int status, char *buf)
{
- char *msg;
+ const char *msg;
+ size_t len;
switch (status) {
case FLASH_AUTH:
@@ -242,36 +262,47 @@ static void get_flash_status_msg(int status, char *buf)
msg = "ready: firmware image ready for flash on reboot\n";
break;
default:
- sprintf(buf, "error: unexpected status value %d\n", status);
- return;
+ return sprintf(buf, "error: unexpected status value %d\n",
+ status);
}
- strcpy(buf, msg);
+ len = strlen(msg);
+ memcpy(buf, msg, len + 1);
+ return len;
}
/* Reading the proc file will show status (not the firmware contents) */
-static ssize_t rtas_flash_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
+static ssize_t rtas_flash_read_msg(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
{
- struct proc_dir_entry *dp = PDE(file_inode(file));
- struct rtas_update_flash_t *uf;
+ struct rtas_update_flash_t *const uf = &rtas_update_flash_data;
char msg[RTAS_MSG_MAXLEN];
+ size_t len;
+ int status;
- uf = dp->data;
-
- if (!strcmp(dp->name, FIRMWARE_FLASH_NAME)) {
- get_flash_status_msg(uf->status, msg);
- } else { /* FIRMWARE_UPDATE_NAME */
- sprintf(msg, "%d\n", uf->status);
- }
+ mutex_lock(&rtas_update_flash_mutex);
+ status = uf->status;
+ mutex_unlock(&rtas_update_flash_mutex);
- return simple_read_from_buffer(buf, count, ppos, msg, strlen(msg));
+ /* Read as text message */
+ len = get_flash_status_msg(status, msg);
+ return simple_read_from_buffer(buf, count, ppos, msg, len);
}
-/* constructor for flash_block_cache */
-void rtas_block_ctor(void *ptr)
+static ssize_t rtas_flash_read_num(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
{
- memset(ptr, 0, RTAS_BLK_SIZE);
+ struct rtas_update_flash_t *const uf = &rtas_update_flash_data;
+ char msg[RTAS_MSG_MAXLEN];
+ int status;
+
+ mutex_lock(&rtas_update_flash_mutex);
+ status = uf->status;
+ mutex_unlock(&rtas_update_flash_mutex);
+
+ /* Read as number */
+ sprintf(msg, "%d\n", status);
+ return simple_read_from_buffer(buf, count, ppos, msg, strlen(msg));
}
/* We could be much more efficient here. But to keep this function
@@ -282,25 +313,24 @@ void rtas_block_ctor(void *ptr)
static ssize_t rtas_flash_write(struct file *file, const char __user *buffer,
size_t count, loff_t *off)
{
- struct proc_dir_entry *dp = PDE(file_inode(file));
- struct rtas_update_flash_t *uf;
+ struct rtas_update_flash_t *const uf = &rtas_update_flash_data;
char *p;
- int next_free;
+ int next_free, rc;
struct flash_block_list *fl;
- uf = (struct rtas_update_flash_t *) dp->data;
+ mutex_lock(&rtas_update_flash_mutex);
if (uf->status == FLASH_AUTH || count == 0)
- return count; /* discard data */
+ goto out; /* discard data */
/* In the case that the image is not ready for flashing, the memory
* allocated for the block list will be freed upon the release of the
* proc file
*/
if (uf->flist == NULL) {
- uf->flist = kmem_cache_alloc(flash_block_cache, GFP_KERNEL);
+ uf->flist = kmem_cache_zalloc(flash_block_cache, GFP_KERNEL);
if (!uf->flist)
- return -ENOMEM;
+ goto nomem;
}
fl = uf->flist;
@@ -309,63 +339,48 @@ static ssize_t rtas_flash_write(struct file *file, const char __user *buffer,
next_free = fl->num_blocks;
if (next_free == FLASH_BLOCKS_PER_NODE) {
/* Need to allocate another block_list */
- fl->next = kmem_cache_alloc(flash_block_cache, GFP_KERNEL);
+ fl->next = kmem_cache_zalloc(flash_block_cache, GFP_KERNEL);
if (!fl->next)
- return -ENOMEM;
+ goto nomem;
fl = fl->next;
next_free = 0;
}
if (count > RTAS_BLK_SIZE)
count = RTAS_BLK_SIZE;
- p = kmem_cache_alloc(flash_block_cache, GFP_KERNEL);
+ p = kmem_cache_zalloc(flash_block_cache, GFP_KERNEL);
if (!p)
- return -ENOMEM;
+ goto nomem;
if(copy_from_user(p, buffer, count)) {
kmem_cache_free(flash_block_cache, p);
- return -EFAULT;
+ rc = -EFAULT;
+ goto error;
}
fl->blocks[next_free].data = p;
fl->blocks[next_free].length = count;
fl->num_blocks++;
-
+out:
+ mutex_unlock(&rtas_update_flash_mutex);
return count;
-}
-static int rtas_excl_open(struct inode *inode, struct file *file)
-{
- struct proc_dir_entry *dp = PDE(inode);
-
- /* Enforce exclusive open with use count of PDE */
- spin_lock(&flash_file_open_lock);
- if (atomic_read(&dp->count) > 2) {
- spin_unlock(&flash_file_open_lock);
- return -EBUSY;
- }
-
- atomic_inc(&dp->count);
- spin_unlock(&flash_file_open_lock);
-
- return 0;
-}
-
-static int rtas_excl_release(struct inode *inode, struct file *file)
-{
- struct proc_dir_entry *dp = PDE(inode);
-
- atomic_dec(&dp->count);
-
- return 0;
+nomem:
+ rc = -ENOMEM;
+error:
+ mutex_unlock(&rtas_update_flash_mutex);
+ return rc;
}
-static void manage_flash(struct rtas_manage_flash_t *args_buf)
+/*
+ * Flash management routines.
+ */
+static void manage_flash(struct rtas_manage_flash_t *args_buf, unsigned int op)
{
s32 rc;
do {
- rc = rtas_call(rtas_token("ibm,manage-flash-image"), 1,
- 1, NULL, args_buf->op);
+ rc = rtas_call(rtas_token("ibm,manage-flash-image"), 1, 1,
+ NULL, op);
} while (rtas_busy_delay(rc));
args_buf->status = rc;
@@ -374,55 +389,62 @@ static void manage_flash(struct rtas_manage_flash_t *args_buf)
static ssize_t manage_flash_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
- struct proc_dir_entry *dp = PDE(file_inode(file));
- struct rtas_manage_flash_t *args_buf;
+ struct rtas_manage_flash_t *const args_buf = &rtas_manage_flash_data;
char msg[RTAS_MSG_MAXLEN];
- int msglen;
+ int msglen, status;
- args_buf = dp->data;
- if (args_buf == NULL)
- return 0;
-
- msglen = sprintf(msg, "%d\n", args_buf->status);
+ mutex_lock(&rtas_manage_flash_mutex);
+ status = args_buf->status;
+ mutex_unlock(&rtas_manage_flash_mutex);
+ msglen = sprintf(msg, "%d\n", status);
return simple_read_from_buffer(buf, count, ppos, msg, msglen);
}
static ssize_t manage_flash_write(struct file *file, const char __user *buf,
size_t count, loff_t *off)
{
- struct proc_dir_entry *dp = PDE(file_inode(file));
- struct rtas_manage_flash_t *args_buf;
- const char reject_str[] = "0";
- const char commit_str[] = "1";
+ struct rtas_manage_flash_t *const args_buf = &rtas_manage_flash_data;
+ static const char reject_str[] = "0";
+ static const char commit_str[] = "1";
char stkbuf[10];
- int op;
+ int op, rc;
+
+ mutex_lock(&rtas_manage_flash_mutex);
- args_buf = (struct rtas_manage_flash_t *) dp->data;
if ((args_buf->status == MANAGE_AUTH) || (count == 0))
- return count;
+ goto out;
op = -1;
if (buf) {
if (count > 9) count = 9;
- if (copy_from_user (stkbuf, buf, count)) {
- return -EFAULT;
- }
+ rc = -EFAULT;
+ if (copy_from_user (stkbuf, buf, count))
+ goto error;
if (strncmp(stkbuf, reject_str, strlen(reject_str)) == 0)
op = RTAS_REJECT_TMP_IMG;
else if (strncmp(stkbuf, commit_str, strlen(commit_str)) == 0)
op = RTAS_COMMIT_TMP_IMG;
}
- if (op == -1) /* buf is empty, or contains invalid string */
- return -EINVAL;
-
- args_buf->op = op;
- manage_flash(args_buf);
+ if (op == -1) { /* buf is empty, or contains invalid string */
+ rc = -EINVAL;
+ goto error;
+ }
+ manage_flash(args_buf, op);
+out:
+ mutex_unlock(&rtas_manage_flash_mutex);
return count;
+
+error:
+ mutex_unlock(&rtas_manage_flash_mutex);
+ return rc;
}
+/*
+ * Validation routines.
+ */
static void validate_flash(struct rtas_validate_flash_t *args_buf)
{
int token = rtas_token("ibm,validate-flash-image");
@@ -462,14 +484,14 @@ static int get_validate_flash_msg(struct rtas_validate_flash_t *args_buf,
static ssize_t validate_flash_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
- struct proc_dir_entry *dp = PDE(file_inode(file));
- struct rtas_validate_flash_t *args_buf;
+ struct rtas_validate_flash_t *const args_buf =
+ &rtas_validate_flash_data;
char msg[RTAS_MSG_MAXLEN];
int msglen;
- args_buf = dp->data;
-
+ mutex_lock(&rtas_validate_flash_mutex);
msglen = get_validate_flash_msg(args_buf, msg);
+ mutex_unlock(&rtas_validate_flash_mutex);
return simple_read_from_buffer(buf, count, ppos, msg, msglen);
}
@@ -477,24 +499,18 @@ static ssize_t validate_flash_read(struct file *file, char __user *buf,
static ssize_t validate_flash_write(struct file *file, const char __user *buf,
size_t count, loff_t *off)
{
- struct proc_dir_entry *dp = PDE(file_inode(file));
- struct rtas_validate_flash_t *args_buf;
+ struct rtas_validate_flash_t *const args_buf =
+ &rtas_validate_flash_data;
int rc;
- args_buf = (struct rtas_validate_flash_t *) dp->data;
-
- if (dp->data == NULL) {
- dp->data = kmalloc(sizeof(struct rtas_validate_flash_t),
- GFP_KERNEL);
- if (dp->data == NULL)
- return -ENOMEM;
- }
+ mutex_lock(&rtas_validate_flash_mutex);
/* We are only interested in the first 4K of the
* candidate image */
if ((*off >= VALIDATE_BUF_SIZE) ||
(args_buf->status == VALIDATE_AUTH)) {
*off += count;
+ mutex_unlock(&rtas_validate_flash_mutex);
return count;
}
@@ -517,31 +533,29 @@ static ssize_t validate_flash_write(struct file *file, const char __user *buf,
*off += count;
rc = count;
done:
- if (rc < 0) {
- kfree(dp->data);
- dp->data = NULL;
- }
+ mutex_unlock(&rtas_validate_flash_mutex);
return rc;
}
static int validate_flash_release(struct inode *inode, struct file *file)
{
- struct proc_dir_entry *dp = PDE(file_inode(file));
- struct rtas_validate_flash_t *args_buf;
+ struct rtas_validate_flash_t *const args_buf =
+ &rtas_validate_flash_data;
- args_buf = (struct rtas_validate_flash_t *) dp->data;
+ mutex_lock(&rtas_validate_flash_mutex);
if (args_buf->status == VALIDATE_READY) {
args_buf->buf_size = VALIDATE_BUF_SIZE;
validate_flash(args_buf);
}
- /* The matching atomic_inc was in rtas_excl_open() */
- atomic_dec(&dp->count);
-
+ mutex_unlock(&rtas_validate_flash_mutex);
return 0;
}
+/*
+ * On-reboot flash update applicator.
+ */
static void rtas_flash_firmware(int reboot_type)
{
unsigned long image_size;
@@ -634,75 +648,57 @@ static void rtas_flash_firmware(int reboot_type)
spin_unlock(&rtas_data_buf_lock);
}
-static void remove_flash_pde(struct proc_dir_entry *dp)
-{
- if (dp) {
- kfree(dp->data);
- remove_proc_entry(dp->name, dp->parent);
- }
-}
-
-static int initialize_flash_pde_data(const char *rtas_call_name,
- size_t buf_size,
- struct proc_dir_entry *dp)
-{
+/*
+ * Manifest of proc files to create
+ */
+struct rtas_flash_file {
+ const char *filename;
+ const char *rtas_call_name;
int *status;
- int token;
-
- dp->data = kzalloc(buf_size, GFP_KERNEL);
- if (dp->data == NULL)
- return -ENOMEM;
-
- /*
- * This code assumes that the status int is the first member of the
- * struct
- */
- status = (int *) dp->data;
- token = rtas_token(rtas_call_name);
- if (token == RTAS_UNKNOWN_SERVICE)
- *status = FLASH_AUTH;
- else
- *status = FLASH_NO_OP;
-
- return 0;
-}
-
-static struct proc_dir_entry *create_flash_pde(const char *filename,
- const struct file_operations *fops)
-{
- return proc_create(filename, S_IRUSR | S_IWUSR, NULL, fops);
-}
-
-static const struct file_operations rtas_flash_operations = {
- .owner = THIS_MODULE,
- .read = rtas_flash_read,
- .write = rtas_flash_write,
- .open = rtas_excl_open,
- .release = rtas_flash_release,
- .llseek = default_llseek,
-};
-
-static const struct file_operations manage_flash_operations = {
- .owner = THIS_MODULE,
- .read = manage_flash_read,
- .write = manage_flash_write,
- .open = rtas_excl_open,
- .release = rtas_excl_release,
- .llseek = default_llseek,
+ const struct file_operations fops;
};
-static const struct file_operations validate_flash_operations = {
- .owner = THIS_MODULE,
- .read = validate_flash_read,
- .write = validate_flash_write,
- .open = rtas_excl_open,
- .release = validate_flash_release,
- .llseek = default_llseek,
+static const struct rtas_flash_file rtas_flash_files[] = {
+ {
+ .filename = "powerpc/rtas/" FIRMWARE_FLASH_NAME,
+ .rtas_call_name = "ibm,update-flash-64-and-reboot",
+ .status = &rtas_update_flash_data.status,
+ .fops.read = rtas_flash_read_msg,
+ .fops.write = rtas_flash_write,
+ .fops.release = rtas_flash_release,
+ .fops.llseek = default_llseek,
+ },
+ {
+ .filename = "powerpc/rtas/" FIRMWARE_UPDATE_NAME,
+ .rtas_call_name = "ibm,update-flash-64-and-reboot",
+ .status = &rtas_update_flash_data.status,
+ .fops.read = rtas_flash_read_num,
+ .fops.write = rtas_flash_write,
+ .fops.release = rtas_flash_release,
+ .fops.llseek = default_llseek,
+ },
+ {
+ .filename = "powerpc/rtas/" VALIDATE_FLASH_NAME,
+ .rtas_call_name = "ibm,validate-flash-image",
+ .status = &rtas_validate_flash_data.status,
+ .fops.read = validate_flash_read,
+ .fops.write = validate_flash_write,
+ .fops.release = validate_flash_release,
+ .fops.llseek = default_llseek,
+ },
+ {
+ .filename = "powerpc/rtas/" MANAGE_FLASH_NAME,
+ .rtas_call_name = "ibm,manage-flash-image",
+ .status = &rtas_manage_flash_data.status,
+ .fops.read = manage_flash_read,
+ .fops.write = manage_flash_write,
+ .fops.llseek = default_llseek,
+ }
};
static int __init rtas_flash_init(void)
{
- int rc;
+ int i;
if (rtas_token("ibm,update-flash-64-and-reboot") ==
RTAS_UNKNOWN_SERVICE) {
@@ -710,93 +706,70 @@ static int __init rtas_flash_init(void)
return 1;
}
- firmware_flash_pde = create_flash_pde("powerpc/rtas/"
- FIRMWARE_FLASH_NAME,
- &rtas_flash_operations);
- if (firmware_flash_pde == NULL) {
- rc = -ENOMEM;
- goto cleanup;
- }
+ rtas_validate_flash_data.buf = kzalloc(VALIDATE_BUF_SIZE, GFP_KERNEL);
+ if (!rtas_validate_flash_data.buf)
+ return -ENOMEM;
- rc = initialize_flash_pde_data("ibm,update-flash-64-and-reboot",
- sizeof(struct rtas_update_flash_t),
- firmware_flash_pde);
- if (rc != 0)
- goto cleanup;
-
- firmware_update_pde = create_flash_pde("powerpc/rtas/"
- FIRMWARE_UPDATE_NAME,
- &rtas_flash_operations);
- if (firmware_update_pde == NULL) {
- rc = -ENOMEM;
- goto cleanup;
+ flash_block_cache = kmem_cache_create("rtas_flash_cache",
+ RTAS_BLK_SIZE, RTAS_BLK_SIZE, 0,
+ NULL);
+ if (!flash_block_cache) {
+ printk(KERN_ERR "%s: failed to create block cache\n",
+ __func__);
+ goto enomem_buf;
}
- rc = initialize_flash_pde_data("ibm,update-flash-64-and-reboot",
- sizeof(struct rtas_update_flash_t),
- firmware_update_pde);
- if (rc != 0)
- goto cleanup;
-
- validate_pde = create_flash_pde("powerpc/rtas/" VALIDATE_FLASH_NAME,
- &validate_flash_operations);
- if (validate_pde == NULL) {
- rc = -ENOMEM;
- goto cleanup;
- }
+ for (i = 0; i < ARRAY_SIZE(rtas_flash_files); i++) {
+ const struct rtas_flash_file *f = &rtas_flash_files[i];
+ int token;
- rc = initialize_flash_pde_data("ibm,validate-flash-image",
- sizeof(struct rtas_validate_flash_t),
- validate_pde);
- if (rc != 0)
- goto cleanup;
-
- manage_pde = create_flash_pde("powerpc/rtas/" MANAGE_FLASH_NAME,
- &manage_flash_operations);
- if (manage_pde == NULL) {
- rc = -ENOMEM;
- goto cleanup;
- }
+ if (!proc_create(f->filename, S_IRUSR | S_IWUSR, NULL, &f->fops))
+ goto enomem;
- rc = initialize_flash_pde_data("ibm,manage-flash-image",
- sizeof(struct rtas_manage_flash_t),
- manage_pde);
- if (rc != 0)
- goto cleanup;
+ /*
+ * This code assumes that the status int is the first member of the
+ * struct
+ */
+ token = rtas_token(f->rtas_call_name);
+ if (token == RTAS_UNKNOWN_SERVICE)
+ *f->status = FLASH_AUTH;
+ else
+ *f->status = FLASH_NO_OP;
+ }
rtas_flash_term_hook = rtas_flash_firmware;
-
- flash_block_cache = kmem_cache_create("rtas_flash_cache",
- RTAS_BLK_SIZE, RTAS_BLK_SIZE, 0,
- rtas_block_ctor);
- if (!flash_block_cache) {
- printk(KERN_ERR "%s: failed to create block cache\n",
- __func__);
- rc = -ENOMEM;
- goto cleanup;
- }
return 0;
-cleanup:
- remove_flash_pde(firmware_flash_pde);
- remove_flash_pde(firmware_update_pde);
- remove_flash_pde(validate_pde);
- remove_flash_pde(manage_pde);
+enomem:
+ while (--i >= 0) {
+ const struct rtas_flash_file *f = &rtas_flash_files[i];
+ remove_proc_entry(f->filename, NULL);
+ }
- return rc;
+ kmem_cache_destroy(flash_block_cache);
+enomem_buf:
+ kfree(rtas_validate_flash_data.buf);
+ return -ENOMEM;
}
static void __exit rtas_flash_cleanup(void)
{
+ int i;
+
rtas_flash_term_hook = NULL;
- if (flash_block_cache)
- kmem_cache_destroy(flash_block_cache);
+ if (rtas_firmware_flash_list) {
+ free_flash_list(rtas_firmware_flash_list);
+ rtas_firmware_flash_list = NULL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(rtas_flash_files); i++) {
+ const struct rtas_flash_file *f = &rtas_flash_files[i];
+ remove_proc_entry(f->filename, NULL);
+ }
- remove_flash_pde(firmware_flash_pde);
- remove_flash_pde(firmware_update_pde);
- remove_flash_pde(validate_pde);
- remove_flash_pde(manage_pde);
+ kmem_cache_destroy(flash_block_cache);
+ kfree(rtas_validate_flash_data.buf);
}
module_init(rtas_flash_init);
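
The rtas_flash.c rewrite drops the per-file proc_dir_entry bookkeeping and exclusive-open accounting in favour of static, mutex-protected state plus a single manifest array (rtas_flash_files[]) that is walked at init time and unwound on failure. A stand-alone sketch of that register-from-a-table-and-roll-back pattern (all names made up):

#include <stdio.h>

struct entry {
	const char *name;
	int (*create)(const char *name);	/* 0 on success */
};

static int fake_create(const char *name)  { printf("create %s\n", name); return 0; }
static int fake_fail(const char *name)    { printf("fail   %s\n", name); return -1; }
static void fake_remove(const char *name) { printf("remove %s\n", name); }

static const struct entry entries[] = {
	{ "firmware_flash",  fake_create },
	{ "firmware_update", fake_create },
	{ "validate_flash",  fake_fail },	/* force the unwind path */
	{ "manage_flash",    fake_create },
};

int main(void)
{
	int i, n = sizeof(entries) / sizeof(entries[0]);

	for (i = 0; i < n; i++)
		if (entries[i].create(entries[i].name))
			goto unwind;
	return 0;

unwind:
	/* mirror of the enomem: label above - tear down what was created */
	while (--i >= 0)
		fake_remove(entries[i].name);
	return 1;
}
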
diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c
index 71cb20d6ec61..6e7b7cdeec65 100644
--- a/arch/powerpc/kernel/rtas_pci.c
+++ b/arch/powerpc/kernel/rtas_pci.c
@@ -201,7 +201,7 @@ static void python_countermeasures(struct device_node *dev)
iounmap(chip_regs);
}
-void __init init_pci_config_tokens (void)
+void __init init_pci_config_tokens(void)
{
read_pci_config = rtas_token("read-pci-config");
write_pci_config = rtas_token("write-pci-config");
@@ -209,7 +209,7 @@ void __init init_pci_config_tokens (void)
ibm_write_pci_config = rtas_token("ibm,write-pci-config");
}
-unsigned long get_phb_buid (struct device_node *phb)
+unsigned long get_phb_buid(struct device_node *phb)
{
struct resource r;
diff --git a/arch/powerpc/kernel/rtasd.c b/arch/powerpc/kernel/rtasd.c
index 1045ff49cc6d..1130c53ad652 100644
--- a/arch/powerpc/kernel/rtasd.c
+++ b/arch/powerpc/kernel/rtasd.c
@@ -29,6 +29,7 @@
#include <asm/nvram.h>
#include <linux/atomic.h>
#include <asm/machdep.h>
+#include <asm/topology.h>
static DEFINE_SPINLOCK(rtasd_log_lock);
@@ -87,6 +88,8 @@ static char *rtas_event_type(int type)
return "Resource Deallocation Event";
case RTAS_TYPE_DUMP:
return "Dump Notification Event";
+ case RTAS_TYPE_PRRN:
+ return "Platform Resource Reassignment Event";
}
return rtas_type[0];
@@ -265,9 +268,51 @@ void pSeries_log_error(char *buf, unsigned int err_type, int fatal)
spin_unlock_irqrestore(&rtasd_log_lock, s);
return;
}
+}
+
+#ifdef CONFIG_PPC_PSERIES
+static s32 prrn_update_scope;
+static void prrn_work_fn(struct work_struct *work)
+{
+ /*
+ * For PRRN, we must pass the negative of the scope value in
+ * the RTAS event.
+ */
+ pseries_devicetree_update(-prrn_update_scope);
}
+static DECLARE_WORK(prrn_work, prrn_work_fn);
+
+void prrn_schedule_update(u32 scope)
+{
+ flush_work(&prrn_work);
+ prrn_update_scope = scope;
+ schedule_work(&prrn_work);
+}
+
+static void handle_rtas_event(const struct rtas_error_log *log)
+{
+ if (log->type == RTAS_TYPE_PRRN) {
+		/* For PRRN events, the extended log length is used to denote
+		 * the scope for calling rtas update-nodes.
+ */
+ if (prrn_is_enabled())
+ prrn_schedule_update(log->extended_log_length);
+ }
+
+ return;
+}
+
+#else
+
+static void handle_rtas_event(const struct rtas_error_log *log)
+{
+ return;
+}
+
+#endif
+
static int rtas_log_open(struct inode * inode, struct file * file)
{
return 0;
@@ -388,8 +433,10 @@ static void do_event_scan(void)
break;
}
- if (error == 0)
+ if (error == 0) {
pSeries_log_error(logdata, ERR_TYPE_RTAS_LOG, 0);
+ handle_rtas_event((struct rtas_error_log *)logdata);
+ }
} while(error == 0);
}
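
The rtasd.c hunk defers PRRN handling to process context: the event handler stashes the scope from the RTAS log, flushes any update already in flight, and reschedules the work item. A module-style sketch of the same deferred-work pattern (names invented, not part of this patch):

#include <linux/module.h>
#include <linux/workqueue.h>

/* Sketch only: mirrors the PRRN pattern above. All names are made up. */
static int example_scope;

static void example_work_fn(struct work_struct *work)
{
	pr_info("deferred update running, scope %d\n", example_scope);
}
static DECLARE_WORK(example_work, example_work_fn);

static void example_schedule_update(int scope)
{
	flush_work(&example_work);	/* wait out any update in flight */
	example_scope = scope;		/* then publish the new parameter */
	schedule_work(&example_work);
}

static int __init example_init(void)
{
	example_schedule_update(1);
	return 0;
}
module_init(example_init);
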
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index bdc499c17872..63d051f5b7a5 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -621,12 +621,6 @@ int check_legacy_ioport(unsigned long base_port)
case FDC_BASE: /* FDC1 */
np = of_find_node_by_type(NULL, "fdc");
break;
-#ifdef CONFIG_PPC_PREP
- case _PIDXR:
- case _PNPWRP:
- case PNPBIOS_BASE:
- /* implement me */
-#endif
default:
/* ipmi is supposed to fail here */
break;
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 75fbaceb5c87..e379d3fd1694 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -583,7 +583,9 @@ void __init setup_arch(char **cmdline_p)
init_mm.end_code = (unsigned long) _etext;
init_mm.end_data = (unsigned long) _edata;
init_mm.brk = klimit;
-
+#ifdef CONFIG_PPC_64K_PAGES
+ init_mm.context.pte_frag = NULL;
+#endif
irqstack_early_init();
exc_lvl_early_init();
emergency_stack_init();
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 3acb28e245b4..95068bf569ad 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -866,10 +866,12 @@ static long restore_tm_user_regs(struct pt_regs *regs,
do_load_up_transact_fpu(&current->thread);
regs->msr |= (MSR_FP | current->thread.fpexc_mode);
}
+#ifdef CONFIG_ALTIVEC
if (msr & MSR_VEC) {
do_load_up_transact_altivec(&current->thread);
regs->msr |= MSR_VEC;
}
+#endif
return 0;
}
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 995f8543cb57..c1794286098c 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -522,10 +522,12 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
do_load_up_transact_fpu(&current->thread);
regs->msr |= (MSR_FP | current->thread.fpexc_mode);
}
+#ifdef CONFIG_ALTIVEC
if (msr & MSR_VEC) {
do_load_up_transact_altivec(&current->thread);
regs->msr |= MSR_VEC;
}
+#endif
return err;
}
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 76bd9da8cb71..ee7ac5e6e28a 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -669,7 +669,7 @@ __cpuinit void start_secondary(void *unused)
local_irq_enable();
- cpu_idle();
+ cpu_startup_entry(CPUHP_ONLINE);
BUG();
}
diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
index d0bafc0cdf06..cd6e19d263b3 100644
--- a/arch/powerpc/kernel/sys_ppc32.c
+++ b/arch/powerpc/kernel/sys_ppc32.c
@@ -61,91 +61,6 @@ asmlinkage long ppc32_select(u32 n, compat_ulong_t __user *inp,
return compat_sys_select((int)n, inp, outp, exp, compat_ptr(tvp_x));
}
-#ifdef CONFIG_SYSVIPC
-long compat_sys_ipc(u32 call, u32 first, u32 second, u32 third, compat_uptr_t ptr,
- u32 fifth)
-{
- int version;
-
- version = call >> 16; /* hack for backward compatibility */
- call &= 0xffff;
-
- switch (call) {
-
- case SEMTIMEDOP:
- if (fifth)
- /* sign extend semid */
- return compat_sys_semtimedop((int)first,
- compat_ptr(ptr), second,
- compat_ptr(fifth));
- /* else fall through for normal semop() */
- case SEMOP:
- /* struct sembuf is the same on 32 and 64bit :)) */
- /* sign extend semid */
- return sys_semtimedop((int)first, compat_ptr(ptr), second,
- NULL);
- case SEMGET:
- /* sign extend key, nsems */
- return sys_semget((int)first, (int)second, third);
- case SEMCTL:
- /* sign extend semid, semnum */
- return compat_sys_semctl((int)first, (int)second, third,
- compat_ptr(ptr));
-
- case MSGSND:
- /* sign extend msqid */
- return compat_sys_msgsnd((int)first, (int)second, third,
- compat_ptr(ptr));
- case MSGRCV:
- /* sign extend msqid, msgtyp */
- return compat_sys_msgrcv((int)first, second, (int)fifth,
- third, version, compat_ptr(ptr));
- case MSGGET:
- /* sign extend key */
- return sys_msgget((int)first, second);
- case MSGCTL:
- /* sign extend msqid */
- return compat_sys_msgctl((int)first, second, compat_ptr(ptr));
-
- case SHMAT:
- /* sign extend shmid */
- return compat_sys_shmat((int)first, second, third, version,
- compat_ptr(ptr));
- case SHMDT:
- return sys_shmdt(compat_ptr(ptr));
- case SHMGET:
- /* sign extend key_t */
- return sys_shmget((int)first, second, third);
- case SHMCTL:
- /* sign extend shmid */
- return compat_sys_shmctl((int)first, second, compat_ptr(ptr));
-
- default:
- return -ENOSYS;
- }
-
- return -ENOSYS;
-}
-#endif
-
-/* Note: it is necessary to treat out_fd and in_fd as unsigned ints,
- * with the corresponding cast to a signed int to insure that the
- * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
- * and the register representation of a signed int (msr in 64-bit mode) is performed.
- */
-asmlinkage long compat_sys_sendfile_wrapper(u32 out_fd, u32 in_fd,
- compat_off_t __user *offset, u32 count)
-{
- return compat_sys_sendfile((int)out_fd, (int)in_fd, offset, count);
-}
-
-asmlinkage long compat_sys_sendfile64_wrapper(u32 out_fd, u32 in_fd,
- compat_loff_t __user *offset, u32 count)
-{
- return sys_sendfile((int)out_fd, (int)in_fd,
- (off_t __user *)offset, count);
-}
-
unsigned long compat_sys_mmap2(unsigned long addr, size_t len,
unsigned long prot, unsigned long flags,
unsigned long fd, unsigned long pgoff)
@@ -195,13 +110,6 @@ asmlinkage int compat_sys_ftruncate64(unsigned int fd, u32 reg4, unsigned long h
return sys_ftruncate(fd, (high << 32) | low);
}
-long ppc32_lookup_dcookie(u32 cookie_high, u32 cookie_low, char __user *buf,
- size_t len)
-{
- return sys_lookup_dcookie((u64)cookie_high << 32 | cookie_low,
- buf, len);
-}
-
long ppc32_fadvise64(int fd, u32 unused, u32 offset_high, u32 offset_low,
size_t len, int advice)
{
@@ -209,23 +117,6 @@ long ppc32_fadvise64(int fd, u32 unused, u32 offset_high, u32 offset_low,
advice);
}
-asmlinkage long compat_sys_add_key(const char __user *_type,
- const char __user *_description,
- const void __user *_payload,
- u32 plen,
- u32 ringid)
-{
- return sys_add_key(_type, _description, _payload, plen, ringid);
-}
-
-asmlinkage long compat_sys_request_key(const char __user *_type,
- const char __user *_description,
- const char __user *_callout_info,
- u32 destringid)
-{
- return sys_request_key(_type, _description, _callout_info, destringid);
-}
-
asmlinkage long compat_sys_sync_file_range2(int fd, unsigned int flags,
unsigned offset_hi, unsigned offset_lo,
unsigned nbytes_hi, unsigned nbytes_lo)
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 3ce1f864c2d3..e68a84568b8b 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -180,7 +180,7 @@ SYSFS_PMCSETUP(dscr, SPRN_DSCR);
SYSFS_PMCSETUP(pir, SPRN_PIR);
static DEVICE_ATTR(mmcra, 0600, show_mmcra, store_mmcra);
-static DEVICE_ATTR(spurr, 0600, show_spurr, NULL);
+static DEVICE_ATTR(spurr, 0400, show_spurr, NULL);
static DEVICE_ATTR(dscr, 0600, show_dscr, store_dscr);
static DEVICE_ATTR(purr, 0600, show_purr, store_purr);
static DEVICE_ATTR(pir, 0400, show_pir, NULL);
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index f77fa22754bc..5fc29ad7e26f 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -1049,10 +1049,8 @@ static int __init rtc_init(void)
return -ENODEV;
pdev = platform_device_register_simple("rtc-generic", -1, NULL, 0);
- if (IS_ERR(pdev))
- return PTR_ERR(pdev);
- return 0;
+ return PTR_RET(pdev);
}
module_init(rtc_init);
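
The time.c hunk collapses the IS_ERR()/PTR_ERR() dance into PTR_RET() (later renamed PTR_ERR_OR_ZERO), which returns the encoded error when given an error pointer and 0 otherwise. A stand-alone re-implementation of the error-pointer idea, for illustration only (the real definitions live in the kernel headers):

#include <stdio.h>

#define MAX_ERRNO	4095

/* The kernel encodes small negative errno values in the topmost
 * addresses of a pointer; this mimics that scheme in user space. */
static inline void *ERR_PTR(long error)	{ return (void *)error; }
static inline long PTR_ERR(const void *p)	{ return (long)p; }
static inline int IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}
static inline long PTR_RET(const void *p)	{ return IS_ERR(p) ? PTR_ERR(p) : 0; }

int main(void)
{
	int x = 0;
	void *ok  = &x;
	void *bad = ERR_PTR(-12);	/* -ENOMEM */

	printf("ok -> %ld, bad -> %ld\n", PTR_RET(ok), PTR_RET(bad));
	return 0;
}
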
diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
index 84dbace657ce..2da67e7a16d5 100644
--- a/arch/powerpc/kernel/tm.S
+++ b/arch/powerpc/kernel/tm.S
@@ -309,6 +309,7 @@ _GLOBAL(tm_recheckpoint)
or r5, r6, r5 /* Set MSR.FP+.VSX/.VEC */
mtmsr r5
+#ifdef CONFIG_ALTIVEC
/* FP and VEC registers: These are recheckpointed from thread.fpr[]
* and thread.vr[] respectively. The thread.transact_fpr[] version
* is more modern, and will be loaded subsequently by any FPUnavailable
@@ -323,6 +324,7 @@ _GLOBAL(tm_recheckpoint)
REST_32VRS(0, r5, r3) /* r5 scratch, r3 THREAD ptr */
ld r5, THREAD_VRSAVE(r3)
mtspr SPRN_VRSAVE, r5
+#endif
dont_restore_vec:
andi. r0, r4, MSR_FP
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 37cc40ef5043..83efa2f7d926 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -970,7 +970,10 @@ static int emulate_instruction(struct pt_regs *regs)
#ifdef CONFIG_PPC64
/* Emulate the mfspr rD, DSCR. */
- if (((instword & PPC_INST_MFSPR_DSCR_MASK) == PPC_INST_MFSPR_DSCR) &&
+ if ((((instword & PPC_INST_MFSPR_DSCR_USER_MASK) ==
+ PPC_INST_MFSPR_DSCR_USER) ||
+ ((instword & PPC_INST_MFSPR_DSCR_MASK) ==
+ PPC_INST_MFSPR_DSCR)) &&
cpu_has_feature(CPU_FTR_DSCR)) {
PPC_WARN_EMULATED(mfdscr, regs);
rd = (instword >> 21) & 0x1f;
@@ -978,7 +981,10 @@ static int emulate_instruction(struct pt_regs *regs)
return 0;
}
/* Emulate the mtspr DSCR, rD. */
- if (((instword & PPC_INST_MTSPR_DSCR_MASK) == PPC_INST_MTSPR_DSCR) &&
+ if ((((instword & PPC_INST_MTSPR_DSCR_USER_MASK) ==
+ PPC_INST_MTSPR_DSCR_USER) ||
+ ((instword & PPC_INST_MTSPR_DSCR_MASK) ==
+ PPC_INST_MTSPR_DSCR)) &&
cpu_has_feature(CPU_FTR_DSCR)) {
PPC_WARN_EMULATED(mtdscr, regs);
rd = (instword >> 21) & 0x1f;
diff --git a/arch/powerpc/kernel/udbg.c b/arch/powerpc/kernel/udbg.c
index f9748498fe58..13b867093499 100644
--- a/arch/powerpc/kernel/udbg.c
+++ b/arch/powerpc/kernel/udbg.c
@@ -156,15 +156,13 @@ static struct console udbg_console = {
.index = 0,
};
-static int early_console_initialized;
-
/*
* Called by setup_system after ppc_md->probe and ppc_md->early_init.
* Call it again after setting udbg_putc in ppc_md->setup_arch.
*/
void __init register_early_udbg_console(void)
{
- if (early_console_initialized)
+ if (early_console)
return;
if (!udbg_putc)
@@ -174,7 +172,7 @@ void __init register_early_udbg_console(void)
printk(KERN_INFO "early console immortal !\n");
udbg_console.flags &= ~CON_BOOT;
}
- early_console_initialized = 1;
+ early_console = &udbg_console;
register_console(&udbg_console);
}
diff --git a/arch/powerpc/kernel/uprobes.c b/arch/powerpc/kernel/uprobes.c
index bc77834dbf43..59f419b935f2 100644
--- a/arch/powerpc/kernel/uprobes.c
+++ b/arch/powerpc/kernel/uprobes.c
@@ -31,6 +31,16 @@
#define UPROBE_TRAP_NR UINT_MAX
/**
+ * is_trap_insn - check if the instruction is a trap variant
+ * @insn: instruction to be checked.
+ * Returns true if @insn is a trap variant.
+ */
+bool is_trap_insn(uprobe_opcode_t *insn)
+{
+ return (is_trap(*insn));
+}
+
+/**
* arch_uprobe_analyze_insn
* @mm: the probed address space.
* @arch_uprobe: the probepoint information.
@@ -43,12 +53,6 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe,
if (addr & 0x03)
return -EINVAL;
- /*
- * We currently don't support a uprobe on an already
- * existing breakpoint instruction underneath
- */
- if (is_trap(auprobe->ainsn))
- return -ENOTSUPP;
return 0;
}
@@ -188,3 +192,16 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
return false;
}
+
+unsigned long
+arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs)
+{
+ unsigned long orig_ret_vaddr;
+
+ orig_ret_vaddr = regs->link;
+
+ /* Replace the return addr with trampoline addr */
+ regs->link = trampoline_vaddr;
+
+ return orig_ret_vaddr;
+}
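
The uprobes.c additions wire powerpc into uretprobes: arch_uretprobe_hijack_return_addr() saves the tracee's link register and points it at the trampoline, so the probed function "returns" into the trampoline first. A trivial stand-alone illustration of that swap (fake_regs is an invented stand-in for pt_regs):

#include <stdio.h>

struct fake_regs { unsigned long link; };

/* save the original return target, point the register at the trampoline */
static unsigned long hijack_return_addr(unsigned long trampoline,
					struct fake_regs *regs)
{
	unsigned long orig = regs->link;

	regs->link = trampoline;
	return orig;
}

int main(void)
{
	struct fake_regs regs = { .link = 0x10001234UL };
	unsigned long orig = hijack_return_addr(0xdeadbeefUL, &regs);

	printf("orig %#lx, now returning via %#lx\n", orig, regs.link);
	return 0;
}
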
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index 1b2076f049ce..d4f463ac65b1 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -113,6 +113,10 @@ static struct vdso_patch_def vdso_patches[] = {
CPU_FTR_USE_TB, 0,
"__kernel_get_tbfreq", NULL
},
+ {
+ CPU_FTR_USE_TB, 0,
+ "__kernel_time", NULL
+ },
};
/*
diff --git a/arch/powerpc/kernel/vdso32/gettimeofday.S b/arch/powerpc/kernel/vdso32/gettimeofday.S
index 4ee09ee2e836..27e2f623210b 100644
--- a/arch/powerpc/kernel/vdso32/gettimeofday.S
+++ b/arch/powerpc/kernel/vdso32/gettimeofday.S
@@ -181,6 +181,32 @@ V_FUNCTION_END(__kernel_clock_getres)
/*
+ * Exact prototype of time()
+ *
+ * time_t time(time *t);
+ *
+ */
+V_FUNCTION_BEGIN(__kernel_time)
+ .cfi_startproc
+ mflr r12
+ .cfi_register lr,r12
+
+ mr r11,r3 /* r11 holds t */
+ bl __get_datapage@local
+ mr r9, r3 /* datapage ptr in r9 */
+
+ lwz r3,STAMP_XTIME+TSPEC_TV_SEC(r9)
+
+ cmplwi r11,0 /* check if t is NULL */
+ beq 2f
+ stw r3,0(r11) /* store result at *t */
+2: mtlr r12
+ crclr cr0*4+so
+ blr
+ .cfi_endproc
+V_FUNCTION_END(__kernel_time)
+
+/*
* This is the core of clock_gettime() and gettimeofday(),
* it returns the current time in r3 (seconds) and r4.
* On entry, r7 gives the resolution of r4, either USEC_PER_SEC
diff --git a/arch/powerpc/kernel/vdso32/vdso32.lds.S b/arch/powerpc/kernel/vdso32/vdso32.lds.S
index 43200ba2e570..f223409629b9 100644
--- a/arch/powerpc/kernel/vdso32/vdso32.lds.S
+++ b/arch/powerpc/kernel/vdso32/vdso32.lds.S
@@ -150,6 +150,7 @@ VERSION
#ifdef CONFIG_PPC64
__kernel_getcpu;
#endif
+ __kernel_time;
local: *;
};
diff --git a/arch/powerpc/kernel/vdso64/gettimeofday.S b/arch/powerpc/kernel/vdso64/gettimeofday.S
index e97a9a0dc4ac..a76b4af37ef2 100644
--- a/arch/powerpc/kernel/vdso64/gettimeofday.S
+++ b/arch/powerpc/kernel/vdso64/gettimeofday.S
@@ -164,6 +164,32 @@ V_FUNCTION_BEGIN(__kernel_clock_getres)
.cfi_endproc
V_FUNCTION_END(__kernel_clock_getres)
+/*
+ * Exact prototype of time()
+ *
+ * time_t time(time *t);
+ *
+ */
+V_FUNCTION_BEGIN(__kernel_time)
+ .cfi_startproc
+ mflr r12
+ .cfi_register lr,r12
+
+ mr r11,r3 /* r11 holds t */
+ bl V_LOCAL_FUNC(__get_datapage)
+
+ ld r4,STAMP_XTIME+TSPC64_TV_SEC(r3)
+
+ cmpldi r11,0 /* check if t is NULL */
+ beq 2f
+ std r4,0(r11) /* store result at *t */
+2: mtlr r12
+ crclr cr0*4+so
+ mr r3,r4
+ blr
+ .cfi_endproc
+V_FUNCTION_END(__kernel_time)
+
/*
* This is the core of clock_gettime() and gettimeofday(),
diff --git a/arch/powerpc/kernel/vdso64/vdso64.lds.S b/arch/powerpc/kernel/vdso64/vdso64.lds.S
index e6c1758f3588..e4863819663b 100644
--- a/arch/powerpc/kernel/vdso64/vdso64.lds.S
+++ b/arch/powerpc/kernel/vdso64/vdso64.lds.S
@@ -147,6 +147,7 @@ VERSION
__kernel_sync_dicache_p5;
__kernel_sigtramp_rt64;
__kernel_getcpu;
+ __kernel_time;
local: *;
};
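
The gettimeofday.S and linker-script hunks export __kernel_time from both the 32-bit and 64-bit vDSOs, so a libc that knows about the symbol can satisfy time(2) without entering the kernel. User code does not change at all; whether the call below is routed through the vDSO depends on the C library:

#include <stdio.h>
#include <time.h>

int main(void)
{
	time_t now = time(NULL);	/* may be serviced by __kernel_time */

	printf("seconds since the Epoch: %lld\n", (long long)now);
	return 0;
}
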
diff --git a/arch/powerpc/kvm/44x.c b/arch/powerpc/kvm/44x.c
index 3d7fd21c65f9..2f5c6b6d6877 100644
--- a/arch/powerpc/kvm/44x.c
+++ b/arch/powerpc/kvm/44x.c
@@ -124,6 +124,18 @@ int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
return kvmppc_set_sregs_ivor(vcpu, sregs);
}
+int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
+ union kvmppc_one_reg *val)
+{
+ return -EINVAL;
+}
+
+int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
+ union kvmppc_one_reg *val)
+{
+ return -EINVAL;
+}
+
struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
struct kvmppc_vcpu_44x *vcpu_44x;
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index 63c67ec72e43..eb643f862579 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -136,21 +136,41 @@ config KVM_E500V2
If unsure, say N.
config KVM_E500MC
- bool "KVM support for PowerPC E500MC/E5500 processors"
+ bool "KVM support for PowerPC E500MC/E5500/E6500 processors"
depends on PPC_E500MC
select KVM
select KVM_MMIO
select KVM_BOOKE_HV
select MMU_NOTIFIER
---help---
- Support running unmodified E500MC/E5500 (32-bit) guest kernels in
- virtual machines on E500MC/E5500 host processors.
+ Support running unmodified E500MC/E5500/E6500 guest kernels in
+ virtual machines on E500MC/E5500/E6500 host processors.
This module provides access to the hardware capabilities through
a character device node named /dev/kvm.
If unsure, say N.
+config KVM_MPIC
+ bool "KVM in-kernel MPIC emulation"
+ depends on KVM && E500
+ select HAVE_KVM_IRQCHIP
+ select HAVE_KVM_IRQ_ROUTING
+ select HAVE_KVM_MSI
+ help
+ Enable support for emulating MPIC devices inside the
+ host kernel, rather than relying on userspace to emulate.
+ Currently, support is limited to certain versions of
+ Freescale's MPIC implementation.
+
+config KVM_XICS
+ bool "KVM in-kernel XICS emulation"
+ depends on KVM_BOOK3S_64 && !KVM_MPIC
+ ---help---
+ Include support for the XICS (eXternal Interrupt Controller
+ Specification) interrupt controller architecture used on
+ IBM POWER (pSeries) servers.
+
source drivers/vhost/Kconfig
endif # VIRTUALIZATION
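
The Kconfig hunk adds in-kernel interrupt-controller emulation as two mutually exclusive options: KVM_MPIC for Freescale e500 hosts and KVM_XICS for Book3S-64 hosts. A hypothetical config fragment for the e500 path might look like this (illustrative only; KVM_XICS stays off because it excludes KVM_MPIC):

CONFIG_VIRTUALIZATION=y
CONFIG_KVM_E500MC=y
CONFIG_KVM_MPIC=y
# CONFIG_KVM_XICS is not set
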
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index b772eded8c26..422de3f4d46c 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -72,12 +72,18 @@ kvm-book3s_64-objs-$(CONFIG_KVM_BOOK3S_64_HV) := \
book3s_hv.o \
book3s_hv_interrupts.o \
book3s_64_mmu_hv.o
+kvm-book3s_64-builtin-xics-objs-$(CONFIG_KVM_XICS) := \
+ book3s_hv_rm_xics.o
kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HV) := \
book3s_hv_rmhandlers.o \
book3s_hv_rm_mmu.o \
book3s_64_vio_hv.o \
book3s_hv_ras.o \
- book3s_hv_builtin.o
+ book3s_hv_builtin.o \
+ $(kvm-book3s_64-builtin-xics-objs-y)
+
+kvm-book3s_64-objs-$(CONFIG_KVM_XICS) += \
+ book3s_xics.o
kvm-book3s_64-module-objs := \
../../../virt/kvm/kvm_main.o \
@@ -86,6 +92,7 @@ kvm-book3s_64-module-objs := \
emulate.o \
book3s.o \
book3s_64_vio.o \
+ book3s_rtas.o \
$(kvm-book3s_64-objs-y)
kvm-objs-$(CONFIG_KVM_BOOK3S_64) := $(kvm-book3s_64-module-objs)
@@ -103,6 +110,9 @@ kvm-book3s_32-objs := \
book3s_32_mmu.o
kvm-objs-$(CONFIG_KVM_BOOK3S_32) := $(kvm-book3s_32-objs)
+kvm-objs-$(CONFIG_KVM_MPIC) += mpic.o
+kvm-objs-$(CONFIG_HAVE_KVM_IRQ_ROUTING) += $(addprefix ../../../virt/kvm/, irqchip.o)
+
kvm-objs := $(kvm-objs-m) $(kvm-objs-y)
obj-$(CONFIG_KVM_440) += kvm.o
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index a4b645285240..700df6f1d32c 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -104,7 +104,7 @@ static int kvmppc_book3s_vec2irqprio(unsigned int vec)
return prio;
}
-static void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
+void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
unsigned int vec)
{
unsigned long old_pending = vcpu->arch.pending_exceptions;
@@ -160,8 +160,7 @@ void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
kvmppc_book3s_queue_irqprio(vcpu, vec);
}
-void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
- struct kvm_interrupt *irq)
+void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
{
kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
@@ -530,6 +529,21 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
val = get_reg_val(reg->id, vcpu->arch.vscr.u[3]);
break;
#endif /* CONFIG_ALTIVEC */
+ case KVM_REG_PPC_DEBUG_INST: {
+ u32 opcode = INS_TW;
+ r = copy_to_user((u32 __user *)(long)reg->addr,
+ &opcode, sizeof(u32));
+ break;
+ }
+#ifdef CONFIG_KVM_XICS
+ case KVM_REG_PPC_ICP_STATE:
+ if (!vcpu->arch.icp) {
+ r = -ENXIO;
+ break;
+ }
+ val = get_reg_val(reg->id, kvmppc_xics_get_icp(vcpu));
+ break;
+#endif /* CONFIG_KVM_XICS */
default:
r = -EINVAL;
break;
@@ -592,6 +606,16 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
vcpu->arch.vscr.u[3] = set_reg_val(reg->id, val);
break;
#endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_KVM_XICS
+ case KVM_REG_PPC_ICP_STATE:
+ if (!vcpu->arch.icp) {
+ r = -ENXIO;
+ break;
+ }
+ r = kvmppc_xics_set_icp(vcpu,
+ set_reg_val(reg->id, val));
+ break;
+#endif /* CONFIG_KVM_XICS */
default:
r = -EINVAL;
break;
@@ -607,6 +631,12 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
return 0;
}
+int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
+ struct kvm_guest_debug *dbg)
+{
+ return -EINVAL;
+}
+
void kvmppc_decrementer_func(unsigned long data)
{
struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index 5d7d29a313eb..3a9a1aceb14f 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -143,7 +143,7 @@ map_again:
}
ret = ppc_md.hpte_insert(hpteg, vpn, hpaddr, rflags, vflags,
- MMU_PAGE_4K, MMU_SEGSIZE_256M);
+ MMU_PAGE_4K, MMU_PAGE_4K, MMU_SEGSIZE_256M);
if (ret < 0) {
/* If we couldn't map a primary PTE, try a secondary */
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 8cc18abd6dde..5880dfb31074 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -893,7 +893,10 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
/* Harvest R and C */
rcbits = hptep[1] & (HPTE_R_R | HPTE_R_C);
*rmapp |= rcbits << KVMPPC_RMAP_RC_SHIFT;
- rev[i].guest_rpte = ptel | rcbits;
+ if (rcbits & ~rev[i].guest_rpte) {
+ rev[i].guest_rpte = ptel | rcbits;
+ note_hpte_modification(kvm, &rev[i]);
+ }
}
unlock_rmap(rmapp);
hptep[0] &= ~HPTE_V_HVLOCK;
@@ -976,7 +979,10 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
/* Now check and modify the HPTE */
if ((hptep[0] & HPTE_V_VALID) && (hptep[1] & HPTE_R_R)) {
kvmppc_clear_ref_hpte(kvm, hptep, i);
- rev[i].guest_rpte |= HPTE_R_R;
+ if (!(rev[i].guest_rpte & HPTE_R_R)) {
+ rev[i].guest_rpte |= HPTE_R_R;
+ note_hpte_modification(kvm, &rev[i]);
+ }
ret = 1;
}
hptep[0] &= ~HPTE_V_HVLOCK;
@@ -1080,7 +1086,10 @@ static int kvm_test_clear_dirty(struct kvm *kvm, unsigned long *rmapp)
hptep[1] &= ~HPTE_R_C;
eieio();
hptep[0] = (hptep[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID;
- rev[i].guest_rpte |= HPTE_R_C;
+ if (!(rev[i].guest_rpte & HPTE_R_C)) {
+ rev[i].guest_rpte |= HPTE_R_C;
+ note_hpte_modification(kvm, &rev[i]);
+ }
ret = 1;
}
hptep[0] &= ~HPTE_V_HVLOCK;
@@ -1090,11 +1099,30 @@ static int kvm_test_clear_dirty(struct kvm *kvm, unsigned long *rmapp)
return ret;
}
+static void harvest_vpa_dirty(struct kvmppc_vpa *vpa,
+ struct kvm_memory_slot *memslot,
+ unsigned long *map)
+{
+ unsigned long gfn;
+
+ if (!vpa->dirty || !vpa->pinned_addr)
+ return;
+ gfn = vpa->gpa >> PAGE_SHIFT;
+ if (gfn < memslot->base_gfn ||
+ gfn >= memslot->base_gfn + memslot->npages)
+ return;
+
+ vpa->dirty = false;
+ if (map)
+ __set_bit_le(gfn - memslot->base_gfn, map);
+}
+
long kvmppc_hv_get_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot,
unsigned long *map)
{
unsigned long i;
unsigned long *rmapp;
+ struct kvm_vcpu *vcpu;
preempt_disable();
rmapp = memslot->arch.rmap;
@@ -1103,6 +1131,15 @@ long kvmppc_hv_get_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot,
__set_bit_le(i, map);
++rmapp;
}
+
+ /* Harvest dirty bits from VPA and DTL updates */
+ /* Note: we never modify the SLB shadow buffer areas */
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ spin_lock(&vcpu->arch.vpa_update_lock);
+ harvest_vpa_dirty(&vcpu->arch.vpa, memslot, map);
+ harvest_vpa_dirty(&vcpu->arch.dtl, memslot, map);
+ spin_unlock(&vcpu->arch.vpa_update_lock);
+ }
preempt_enable();
return 0;
}
@@ -1114,7 +1151,7 @@ void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
unsigned long gfn = gpa >> PAGE_SHIFT;
struct page *page, *pages[1];
int npages;
- unsigned long hva, psize, offset;
+ unsigned long hva, offset;
unsigned long pa;
unsigned long *physp;
int srcu_idx;
@@ -1146,14 +1183,9 @@ void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
}
srcu_read_unlock(&kvm->srcu, srcu_idx);
- psize = PAGE_SIZE;
- if (PageHuge(page)) {
- page = compound_head(page);
- psize <<= compound_order(page);
- }
- offset = gpa & (psize - 1);
+ offset = gpa & (PAGE_SIZE - 1);
if (nb_ret)
- *nb_ret = psize - offset;
+ *nb_ret = PAGE_SIZE - offset;
return page_address(page) + offset;
err:
@@ -1161,11 +1193,31 @@ void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
return NULL;
}
-void kvmppc_unpin_guest_page(struct kvm *kvm, void *va)
+void kvmppc_unpin_guest_page(struct kvm *kvm, void *va, unsigned long gpa,
+ bool dirty)
{
struct page *page = virt_to_page(va);
+ struct kvm_memory_slot *memslot;
+ unsigned long gfn;
+ unsigned long *rmap;
+ int srcu_idx;
put_page(page);
+
+ if (!dirty || !kvm->arch.using_mmu_notifiers)
+ return;
+
+ /* We need to mark this page dirty in the rmap chain */
+ gfn = gpa >> PAGE_SHIFT;
+ srcu_idx = srcu_read_lock(&kvm->srcu);
+ memslot = gfn_to_memslot(kvm, gfn);
+ if (memslot) {
+ rmap = &memslot->arch.rmap[gfn - memslot->base_gfn];
+ lock_rmap(rmap);
+ *rmap |= KVMPPC_RMAP_CHANGED;
+ unlock_rmap(rmap);
+ }
+ srcu_read_unlock(&kvm->srcu, srcu_idx);
}
/*
@@ -1193,16 +1245,36 @@ struct kvm_htab_ctx {
#define HPTE_SIZE (2 * sizeof(unsigned long))
+/*
+ * Returns 1 if this HPT entry has been modified or has pending
+ * R/C bit changes.
+ */
+static int hpte_dirty(struct revmap_entry *revp, unsigned long *hptp)
+{
+ unsigned long rcbits_unset;
+
+ if (revp->guest_rpte & HPTE_GR_MODIFIED)
+ return 1;
+
+ /* Also need to consider changes in reference and changed bits */
+ rcbits_unset = ~revp->guest_rpte & (HPTE_R_R | HPTE_R_C);
+ if ((hptp[0] & HPTE_V_VALID) && (hptp[1] & rcbits_unset))
+ return 1;
+
+ return 0;
+}
+
static long record_hpte(unsigned long flags, unsigned long *hptp,
unsigned long *hpte, struct revmap_entry *revp,
int want_valid, int first_pass)
{
unsigned long v, r;
+ unsigned long rcbits_unset;
int ok = 1;
int valid, dirty;
/* Unmodified entries are uninteresting except on the first pass */
- dirty = !!(revp->guest_rpte & HPTE_GR_MODIFIED);
+ dirty = hpte_dirty(revp, hptp);
if (!first_pass && !dirty)
return 0;
@@ -1223,16 +1295,28 @@ static long record_hpte(unsigned long flags, unsigned long *hptp,
while (!try_lock_hpte(hptp, HPTE_V_HVLOCK))
cpu_relax();
v = hptp[0];
+
+ /* re-evaluate valid and dirty from synchronized HPTE value */
+ valid = !!(v & HPTE_V_VALID);
+ dirty = !!(revp->guest_rpte & HPTE_GR_MODIFIED);
+
+ /* Harvest R and C into guest view if necessary */
+ rcbits_unset = ~revp->guest_rpte & (HPTE_R_R | HPTE_R_C);
+ if (valid && (rcbits_unset & hptp[1])) {
+ revp->guest_rpte |= (hptp[1] & (HPTE_R_R | HPTE_R_C)) |
+ HPTE_GR_MODIFIED;
+ dirty = 1;
+ }
+
if (v & HPTE_V_ABSENT) {
v &= ~HPTE_V_ABSENT;
v |= HPTE_V_VALID;
+ valid = 1;
}
- /* re-evaluate valid and dirty from synchronized HPTE value */
- valid = !!(v & HPTE_V_VALID);
if ((flags & KVM_GET_HTAB_BOLTED_ONLY) && !(v & HPTE_V_BOLTED))
valid = 0;
- r = revp->guest_rpte | (hptp[1] & (HPTE_R_R | HPTE_R_C));
- dirty = !!(revp->guest_rpte & HPTE_GR_MODIFIED);
+
+ r = revp->guest_rpte;
/* only clear modified if this is the right sort of entry */
if (valid == want_valid && dirty) {
r &= ~HPTE_GR_MODIFIED;
@@ -1288,7 +1372,7 @@ static ssize_t kvm_htab_read(struct file *file, char __user *buf,
/* Skip uninteresting entries, i.e. clean on not-first pass */
if (!first_pass) {
while (i < kvm->arch.hpt_npte &&
- !(revp->guest_rpte & HPTE_GR_MODIFIED)) {
+ !hpte_dirty(revp, hptp)) {
++i;
hptp += 2;
++revp;
@@ -1467,7 +1551,7 @@ static int kvm_htab_release(struct inode *inode, struct file *filp)
return 0;
}
-static struct file_operations kvm_htab_fops = {
+static const struct file_operations kvm_htab_fops = {
.read = kvm_htab_read,
.write = kvm_htab_write,
.llseek = default_llseek,
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index 72ffc899c082..b2d3f3b2de72 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -92,7 +92,7 @@ static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
return 0;
}
-static struct file_operations kvm_spapr_tce_fops = {
+static const struct file_operations kvm_spapr_tce_fops = {
.mmap = kvm_spapr_tce_mmap,
.release = kvm_spapr_tce_release,
};
diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c
index 836c56975e21..1f6344c4408d 100644
--- a/arch/powerpc/kvm/book3s_emulate.c
+++ b/arch/powerpc/kvm/book3s_emulate.c
@@ -194,7 +194,9 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
run->papr_hcall.args[i] = gpr;
}
- emulated = EMULATE_DO_PAPR;
+ run->exit_reason = KVM_EXIT_PAPR_HCALL;
+ vcpu->arch.hcall_needed = 1;
+ emulated = EMULATE_EXIT_USER;
break;
}
#endif
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 80dcc53a1aba..9de24f8e03c7 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -66,6 +66,31 @@
static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
+void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
+{
+ int me;
+ int cpu = vcpu->cpu;
+ wait_queue_head_t *wqp;
+
+ wqp = kvm_arch_vcpu_wq(vcpu);
+ if (waitqueue_active(wqp)) {
+ wake_up_interruptible(wqp);
+ ++vcpu->stat.halt_wakeup;
+ }
+
+ me = get_cpu();
+
+ /* CPU points to the first thread of the core */
+ if (cpu != me && cpu >= 0 && cpu < nr_cpu_ids) {
+ int real_cpu = cpu + vcpu->arch.ptid;
+ if (paca[real_cpu].kvm_hstate.xics_phys)
+ xics_wake_cpu(real_cpu);
+ else if (cpu_online(cpu))
+ smp_send_reschedule(cpu);
+ }
+ put_cpu();
+}
+
/*
* We use the vcpu_load/put functions to measure stolen time.
* Stolen time is counted as time when either the vcpu is able to
@@ -259,7 +284,7 @@ static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
len = ((struct reg_vpa *)va)->length.hword;
else
len = ((struct reg_vpa *)va)->length.word;
- kvmppc_unpin_guest_page(kvm, va);
+ kvmppc_unpin_guest_page(kvm, va, vpa, false);
/* Check length */
if (len > nb || len < sizeof(struct reg_vpa))
@@ -359,13 +384,13 @@ static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap)
va = NULL;
nb = 0;
if (gpa)
- va = kvmppc_pin_guest_page(kvm, vpap->next_gpa, &nb);
+ va = kvmppc_pin_guest_page(kvm, gpa, &nb);
spin_lock(&vcpu->arch.vpa_update_lock);
if (gpa == vpap->next_gpa)
break;
/* sigh... unpin that one and try again */
if (va)
- kvmppc_unpin_guest_page(kvm, va);
+ kvmppc_unpin_guest_page(kvm, va, gpa, false);
}
vpap->update_pending = 0;
@@ -375,12 +400,15 @@ static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap)
* has changed the mappings underlying guest memory,
* so unregister the region.
*/
- kvmppc_unpin_guest_page(kvm, va);
+ kvmppc_unpin_guest_page(kvm, va, gpa, false);
va = NULL;
}
if (vpap->pinned_addr)
- kvmppc_unpin_guest_page(kvm, vpap->pinned_addr);
+ kvmppc_unpin_guest_page(kvm, vpap->pinned_addr, vpap->gpa,
+ vpap->dirty);
+ vpap->gpa = gpa;
vpap->pinned_addr = va;
+ vpap->dirty = false;
if (va)
vpap->pinned_end = va + vpap->len;
}
@@ -472,6 +500,7 @@ static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
/* order writing *dt vs. writing vpa->dtl_idx */
smp_wmb();
vpa->dtl_idx = ++vcpu->arch.dtl_index;
+ vcpu->arch.dtl.dirty = true;
}
int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
@@ -479,7 +508,7 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
unsigned long req = kvmppc_get_gpr(vcpu, 3);
unsigned long target, ret = H_SUCCESS;
struct kvm_vcpu *tvcpu;
- int idx;
+ int idx, rc;
switch (req) {
case H_ENTER:
@@ -515,6 +544,28 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
kvmppc_get_gpr(vcpu, 5),
kvmppc_get_gpr(vcpu, 6));
break;
+ case H_RTAS:
+ if (list_empty(&vcpu->kvm->arch.rtas_tokens))
+ return RESUME_HOST;
+
+ rc = kvmppc_rtas_hcall(vcpu);
+
+ if (rc == -ENOENT)
+ return RESUME_HOST;
+ else if (rc == 0)
+ break;
+
+ /* Send the error out to userspace via KVM_RUN */
+ return rc;
+
+ case H_XIRR:
+ case H_CPPR:
+ case H_EOI:
+ case H_IPI:
+ if (kvmppc_xics_enabled(vcpu)) {
+ ret = kvmppc_xics_hcall(vcpu, req);
+ break;
+ } /* fallthrough */
default:
return RESUME_HOST;
}
@@ -913,15 +964,19 @@ out:
return ERR_PTR(err);
}
+static void unpin_vpa(struct kvm *kvm, struct kvmppc_vpa *vpa)
+{
+ if (vpa->pinned_addr)
+ kvmppc_unpin_guest_page(kvm, vpa->pinned_addr, vpa->gpa,
+ vpa->dirty);
+}
+
void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
spin_lock(&vcpu->arch.vpa_update_lock);
- if (vcpu->arch.dtl.pinned_addr)
- kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.dtl.pinned_addr);
- if (vcpu->arch.slb_shadow.pinned_addr)
- kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.slb_shadow.pinned_addr);
- if (vcpu->arch.vpa.pinned_addr)
- kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.vpa.pinned_addr);
+ unpin_vpa(vcpu->kvm, &vcpu->arch.dtl);
+ unpin_vpa(vcpu->kvm, &vcpu->arch.slb_shadow);
+ unpin_vpa(vcpu->kvm, &vcpu->arch.vpa);
spin_unlock(&vcpu->arch.vpa_update_lock);
kvm_vcpu_uninit(vcpu);
kmem_cache_free(kvm_vcpu_cache, vcpu);
@@ -955,7 +1010,6 @@ static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
}
extern int __kvmppc_vcore_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
-extern void xics_wake_cpu(int cpu);
static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
struct kvm_vcpu *vcpu)
@@ -1330,9 +1384,12 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
break;
vc->runner = vcpu;
n_ceded = 0;
- list_for_each_entry(v, &vc->runnable_threads, arch.run_list)
+ list_for_each_entry(v, &vc->runnable_threads, arch.run_list) {
if (!v->arch.pending_exceptions)
n_ceded += v->arch.ceded;
+ else
+ v->arch.ceded = 0;
+ }
if (n_ceded == vc->n_runnable)
kvmppc_vcore_blocked(vc);
else
@@ -1483,7 +1540,7 @@ static int kvm_rma_release(struct inode *inode, struct file *filp)
return 0;
}
-static struct file_operations kvm_rma_fops = {
+static const struct file_operations kvm_rma_fops = {
.mmap = kvm_rma_mmap,
.release = kvm_rma_release,
};
@@ -1515,7 +1572,13 @@ static void kvmppc_add_seg_page_size(struct kvm_ppc_one_seg_page_size **sps,
(*sps)->page_shift = def->shift;
(*sps)->slb_enc = def->sllp;
(*sps)->enc[0].page_shift = def->shift;
- (*sps)->enc[0].pte_enc = def->penc;
+ /*
+ * Only return the base page encoding. We don't want to return
+ * all the supported pte_enc values, because our H_ENTER doesn't
+ * support MPSS yet. Once it does, we can start passing them
+ * all here.
+ */
+ (*sps)->enc[0].pte_enc = def->penc[linux_psize];
(*sps)++;
}
@@ -1639,12 +1702,12 @@ int kvmppc_core_prepare_memory_region(struct kvm *kvm,
void kvmppc_core_commit_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
- struct kvm_memory_slot old)
+ const struct kvm_memory_slot *old)
{
unsigned long npages = mem->memory_size >> PAGE_SHIFT;
struct kvm_memory_slot *memslot;
- if (npages && old.npages) {
+ if (npages && old->npages) {
/*
* If modifying a memslot, reset all the rmap dirty bits.
* If this is a new memslot, we don't need to do anything
@@ -1821,6 +1884,7 @@ int kvmppc_core_init_vm(struct kvm *kvm)
cpumask_setall(&kvm->arch.need_tlb_flush);
INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
+ INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
kvm->arch.rma = NULL;
@@ -1866,6 +1930,8 @@ void kvmppc_core_destroy_vm(struct kvm *kvm)
kvm->arch.rma = NULL;
}
+ kvmppc_rtas_tokens_free(kvm);
+
kvmppc_free_hpt(kvm);
WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
}
diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S
index 84035a528c80..37f1cc417ca0 100644
--- a/arch/powerpc/kvm/book3s_hv_interrupts.S
+++ b/arch/powerpc/kvm/book3s_hv_interrupts.S
@@ -122,11 +122,16 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
add r8,r8,r7
std r8,HSTATE_DECEXP(r13)
+#ifdef CONFIG_SMP
/*
* On PPC970, if the guest vcpu has an external interrupt pending,
* send ourselves an IPI so as to interrupt the guest once it
* enables interrupts. (It must have interrupts disabled,
* otherwise we would already have delivered the interrupt.)
+ *
+ * XXX If this is a UP build, smp_send_reschedule is not available,
+ * so the interrupt will be delayed until the next time the vcpu
+ * enters the guest with interrupts enabled.
*/
BEGIN_FTR_SECTION
ld r0, VCPU_PENDING_EXC(r4)
@@ -141,6 +146,7 @@ BEGIN_FTR_SECTION
mr r4, r31
32:
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+#endif /* CONFIG_SMP */
/* Jump to partition switch code */
bl .kvmppc_hv_entry_trampoline
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 19c93bae1aea..6dcbb49105a4 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -97,17 +97,6 @@ void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
}
EXPORT_SYMBOL_GPL(kvmppc_add_revmap_chain);
-/*
- * Note modification of an HPTE; set the HPTE modified bit
- * if anyone is interested.
- */
-static inline void note_hpte_modification(struct kvm *kvm,
- struct revmap_entry *rev)
-{
- if (atomic_read(&kvm->arch.hpte_mod_interest))
- rev->guest_rpte |= HPTE_GR_MODIFIED;
-}
-
/* Remove this HPTE from the chain for a real page */
static void remove_revmap_chain(struct kvm *kvm, long pte_index,
struct revmap_entry *rev,
diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c
new file mode 100644
index 000000000000..b4b0082f761c
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c
@@ -0,0 +1,406 @@
+/*
+ * Copyright 2012 Michael Ellerman, IBM Corporation.
+ * Copyright 2012 Benjamin Herrenschmidt, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/kvm_host.h>
+#include <linux/err.h>
+
+#include <asm/kvm_book3s.h>
+#include <asm/kvm_ppc.h>
+#include <asm/hvcall.h>
+#include <asm/xics.h>
+#include <asm/debug.h>
+#include <asm/synch.h>
+#include <asm/ppc-opcode.h>
+
+#include "book3s_xics.h"
+
+#define DEBUG_PASSUP
+
+static inline void rm_writeb(unsigned long paddr, u8 val)
+{
+ __asm__ __volatile__("sync; stbcix %0,0,%1"
+ : : "r" (val), "r" (paddr) : "memory");
+}
+
+static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu,
+ struct kvm_vcpu *this_vcpu)
+{
+ struct kvmppc_icp *this_icp = this_vcpu->arch.icp;
+ unsigned long xics_phys;
+ int cpu;
+
+ /* Mark the target VCPU as having an interrupt pending */
+ vcpu->stat.queue_intr++;
+ set_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
+
+ /* Kick self ? Just set MER and return */
+ if (vcpu == this_vcpu) {
+ mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_MER);
+ return;
+ }
+
+ /* Check if the core is loaded, if not, too hard */
+ cpu = vcpu->cpu;
+ if (cpu < 0 || cpu >= nr_cpu_ids) {
+ this_icp->rm_action |= XICS_RM_KICK_VCPU;
+ this_icp->rm_kick_target = vcpu;
+ return;
+ }
+ /* With SMT, cpu always points to thread 0 of the core; adjust it */
+ cpu += vcpu->arch.ptid;
+
+ /* Not too hard, then poke the target */
+ xics_phys = paca[cpu].kvm_hstate.xics_phys;
+ rm_writeb(xics_phys + XICS_MFRR, IPI_PRIORITY);
+}
+
+static void icp_rm_clr_vcpu_irq(struct kvm_vcpu *vcpu)
+{
+ /* Note: Only called on self! */
+ clear_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL,
+ &vcpu->arch.pending_exceptions);
+ mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~LPCR_MER);
+}
+
+static inline bool icp_rm_try_update(struct kvmppc_icp *icp,
+ union kvmppc_icp_state old,
+ union kvmppc_icp_state new)
+{
+ struct kvm_vcpu *this_vcpu = local_paca->kvm_hstate.kvm_vcpu;
+ bool success;
+
+ /* Calculate new output value */
+ new.out_ee = (new.xisr && (new.pending_pri < new.cppr));
+
+ /* Attempt atomic update */
+ success = cmpxchg64(&icp->state.raw, old.raw, new.raw) == old.raw;
+ if (!success)
+ goto bail;
+
+ /*
+ * Check for output state update
+ *
+ * Note that this is racy since another processor could be updating
+ * the state already. This is why we never clear the interrupt output
+ * here, we only ever set it. The clear only happens prior to doing
+ * an update and only by the processor itself. Currently we do it
+ * in Accept (H_XIRR) and Up_Cppr (H_CPPR).
+ *
+ * We also do not try to figure out whether the EE state has changed,
+ * we unconditionally set it if the new state calls for it. The reason
+ * for that is that we opportunistically remove the pending interrupt
+ * flag when raising CPPR, so we need to set it back here if an
+ * interrupt is still pending.
+ */
+ if (new.out_ee)
+ icp_rm_set_vcpu_irq(icp->vcpu, this_vcpu);
+
+ /* Expose the state change for debug purposes */
+ this_vcpu->arch.icp->rm_dbgstate = new;
+ this_vcpu->arch.icp->rm_dbgtgt = icp->vcpu;
+
+ bail:
+ return success;
+}
+
+static inline int check_too_hard(struct kvmppc_xics *xics,
+ struct kvmppc_icp *icp)
+{
+ return (xics->real_mode_dbg || icp->rm_action) ? H_TOO_HARD : H_SUCCESS;
+}
+
+static void icp_rm_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
+ u8 new_cppr)
+{
+ union kvmppc_icp_state old_state, new_state;
+ bool resend;
+
+ /*
+ * This handles several related states in one operation:
+ *
+ * ICP State: Down_CPPR
+ *
+ * Load CPPR with new value and if the XISR is 0
+ * then check for resends:
+ *
+ * ICP State: Resend
+ *
+ * If MFRR is more favored than CPPR, check for IPIs
+ * and notify ICS of a potential resend. This is done
+ * asynchronously (when used in real mode, we will have
+ * to exit here).
+ *
+ * We do not handle the complete Check_IPI as documented
+ * here. In the PAPR, this state will be used for both
+ * Set_MFRR and Down_CPPR. However, we know that we aren't
+ * changing the MFRR state here so we don't need to handle
+ * the case of an MFRR causing a reject of a pending irq,
+ * this will have been handled when the MFRR was set in the
+ * first place.
+ *
+ * Thus we don't have to handle rejects, only resends.
+ *
+ * When implementing real mode for HV KVM, resend will lead to
+ * a H_TOO_HARD return and the whole transaction will be handled
+ * in virtual mode.
+ */
+ do {
+ old_state = new_state = ACCESS_ONCE(icp->state);
+
+ /* Down_CPPR */
+ new_state.cppr = new_cppr;
+
+ /*
+ * Cut down Resend / Check_IPI / IPI
+ *
+ * The logic is that we cannot have a pending interrupt
+ * trumped by an IPI at this point (see above), so we
+ * know that either the pending interrupt is already an
+ * IPI (in which case we don't care to override it) or
+ * it's more favored than us or non-existent
+ */
+ if (new_state.mfrr < new_cppr &&
+ new_state.mfrr <= new_state.pending_pri) {
+ new_state.pending_pri = new_state.mfrr;
+ new_state.xisr = XICS_IPI;
+ }
+
+ /* Latch/clear resend bit */
+ resend = new_state.need_resend;
+ new_state.need_resend = 0;
+
+ } while (!icp_rm_try_update(icp, old_state, new_state));
+
+ /*
+ * Now handle resend checks. Those are asynchronous to the ICP
+ * state update in HW (ie bus transactions) so we can handle them
+ * separately here as well.
+ */
+ if (resend)
+ icp->rm_action |= XICS_RM_CHECK_RESEND;
+}
+
+
+unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
+{
+ union kvmppc_icp_state old_state, new_state;
+ struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
+ struct kvmppc_icp *icp = vcpu->arch.icp;
+ u32 xirr;
+
+ if (!xics || !xics->real_mode)
+ return H_TOO_HARD;
+
+ /* First clear the interrupt */
+ icp_rm_clr_vcpu_irq(icp->vcpu);
+
+ /*
+ * ICP State: Accept_Interrupt
+ *
+ * Return the pending interrupt (if any) along with the
+ * current CPPR, then clear the XISR & set CPPR to the
+ * pending priority
+ */
+ do {
+ old_state = new_state = ACCESS_ONCE(icp->state);
+
+ xirr = old_state.xisr | (((u32)old_state.cppr) << 24);
+ if (!old_state.xisr)
+ break;
+ new_state.cppr = new_state.pending_pri;
+ new_state.pending_pri = 0xff;
+ new_state.xisr = 0;
+
+ } while (!icp_rm_try_update(icp, old_state, new_state));
+
+ /* Return the result in GPR4 */
+ vcpu->arch.gpr[4] = xirr;
+
+ return check_too_hard(xics, icp);
+}
+
+int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
+ unsigned long mfrr)
+{
+ union kvmppc_icp_state old_state, new_state;
+ struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
+ struct kvmppc_icp *icp, *this_icp = vcpu->arch.icp;
+ u32 reject;
+ bool resend;
+ bool local;
+
+ if (!xics || !xics->real_mode)
+ return H_TOO_HARD;
+
+ local = this_icp->server_num == server;
+ if (local)
+ icp = this_icp;
+ else
+ icp = kvmppc_xics_find_server(vcpu->kvm, server);
+ if (!icp)
+ return H_PARAMETER;
+
+ /*
+ * ICP state: Set_MFRR
+ *
+ * If the CPPR is more favored than the new MFRR, then
+ * nothing needs to be done as there can be no XISR to
+ * reject.
+ *
+ * If the CPPR is less favored, then we might be replacing
+ * an interrupt, and thus need to possibly reject it as in
+ *
+ * ICP state: Check_IPI
+ */
+ do {
+ old_state = new_state = ACCESS_ONCE(icp->state);
+
+ /* Set_MFRR */
+ new_state.mfrr = mfrr;
+
+ /* Check_IPI */
+ reject = 0;
+ resend = false;
+ if (mfrr < new_state.cppr) {
+ /* Reject a pending interrupt if not an IPI */
+ if (mfrr <= new_state.pending_pri)
+ reject = new_state.xisr;
+ new_state.pending_pri = mfrr;
+ new_state.xisr = XICS_IPI;
+ }
+
+ if (mfrr > old_state.mfrr && mfrr > new_state.cppr) {
+ resend = new_state.need_resend;
+ new_state.need_resend = 0;
+ }
+ } while (!icp_rm_try_update(icp, old_state, new_state));
+
+ /* Pass rejects to virtual mode */
+ if (reject && reject != XICS_IPI) {
+ this_icp->rm_action |= XICS_RM_REJECT;
+ this_icp->rm_reject = reject;
+ }
+
+ /* Pass resends to virtual mode */
+ if (resend)
+ this_icp->rm_action |= XICS_RM_CHECK_RESEND;
+
+ return check_too_hard(xics, this_icp);
+}
+
+int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
+{
+ union kvmppc_icp_state old_state, new_state;
+ struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
+ struct kvmppc_icp *icp = vcpu->arch.icp;
+ u32 reject;
+
+ if (!xics || !xics->real_mode)
+ return H_TOO_HARD;
+
+ /*
+ * ICP State: Set_CPPR
+ *
+ * We can safely compare the new value with the current
+ * value outside of the transaction as the CPPR is only
+ * ever changed by the processor on itself
+ */
+ if (cppr > icp->state.cppr) {
+ icp_rm_down_cppr(xics, icp, cppr);
+ goto bail;
+ } else if (cppr == icp->state.cppr)
+ return H_SUCCESS;
+
+ /*
+ * ICP State: Up_CPPR
+ *
+ * The processor is raising its priority, this can result
+ * in a rejection of a pending interrupt:
+ *
+ * ICP State: Reject_Current
+ *
+ * We can remove EE from the current processor, the update
+ * transaction will set it again if needed
+ */
+ icp_rm_clr_vcpu_irq(icp->vcpu);
+
+ do {
+ old_state = new_state = ACCESS_ONCE(icp->state);
+
+ reject = 0;
+ new_state.cppr = cppr;
+
+ if (cppr <= new_state.pending_pri) {
+ reject = new_state.xisr;
+ new_state.xisr = 0;
+ new_state.pending_pri = 0xff;
+ }
+
+ } while (!icp_rm_try_update(icp, old_state, new_state));
+
+ /* Pass rejects to virtual mode */
+ if (reject && reject != XICS_IPI) {
+ icp->rm_action |= XICS_RM_REJECT;
+ icp->rm_reject = reject;
+ }
+ bail:
+ return check_too_hard(xics, icp);
+}
+
+int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
+{
+ struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
+ struct kvmppc_icp *icp = vcpu->arch.icp;
+ struct kvmppc_ics *ics;
+ struct ics_irq_state *state;
+ u32 irq = xirr & 0x00ffffff;
+ u16 src;
+
+ if (!xics || !xics->real_mode)
+ return H_TOO_HARD;
+
+ /*
+ * ICP State: EOI
+ *
+ * Note: If EOI is incorrectly used by SW to lower the CPPR
+ * value (ie more favored), we do not check for rejection of
+ * a pending interrupt; this is a SW error and PAPR specifies
+ * that we don't have to deal with it.
+ *
+ * The sending of an EOI to the ICS is handled after the
+ * CPPR update
+ *
+ * ICP State: Down_CPPR which we handle
+ * in a separate function as it's shared with H_CPPR.
+ */
+ icp_rm_down_cppr(xics, icp, xirr >> 24);
+
+ /* IPIs have no EOI */
+ if (irq == XICS_IPI)
+ goto bail;
+ /*
+ * EOI handling: If the interrupt is still asserted, we need to
+ * resend it. We can take a lockless "peek" at the ICS state here.
+ *
+ * "Message" interrupts will never have "asserted" set
+ */
+ ics = kvmppc_xics_find_ics(xics, irq, &src);
+ if (!ics)
+ goto bail;
+ state = &ics->irq_state[src];
+
+ /* Still asserted, resend it, we make it look like a reject */
+ if (state->asserted) {
+ icp->rm_action |= XICS_RM_REJECT;
+ icp->rm_reject = irq;
+ }
+ bail:
+ return check_too_hard(xics, icp);
+}
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index e33d11f1b977..b02f91e4c70d 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -79,10 +79,6 @@ _GLOBAL(kvmppc_hv_entry_trampoline)
* *
*****************************************************************************/
-#define XICS_XIRR 4
-#define XICS_QIRR 0xc
-#define XICS_IPI 2 /* interrupt source # for IPIs */
-
/*
* We come in here when wakened from nap mode on a secondary hw thread.
* Relocation is off and most register values are lost.
@@ -101,50 +97,51 @@ kvm_start_guest:
li r0,1
stb r0,PACA_NAPSTATELOST(r13)
- /* get vcpu pointer, NULL if we have no vcpu to run */
- ld r4,HSTATE_KVM_VCPU(r13)
- cmpdi cr1,r4,0
+ /* were we napping due to cede? */
+ lbz r0,HSTATE_NAPPING(r13)
+ cmpwi r0,0
+ bne kvm_end_cede
+
+ /*
+ * We weren't napping due to cede, so this must be a secondary
+ * thread being woken up to run a guest, or being woken up due
+ * to a stray IPI. (Or due to some machine check or hypervisor
+ * maintenance interrupt while the core is in KVM.)
+ */
/* Check the wake reason in SRR1 to see why we got here */
mfspr r3,SPRN_SRR1
rlwinm r3,r3,44-31,0x7 /* extract wake reason field */
cmpwi r3,4 /* was it an external interrupt? */
- bne 27f
-
- /*
- * External interrupt - for now assume it is an IPI, since we
- * should never get any other interrupts sent to offline threads.
- * Only do this for secondary threads.
- */
- beq cr1,25f
- lwz r3,VCPU_PTID(r4)
- cmpwi r3,0
- beq 27f
-25: ld r5,HSTATE_XICS_PHYS(r13)
- li r0,0xff
- li r6,XICS_QIRR
- li r7,XICS_XIRR
+ bne 27f /* if not */
+ ld r5,HSTATE_XICS_PHYS(r13)
+ li r7,XICS_XIRR /* if it was an external interrupt, */
lwzcix r8,r5,r7 /* get and ack the interrupt */
sync
clrldi. r9,r8,40 /* get interrupt source ID. */
- beq 27f /* none there? */
- cmpwi r9,XICS_IPI
- bne 26f
+ beq 28f /* none there? */
+ cmpwi r9,XICS_IPI /* was it an IPI? */
+ bne 29f
+ li r0,0xff
+ li r6,XICS_MFRR
stbcix r0,r5,r6 /* clear IPI */
-26: stwcix r8,r5,r7 /* EOI the interrupt */
-
-27: /* XXX should handle hypervisor maintenance interrupts etc. here */
+ stwcix r8,r5,r7 /* EOI the interrupt */
+ sync /* order loading of vcpu after that */
- /* reload vcpu pointer after clearing the IPI */
+ /* get vcpu pointer, NULL if we have no vcpu to run */
ld r4,HSTATE_KVM_VCPU(r13)
cmpdi r4,0
/* if we have no vcpu to run, go back to sleep */
beq kvm_no_guest
+ b kvmppc_hv_entry
- /* were we napping due to cede? */
- lbz r0,HSTATE_NAPPING(r13)
- cmpwi r0,0
- bne kvm_end_cede
+27: /* XXX should handle hypervisor maintenance interrupts etc. here */
+ b kvm_no_guest
+28: /* SRR1 said external but ICP said nope?? */
+ b kvm_no_guest
+29: /* External non-IPI interrupt to offline secondary thread? help?? */
+ stw r8,HSTATE_SAVED_XIRR(r13)
+ b kvm_no_guest
.global kvmppc_hv_entry
kvmppc_hv_entry:
@@ -260,6 +257,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
lwz r5, LPPACA_YIELDCOUNT(r3)
addi r5, r5, 1
stw r5, LPPACA_YIELDCOUNT(r3)
+ li r6, 1
+ stb r6, VCPU_VPA_DIRTY(r4)
25:
/* Load up DAR and DSISR */
ld r5, VCPU_DAR(r4)
@@ -485,20 +484,20 @@ toc_tlbie_lock:
mtctr r6
mtxer r7
+ ld r10, VCPU_PC(r4)
+ ld r11, VCPU_MSR(r4)
kvmppc_cede_reentry: /* r4 = vcpu, r13 = paca */
ld r6, VCPU_SRR0(r4)
ld r7, VCPU_SRR1(r4)
- ld r10, VCPU_PC(r4)
- ld r11, VCPU_MSR(r4) /* r11 = vcpu->arch.msr & ~MSR_HV */
+ /* r11 = vcpu->arch.msr & ~MSR_HV */
rldicl r11, r11, 63 - MSR_HV_LG, 1
rotldi r11, r11, 1 + MSR_HV_LG
ori r11, r11, MSR_ME
/* Check if we can deliver an external or decrementer interrupt now */
ld r0,VCPU_PENDING_EXC(r4)
- li r8,(1 << BOOK3S_IRQPRIO_EXTERNAL)
- oris r8,r8,(1 << BOOK3S_IRQPRIO_EXTERNAL_LEVEL)@h
+ lis r8,(1 << BOOK3S_IRQPRIO_EXTERNAL_LEVEL)@h
and r0,r0,r8
cmpdi cr1,r0,0
andi. r0,r11,MSR_EE
@@ -526,10 +525,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
/* Move SRR0 and SRR1 into the respective regs */
5: mtspr SPRN_SRR0, r6
mtspr SPRN_SRR1, r7
- li r0,0
- stb r0,VCPU_CEDED(r4) /* cancel cede */
fast_guest_return:
+ li r0,0
+ stb r0,VCPU_CEDED(r4) /* cancel cede */
mtspr SPRN_HSRR0,r10
mtspr SPRN_HSRR1,r11
@@ -676,17 +675,99 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
cmpwi r12,BOOK3S_INTERRUPT_SYSCALL
beq hcall_try_real_mode
- /* Check for mediated interrupts (could be done earlier really ...) */
+ /* Only handle external interrupts here on arch 206 and later */
BEGIN_FTR_SECTION
- cmpwi r12,BOOK3S_INTERRUPT_EXTERNAL
- bne+ 1f
- andi. r0,r11,MSR_EE
- beq 1f
- mfspr r5,SPRN_LPCR
- andi. r0,r5,LPCR_MER
- bne bounce_ext_interrupt
-1:
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+ b ext_interrupt_to_host
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
+
+ /* External interrupt ? */
+ cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
+ bne+ ext_interrupt_to_host
+
+ /* External interrupt, first check for host_ipi. If this is
+ * set, we know the host wants us out so let's do it now
+ */
+do_ext_interrupt:
+ lbz r0, HSTATE_HOST_IPI(r13)
+ cmpwi r0, 0
+ bne ext_interrupt_to_host
+
+ /* Now read the interrupt from the ICP */
+ ld r5, HSTATE_XICS_PHYS(r13)
+ li r7, XICS_XIRR
+ cmpdi r5, 0
+ beq- ext_interrupt_to_host
+ lwzcix r3, r5, r7
+ rlwinm. r0, r3, 0, 0xffffff
+ sync
+ beq 3f /* if nothing pending in the ICP */
+
+ /* We found something in the ICP...
+ *
+ * If it's not an IPI, stash it in the PACA and return to
+ * the host, we don't (yet) handle directing real external
+ * interrupts directly to the guest
+ */
+ cmpwi r0, XICS_IPI
+ bne ext_stash_for_host
+
+ /* It's an IPI, clear the MFRR and EOI it */
+ li r0, 0xff
+ li r6, XICS_MFRR
+ stbcix r0, r5, r6 /* clear the IPI */
+ stwcix r3, r5, r7 /* EOI it */
+ sync
+
+ /* We need to re-check host IPI now in case it got set in the
+ * meantime. If it's clear, we bounce the interrupt to the
+ * guest
+ */
+ lbz r0, HSTATE_HOST_IPI(r13)
+ cmpwi r0, 0
+ bne- 1f
+
+ /* All right, looks like an IPI for the guest, we need to set MER */
+3:
+ /* Check if any CPU is heading out to the host, if so head out too */
+ ld r5, HSTATE_KVM_VCORE(r13)
+ lwz r0, VCORE_ENTRY_EXIT(r5)
+ cmpwi r0, 0x100
+ bge ext_interrupt_to_host
+
+ /* See if there is a pending interrupt for the guest */
+ mfspr r8, SPRN_LPCR
+ ld r0, VCPU_PENDING_EXC(r9)
+ /* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
+ rldicl. r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
+ rldimi r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
+ beq 2f
+
+ /* And if the guest EE is set, we can deliver immediately, else
+ * we return to the guest with MER set
+ */
+ andi. r0, r11, MSR_EE
+ beq 2f
+ mtspr SPRN_SRR0, r10
+ mtspr SPRN_SRR1, r11
+ li r10, BOOK3S_INTERRUPT_EXTERNAL
+ li r11, (MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */
+ rotldi r11, r11, 63
+2: mr r4, r9
+ mtspr SPRN_LPCR, r8
+ b fast_guest_return
+
+ /* We raced with the host, we need to resend that IPI, bummer */
+1: li r0, IPI_PRIORITY
+ stbcix r0, r5, r6 /* set the IPI */
+ sync
+ b ext_interrupt_to_host
+
+ext_stash_for_host:
+ /* It's not an IPI and it's for the host, stash it in the PACA
+ * before exit, it will be picked up by the host ICP driver
+ */
+ stw r3, HSTATE_SAVED_XIRR(r13)
+ext_interrupt_to_host:
guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
/* Save DEC */
@@ -829,7 +910,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
beq 44f
ld r8,HSTATE_XICS_PHYS(r6) /* get thread's XICS reg addr */
li r0,IPI_PRIORITY
- li r7,XICS_QIRR
+ li r7,XICS_MFRR
stbcix r0,r7,r8 /* trigger the IPI */
44: srdi. r3,r3,1
addi r6,r6,PACA_SIZE
@@ -1018,6 +1099,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
lwz r3, LPPACA_YIELDCOUNT(r8)
addi r3, r3, 1
stw r3, LPPACA_YIELDCOUNT(r8)
+ li r3, 1
+ stb r3, VCPU_VPA_DIRTY(r9)
25:
/* Save PMU registers if requested */
/* r8 and cr0.eq are live here */
@@ -1350,11 +1433,19 @@ hcall_real_table:
.long 0 /* 0x58 */
.long 0 /* 0x5c */
.long 0 /* 0x60 */
- .long 0 /* 0x64 */
- .long 0 /* 0x68 */
- .long 0 /* 0x6c */
- .long 0 /* 0x70 */
- .long 0 /* 0x74 */
+#ifdef CONFIG_KVM_XICS
+ .long .kvmppc_rm_h_eoi - hcall_real_table
+ .long .kvmppc_rm_h_cppr - hcall_real_table
+ .long .kvmppc_rm_h_ipi - hcall_real_table
+ .long 0 /* 0x70 - H_IPOLL */
+ .long .kvmppc_rm_h_xirr - hcall_real_table
+#else
+ .long 0 /* 0x64 - H_EOI */
+ .long 0 /* 0x68 - H_CPPR */
+ .long 0 /* 0x6c - H_IPI */
+ .long 0 /* 0x70 - H_IPOLL */
+ .long 0 /* 0x74 - H_XIRR */
+#endif
.long 0 /* 0x78 */
.long 0 /* 0x7c */
.long 0 /* 0x80 */
@@ -1405,15 +1496,6 @@ ignore_hdec:
mr r4,r9
b fast_guest_return
-bounce_ext_interrupt:
- mr r4,r9
- mtspr SPRN_SRR0,r10
- mtspr SPRN_SRR1,r11
- li r10,BOOK3S_INTERRUPT_EXTERNAL
- li r11,(MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */
- rotldi r11,r11,63
- b fast_guest_return
-
_GLOBAL(kvmppc_h_set_dabr)
std r4,VCPU_DABR(r3)
/* Work around P7 bug where DABR can get corrupted on mtspr */
@@ -1519,6 +1601,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
b .
kvm_end_cede:
+ /* get vcpu pointer */
+ ld r4, HSTATE_KVM_VCPU(r13)
+
/* Woken by external or decrementer interrupt */
ld r1, HSTATE_HOST_R1(r13)
@@ -1558,6 +1643,16 @@ kvm_end_cede:
li r0,0
stb r0,HSTATE_NAPPING(r13)
+ /* Check the wake reason in SRR1 to see why we got here */
+ mfspr r3, SPRN_SRR1
+ rlwinm r3, r3, 44-31, 0x7 /* extract wake reason field */
+ cmpwi r3, 4 /* was it an external interrupt? */
+ li r12, BOOK3S_INTERRUPT_EXTERNAL
+ mr r9, r4
+ ld r10, VCPU_PC(r9)
+ ld r11, VCPU_MSR(r9)
+ beq do_ext_interrupt /* if so */
+
/* see if any other thread is already exiting */
lwz r0,VCORE_ENTRY_EXIT(r5)
cmpwi r0,0x100
@@ -1577,8 +1672,7 @@ kvm_cede_prodded:
/* we've ceded but we want to give control to the host */
kvm_cede_exit:
- li r3,H_TOO_HARD
- blr
+ b hcall_real_fallback
/* Try to handle a machine check in real mode */
machine_check_realmode:
@@ -1626,7 +1720,7 @@ secondary_nap:
beq 37f
sync
li r0, 0xff
- li r6, XICS_QIRR
+ li r6, XICS_MFRR
stbcix r0, r5, r6 /* clear the IPI */
stwcix r3, r5, r7 /* EOI it */
37: sync
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 5e93438afb06..bdc40b8e77d9 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -762,9 +762,7 @@ program_interrupt:
run->exit_reason = KVM_EXIT_MMIO;
r = RESUME_HOST_NV;
break;
- case EMULATE_DO_PAPR:
- run->exit_reason = KVM_EXIT_PAPR_HCALL;
- vcpu->arch.hcall_needed = 1;
+ case EMULATE_EXIT_USER:
r = RESUME_HOST_NV;
break;
default:
@@ -1039,7 +1037,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
if (!vcpu_book3s)
goto out;
- vcpu_book3s->shadow_vcpu = (struct kvmppc_book3s_shadow_vcpu *)
+ vcpu_book3s->shadow_vcpu =
kzalloc(sizeof(*vcpu_book3s->shadow_vcpu), GFP_KERNEL);
if (!vcpu_book3s->shadow_vcpu)
goto free_vcpu;
@@ -1283,7 +1281,7 @@ int kvmppc_core_prepare_memory_region(struct kvm *kvm,
void kvmppc_core_commit_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
- struct kvm_memory_slot old)
+ const struct kvm_memory_slot *old)
{
}
@@ -1298,6 +1296,7 @@ int kvmppc_core_init_vm(struct kvm *kvm)
{
#ifdef CONFIG_PPC64
INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
+ INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
#endif
if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c
index ee02b30878ed..b24309c6c2d5 100644
--- a/arch/powerpc/kvm/book3s_pr_papr.c
+++ b/arch/powerpc/kvm/book3s_pr_papr.c
@@ -227,6 +227,13 @@ static int kvmppc_h_pr_put_tce(struct kvm_vcpu *vcpu)
return EMULATE_DONE;
}
+static int kvmppc_h_pr_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
+{
+ long rc = kvmppc_xics_hcall(vcpu, cmd);
+ kvmppc_set_gpr(vcpu, 3, rc);
+ return EMULATE_DONE;
+}
+
int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
{
switch (cmd) {
@@ -246,6 +253,20 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
vcpu->stat.halt_wakeup++;
return EMULATE_DONE;
+ case H_XIRR:
+ case H_CPPR:
+ case H_EOI:
+ case H_IPI:
+ if (kvmppc_xics_enabled(vcpu))
+ return kvmppc_h_pr_xics_hcall(vcpu, cmd);
+ break;
+ case H_RTAS:
+ if (list_empty(&vcpu->kvm->arch.rtas_tokens))
+ return RESUME_HOST;
+ if (kvmppc_rtas_hcall(vcpu))
+ break;
+ kvmppc_set_gpr(vcpu, 3, 0);
+ return EMULATE_DONE;
}
return EMULATE_FAIL;
diff --git a/arch/powerpc/kvm/book3s_rtas.c b/arch/powerpc/kvm/book3s_rtas.c
new file mode 100644
index 000000000000..3219ba895246
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_rtas.c
@@ -0,0 +1,274 @@
+/*
+ * Copyright 2012 Michael Ellerman, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/kvm_host.h>
+#include <linux/kvm.h>
+#include <linux/err.h>
+
+#include <asm/uaccess.h>
+#include <asm/kvm_book3s.h>
+#include <asm/kvm_ppc.h>
+#include <asm/hvcall.h>
+#include <asm/rtas.h>
+
+#ifdef CONFIG_KVM_XICS
+static void kvm_rtas_set_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
+{
+ u32 irq, server, priority;
+ int rc;
+
+ if (args->nargs != 3 || args->nret != 1) {
+ rc = -3;
+ goto out;
+ }
+
+ irq = args->args[0];
+ server = args->args[1];
+ priority = args->args[2];
+
+ rc = kvmppc_xics_set_xive(vcpu->kvm, irq, server, priority);
+ if (rc)
+ rc = -3;
+out:
+ args->rets[0] = rc;
+}
+
+static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
+{
+ u32 irq, server, priority;
+ int rc;
+
+ if (args->nargs != 1 || args->nret != 3) {
+ rc = -3;
+ goto out;
+ }
+
+ irq = args->args[0];
+
+ server = priority = 0;
+ rc = kvmppc_xics_get_xive(vcpu->kvm, irq, &server, &priority);
+ if (rc) {
+ rc = -3;
+ goto out;
+ }
+
+ args->rets[1] = server;
+ args->rets[2] = priority;
+out:
+ args->rets[0] = rc;
+}
+
+static void kvm_rtas_int_off(struct kvm_vcpu *vcpu, struct rtas_args *args)
+{
+ u32 irq;
+ int rc;
+
+ if (args->nargs != 1 || args->nret != 1) {
+ rc = -3;
+ goto out;
+ }
+
+ irq = args->args[0];
+
+ rc = kvmppc_xics_int_off(vcpu->kvm, irq);
+ if (rc)
+ rc = -3;
+out:
+ args->rets[0] = rc;
+}
+
+static void kvm_rtas_int_on(struct kvm_vcpu *vcpu, struct rtas_args *args)
+{
+ u32 irq;
+ int rc;
+
+ if (args->nargs != 1 || args->nret != 1) {
+ rc = -3;
+ goto out;
+ }
+
+ irq = args->args[0];
+
+ rc = kvmppc_xics_int_on(vcpu->kvm, irq);
+ if (rc)
+ rc = -3;
+out:
+ args->rets[0] = rc;
+}
+#endif /* CONFIG_KVM_XICS */
+
+struct rtas_handler {
+ void (*handler)(struct kvm_vcpu *vcpu, struct rtas_args *args);
+ char *name;
+};
+
+static struct rtas_handler rtas_handlers[] = {
+#ifdef CONFIG_KVM_XICS
+ { .name = "ibm,set-xive", .handler = kvm_rtas_set_xive },
+ { .name = "ibm,get-xive", .handler = kvm_rtas_get_xive },
+ { .name = "ibm,int-off", .handler = kvm_rtas_int_off },
+ { .name = "ibm,int-on", .handler = kvm_rtas_int_on },
+#endif
+};
+
+struct rtas_token_definition {
+ struct list_head list;
+ struct rtas_handler *handler;
+ u64 token;
+};
+
+static int rtas_name_matches(char *s1, char *s2)
+{
+ struct kvm_rtas_token_args args;
+ return !strncmp(s1, s2, sizeof(args.name));
+}
+
+static int rtas_token_undefine(struct kvm *kvm, char *name)
+{
+ struct rtas_token_definition *d, *tmp;
+
+ lockdep_assert_held(&kvm->lock);
+
+ list_for_each_entry_safe(d, tmp, &kvm->arch.rtas_tokens, list) {
+ if (rtas_name_matches(d->handler->name, name)) {
+ list_del(&d->list);
+ kfree(d);
+ return 0;
+ }
+ }
+
+ /* It's not an error to undefine an undefined token */
+ return 0;
+}
+
+static int rtas_token_define(struct kvm *kvm, char *name, u64 token)
+{
+ struct rtas_token_definition *d;
+ struct rtas_handler *h = NULL;
+ bool found;
+ int i;
+
+ lockdep_assert_held(&kvm->lock);
+
+ list_for_each_entry(d, &kvm->arch.rtas_tokens, list) {
+ if (d->token == token)
+ return -EEXIST;
+ }
+
+ found = false;
+ for (i = 0; i < ARRAY_SIZE(rtas_handlers); i++) {
+ h = &rtas_handlers[i];
+ if (rtas_name_matches(h->name, name)) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ return -ENOENT;
+
+ d = kzalloc(sizeof(*d), GFP_KERNEL);
+ if (!d)
+ return -ENOMEM;
+
+ d->handler = h;
+ d->token = token;
+
+ list_add_tail(&d->list, &kvm->arch.rtas_tokens);
+
+ return 0;
+}
+
+int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp)
+{
+ struct kvm_rtas_token_args args;
+ int rc;
+
+ if (copy_from_user(&args, argp, sizeof(args)))
+ return -EFAULT;
+
+ mutex_lock(&kvm->lock);
+
+ if (args.token)
+ rc = rtas_token_define(kvm, args.name, args.token);
+ else
+ rc = rtas_token_undefine(kvm, args.name);
+
+ mutex_unlock(&kvm->lock);
+
+ return rc;
+}
+
+int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
+{
+ struct rtas_token_definition *d;
+ struct rtas_args args;
+ rtas_arg_t *orig_rets;
+ gpa_t args_phys;
+ int rc;
+
+ /* r4 contains the guest physical address of the RTAS args */
+ args_phys = kvmppc_get_gpr(vcpu, 4);
+
+ rc = kvm_read_guest(vcpu->kvm, args_phys, &args, sizeof(args));
+ if (rc)
+ goto fail;
+
+ /*
+ * args->rets is a pointer into args->args. Now that we've
+ * copied args we need to fix it up to point into our copy,
+ * not the guest args. We also need to save the original
+ * value so we can restore it on the way out.
+ */
+ orig_rets = args.rets;
+ args.rets = &args.args[args.nargs];
+
+ mutex_lock(&vcpu->kvm->lock);
+
+ rc = -ENOENT;
+ list_for_each_entry(d, &vcpu->kvm->arch.rtas_tokens, list) {
+ if (d->token == args.token) {
+ d->handler->handler(vcpu, &args);
+ rc = 0;
+ break;
+ }
+ }
+
+ mutex_unlock(&vcpu->kvm->lock);
+
+ if (rc == 0) {
+ args.rets = orig_rets;
+ rc = kvm_write_guest(vcpu->kvm, args_phys, &args, sizeof(args));
+ if (rc)
+ goto fail;
+ }
+
+ return rc;
+
+fail:
+ /*
+ * We only get here if the guest has called RTAS with a bogus
+ * args pointer. That means we can't get to the args, and so we
+ * can't fail the RTAS call. So fail right out to userspace,
+ * which should kill the guest.
+ */
+ return rc;
+}
+
+void kvmppc_rtas_tokens_free(struct kvm *kvm)
+{
+ struct rtas_token_definition *d, *tmp;
+
+ lockdep_assert_held(&kvm->lock);
+
+ list_for_each_entry_safe(d, tmp, &kvm->arch.rtas_tokens, list) {
+ list_del(&d->list);
+ kfree(d);
+ }
+}
diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
new file mode 100644
index 000000000000..f7a103756618
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_xics.c
@@ -0,0 +1,1270 @@
+/*
+ * Copyright 2012 Michael Ellerman, IBM Corporation.
+ * Copyright 2012 Benjamin Herrenschmidt, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/kvm_host.h>
+#include <linux/err.h>
+#include <linux/gfp.h>
+#include <linux/anon_inodes.h>
+
+#include <asm/uaccess.h>
+#include <asm/kvm_book3s.h>
+#include <asm/kvm_ppc.h>
+#include <asm/hvcall.h>
+#include <asm/xics.h>
+#include <asm/debug.h>
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include "book3s_xics.h"
+
+#if 1
+#define XICS_DBG(fmt...) do { } while (0)
+#else
+#define XICS_DBG(fmt...) trace_printk(fmt)
+#endif
+
+#define ENABLE_REALMODE true
+#define DEBUG_REALMODE false
+
+/*
+ * LOCKING
+ * =======
+ *
+ * Each ICS has a mutex protecting the information about the IRQ
+ * sources and avoiding simultaneous deliveries of the same interrupt.
+ *
+ * ICP operations are done via a single compare & swap transaction
+ * (most ICP state fits in the union kvmppc_icp_state)
+ */
+
+/*
+ * TODO
+ * ====
+ *
+ * - To speed up resends, keep a bitmap of "resend" set bits in the
+ * ICS
+ *
+ * - Speed up server# -> ICP lookup (array? hash table?)
+ *
+ * - Make ICS lockless as well, or at least a per-interrupt lock or hashed
+ * locks array to improve scalability
+ */
+
+/* -- ICS routines -- */
+
+static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
+ u32 new_irq);
+
+static int ics_deliver_irq(struct kvmppc_xics *xics, u32 irq, u32 level,
+ bool report_status)
+{
+ struct ics_irq_state *state;
+ struct kvmppc_ics *ics;
+ u16 src;
+
+ XICS_DBG("ics deliver %#x (level: %d)\n", irq, level);
+
+ ics = kvmppc_xics_find_ics(xics, irq, &src);
+ if (!ics) {
+ XICS_DBG("ics_deliver_irq: IRQ 0x%06x not found !\n", irq);
+ return -EINVAL;
+ }
+ state = &ics->irq_state[src];
+ if (!state->exists)
+ return -EINVAL;
+
+ if (report_status)
+ return state->asserted;
+
+ /*
+ * We set state->asserted locklessly. This should be fine as
+ * we are the only setter, thus concurrent access is undefined
+ * to begin with.
+ */
+ if (level == KVM_INTERRUPT_SET_LEVEL)
+ state->asserted = 1;
+ else if (level == KVM_INTERRUPT_UNSET) {
+ state->asserted = 0;
+ return 0;
+ }
+
+ /* Attempt delivery */
+ icp_deliver_irq(xics, NULL, irq);
+
+ return state->asserted;
+}
+
+static void ics_check_resend(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
+ struct kvmppc_icp *icp)
+{
+ int i;
+
+ mutex_lock(&ics->lock);
+
+ for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
+ struct ics_irq_state *state = &ics->irq_state[i];
+
+ if (!state->resend)
+ continue;
+
+ XICS_DBG("resend %#x prio %#x\n", state->number,
+ state->priority);
+
+ mutex_unlock(&ics->lock);
+ icp_deliver_irq(xics, icp, state->number);
+ mutex_lock(&ics->lock);
+ }
+
+ mutex_unlock(&ics->lock);
+}
+
+static bool write_xive(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
+ struct ics_irq_state *state,
+ u32 server, u32 priority, u32 saved_priority)
+{
+ bool deliver;
+
+ mutex_lock(&ics->lock);
+
+ state->server = server;
+ state->priority = priority;
+ state->saved_priority = saved_priority;
+ deliver = false;
+ if ((state->masked_pending || state->resend) && priority != MASKED) {
+ state->masked_pending = 0;
+ deliver = true;
+ }
+
+ mutex_unlock(&ics->lock);
+
+ return deliver;
+}
+
+int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server, u32 priority)
+{
+ struct kvmppc_xics *xics = kvm->arch.xics;
+ struct kvmppc_icp *icp;
+ struct kvmppc_ics *ics;
+ struct ics_irq_state *state;
+ u16 src;
+
+ if (!xics)
+ return -ENODEV;
+
+ ics = kvmppc_xics_find_ics(xics, irq, &src);
+ if (!ics)
+ return -EINVAL;
+ state = &ics->irq_state[src];
+
+ icp = kvmppc_xics_find_server(kvm, server);
+ if (!icp)
+ return -EINVAL;
+
+ XICS_DBG("set_xive %#x server %#x prio %#x MP:%d RS:%d\n",
+ irq, server, priority,
+ state->masked_pending, state->resend);
+
+ if (write_xive(xics, ics, state, server, priority, priority))
+ icp_deliver_irq(xics, icp, irq);
+
+ return 0;
+}
+
+int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server, u32 *priority)
+{
+ struct kvmppc_xics *xics = kvm->arch.xics;
+ struct kvmppc_ics *ics;
+ struct ics_irq_state *state;
+ u16 src;
+
+ if (!xics)
+ return -ENODEV;
+
+ ics = kvmppc_xics_find_ics(xics, irq, &src);
+ if (!ics)
+ return -EINVAL;
+ state = &ics->irq_state[src];
+
+ mutex_lock(&ics->lock);
+ *server = state->server;
+ *priority = state->priority;
+ mutex_unlock(&ics->lock);
+
+ return 0;
+}
+
+int kvmppc_xics_int_on(struct kvm *kvm, u32 irq)
+{
+ struct kvmppc_xics *xics = kvm->arch.xics;
+ struct kvmppc_icp *icp;
+ struct kvmppc_ics *ics;
+ struct ics_irq_state *state;
+ u16 src;
+
+ if (!xics)
+ return -ENODEV;
+
+ ics = kvmppc_xics_find_ics(xics, irq, &src);
+ if (!ics)
+ return -EINVAL;
+ state = &ics->irq_state[src];
+
+ icp = kvmppc_xics_find_server(kvm, state->server);
+ if (!icp)
+ return -EINVAL;
+
+ if (write_xive(xics, ics, state, state->server, state->saved_priority,
+ state->saved_priority))
+ icp_deliver_irq(xics, icp, irq);
+
+ return 0;
+}
+
+int kvmppc_xics_int_off(struct kvm *kvm, u32 irq)
+{
+ struct kvmppc_xics *xics = kvm->arch.xics;
+ struct kvmppc_ics *ics;
+ struct ics_irq_state *state;
+ u16 src;
+
+ if (!xics)
+ return -ENODEV;
+
+ ics = kvmppc_xics_find_ics(xics, irq, &src);
+ if (!ics)
+ return -EINVAL;
+ state = &ics->irq_state[src];
+
+ write_xive(xics, ics, state, state->server, MASKED, state->priority);
+
+ return 0;
+}
+
+/* -- ICP routines, including hcalls -- */
+
+static inline bool icp_try_update(struct kvmppc_icp *icp,
+ union kvmppc_icp_state old,
+ union kvmppc_icp_state new,
+ bool change_self)
+{
+ bool success;
+
+ /* Calculate new output value */
+ new.out_ee = (new.xisr && (new.pending_pri < new.cppr));
+
+ /* Attempt atomic update */
+ success = cmpxchg64(&icp->state.raw, old.raw, new.raw) == old.raw;
+ if (!success)
+ goto bail;
+
+ XICS_DBG("UPD [%04x] - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n",
+ icp->server_num,
+ old.cppr, old.mfrr, old.pending_pri, old.xisr,
+ old.need_resend, old.out_ee);
+ XICS_DBG("UPD - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n",
+ new.cppr, new.mfrr, new.pending_pri, new.xisr,
+ new.need_resend, new.out_ee);
+ /*
+ * Check for output state update
+ *
+ * Note that this is racy since another processor could be updating
+ * the state already. This is why we never clear the interrupt output
+ * here, we only ever set it. The clear only happens prior to doing
+ * an update and only by the processor itself. Currently we do it
+ * in Accept (H_XIRR) and Up_Cppr (H_CPPR).
+ *
+ * We also do not try to figure out whether the EE state has changed,
+ * we unconditionally set it if the new state calls for it. The reason
+ * for that is that we opportunistically remove the pending interrupt
+ * flag when raising CPPR, so we need to set it back here if an
+ * interrupt is still pending.
+ */
+ if (new.out_ee) {
+ kvmppc_book3s_queue_irqprio(icp->vcpu,
+ BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
+ if (!change_self)
+ kvmppc_fast_vcpu_kick(icp->vcpu);
+ }
+ bail:
+ return success;
+}
+
+static void icp_check_resend(struct kvmppc_xics *xics,
+ struct kvmppc_icp *icp)
+{
+ u32 icsid;
+
+ /* Order this load with the test for need_resend in the caller */
+ smp_rmb();
+ for_each_set_bit(icsid, icp->resend_map, xics->max_icsid + 1) {
+ struct kvmppc_ics *ics = xics->ics[icsid];
+
+ if (!test_and_clear_bit(icsid, icp->resend_map))
+ continue;
+ if (!ics)
+ continue;
+ ics_check_resend(xics, ics, icp);
+ }
+}
+
+static bool icp_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority,
+ u32 *reject)
+{
+ union kvmppc_icp_state old_state, new_state;
+ bool success;
+
+ XICS_DBG("try deliver %#x(P:%#x) to server %#x\n", irq, priority,
+ icp->server_num);
+
+ do {
+ old_state = new_state = ACCESS_ONCE(icp->state);
+
+ *reject = 0;
+
+ /* See if we can deliver */
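+ /*
+ * Lower numerical priority values are more favored, so the
+ * interrupt is deliverable only if it is more favored than the
+ * CPPR, the MFRR and any already pending interrupt.
+ */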
+ success = new_state.cppr > priority &&
+ new_state.mfrr > priority &&
+ new_state.pending_pri > priority;
+
+ /*
+ * If we can, check for a rejection and perform the
+ * delivery
+ */
+ if (success) {
+ *reject = new_state.xisr;
+ new_state.xisr = irq;
+ new_state.pending_pri = priority;
+ } else {
+ /*
+ * If we failed to deliver we set need_resend
+ * so a subsequent CPPR state change causes us
+ * to try a new delivery.
+ */
+ new_state.need_resend = true;
+ }
+
+ } while (!icp_try_update(icp, old_state, new_state, false));
+
+ return success;
+}
+
+static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
+ u32 new_irq)
+{
+ struct ics_irq_state *state;
+ struct kvmppc_ics *ics;
+ u32 reject;
+ u16 src;
+
+ /*
+ * This is used both for initial delivery of an interrupt and
+ * for subsequent rejection.
+ *
+ * Rejection can be racy vs. resends. We have evaluated the
+ * rejection in an atomic ICP transaction which is now complete,
+ * so potentially the ICP can already accept the interrupt again.
+ *
+ * So we need to retry the delivery. Essentially the reject path
+ * boils down to a failed delivery. Always.
+ *
+ * Now the interrupt could also have moved to a different target,
+ * thus we may need to re-do the ICP lookup as well
+ */
+
+ again:
+ /* Get the ICS state and lock it */
+ ics = kvmppc_xics_find_ics(xics, new_irq, &src);
+ if (!ics) {
+ XICS_DBG("icp_deliver_irq: IRQ 0x%06x not found !\n", new_irq);
+ return;
+ }
+ state = &ics->irq_state[src];
+
+ /* Get a lock on the ICS */
+ mutex_lock(&ics->lock);
+
+ /* Get our server */
+ if (!icp || state->server != icp->server_num) {
+ icp = kvmppc_xics_find_server(xics->kvm, state->server);
+ if (!icp) {
+ pr_warn("icp_deliver_irq: IRQ 0x%06x server 0x%x not found !\n",
+ new_irq, state->server);
+ goto out;
+ }
+ }
+
+ /* Clear the resend bit of that interrupt */
+ state->resend = 0;
+
+ /*
+ * If masked, bail out
+ *
+ * Note: PAPR doesn't mention anything about masked pending
+ * when doing a resend, only when doing a delivery.
+ *
+ * However that would have the effect of losing a masked
+ * interrupt that was rejected and isn't consistent with
+ * the whole masked_pending business which is about not
+ * losing interrupts that occur while masked.
+ *
+ * We don't differentiate between normal deliveries and resends; this
+ * implementation will differ from PAPR and not lose such
+ * interrupts.
+ */
+ if (state->priority == MASKED) {
+ XICS_DBG("irq %#x masked pending\n", new_irq);
+ state->masked_pending = 1;
+ goto out;
+ }
+
+ /*
+ * Try the delivery, this will set the need_resend flag
+ * in the ICP as part of the atomic transaction if the
+ * delivery is not possible.
+ *
+ * Note that if successful, the new delivery might have itself
+ * rejected an interrupt that was "delivered" before we took the
+ * icp mutex.
+ *
+ * In this case we do the whole sequence all over again for the
+ * new guy. We cannot assume that the rejected interrupt is less
+ * favored than the new one, and thus doesn't need to be delivered,
+ * because by the time we exit icp_try_to_deliver() the target
+ * processor may well have already consumed & completed it, and thus
+ * the rejected interrupt might actually be already acceptable.
+ */
+ if (icp_try_to_deliver(icp, new_irq, state->priority, &reject)) {
+ /*
+ * Delivery was successful, did we reject somebody else ?
+ */
+ if (reject && reject != XICS_IPI) {
+ mutex_unlock(&ics->lock);
+ new_irq = reject;
+ goto again;
+ }
+ } else {
+ /*
+ * We failed to deliver the interrupt, so we need to set the
+ * resend map bit and mark the ICS state as needing a resend.
+ */
+ set_bit(ics->icsid, icp->resend_map);
+ state->resend = 1;
+
+ /*
+ * If the need_resend flag got cleared in the ICP some time
+ * between icp_try_to_deliver() atomic update and now, then
+ * we know it might have missed the resend_map bit. So we
+ * retry
+ */
+ smp_mb();
+ if (!icp->state.need_resend) {
+ mutex_unlock(&ics->lock);
+ goto again;
+ }
+ }
+ out:
+ mutex_unlock(&ics->lock);
+}
+
+static void icp_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
+ u8 new_cppr)
+{
+ union kvmppc_icp_state old_state, new_state;
+ bool resend;
+
+ /*
+ * This handles several related states in one operation:
+ *
+ * ICP State: Down_CPPR
+ *
+ * Load CPPR with new value and if the XISR is 0
+ * then check for resends:
+ *
+ * ICP State: Resend
+ *
+ * If MFRR is more favored than CPPR, check for IPIs
+ * and notify ICS of a potential resend. This is done
+ * asynchronously (when used in real mode, we will have
+ * to exit here).
+ *
+ * We do not handle the complete Check_IPI as documented
+ * here. In the PAPR, this state will be used for both
+ * Set_MFRR and Down_CPPR. However, we know that we aren't
+ * changing the MFRR state here so we don't need to handle
+ * the case of an MFRR causing a reject of a pending irq;
+ * this will have been handled when the MFRR was set in the
+ * first place.
+ *
+ * Thus we don't have to handle rejects, only resends.
+ *
+ * When implementing real mode for HV KVM, resend will lead to
+ * a H_TOO_HARD return and the whole transaction will be handled
+ * in virtual mode.
+ */
+ do {
+ old_state = new_state = ACCESS_ONCE(icp->state);
+
+ /* Down_CPPR */
+ new_state.cppr = new_cppr;
+
+ /*
+ * Cut down Resend / Check_IPI / IPI
+ *
+ * The logic is that we cannot have a pending interrupt
+ * trumped by an IPI at this point (see above), so we
+ * know that either the pending interrupt is already an
+ * IPI (in which case we don't care to override it) or
+ * it's either more favored than us or non-existent.
+ */
+ if (new_state.mfrr < new_cppr &&
+ new_state.mfrr <= new_state.pending_pri) {
+ WARN_ON(new_state.xisr != XICS_IPI &&
+ new_state.xisr != 0);
+ new_state.pending_pri = new_state.mfrr;
+ new_state.xisr = XICS_IPI;
+ }
+
+ /* Latch/clear resend bit */
+ resend = new_state.need_resend;
+ new_state.need_resend = 0;
+
+ } while (!icp_try_update(icp, old_state, new_state, true));
+
+ /*
+ * Now handle resend checks. Those are asynchronous to the ICP
+ * state update in HW (ie bus transactions) so we can handle them
+ * separately here too
+ */
+ if (resend)
+ icp_check_resend(xics, icp);
+}
+
+static noinline unsigned long kvmppc_h_xirr(struct kvm_vcpu *vcpu)
+{
+ union kvmppc_icp_state old_state, new_state;
+ struct kvmppc_icp *icp = vcpu->arch.icp;
+ u32 xirr;
+
+ /* First, remove EE from the processor */
+ kvmppc_book3s_dequeue_irqprio(icp->vcpu,
+ BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
+
+ /*
+ * ICP State: Accept_Interrupt
+ *
+ * Return the pending interrupt (if any) along with the
+ * current CPPR, then clear the XISR & set CPPR to the
+ * pending priority
+ */
+ do {
+ old_state = new_state = ACCESS_ONCE(icp->state);
+
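+ /* XIRR layout: CPPR in the top byte, XISR in the low 24 bits */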
+ xirr = old_state.xisr | (((u32)old_state.cppr) << 24);
+ if (!old_state.xisr)
+ break;
+ new_state.cppr = new_state.pending_pri;
+ new_state.pending_pri = 0xff;
+ new_state.xisr = 0;
+
+ } while (!icp_try_update(icp, old_state, new_state, true));
+
+ XICS_DBG("h_xirr vcpu %d xirr %#x\n", vcpu->vcpu_id, xirr);
+
+ return xirr;
+}
+
+static noinline int kvmppc_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
+ unsigned long mfrr)
+{
+ union kvmppc_icp_state old_state, new_state;
+ struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
+ struct kvmppc_icp *icp;
+ u32 reject;
+ bool resend;
+ bool local;
+
+ XICS_DBG("h_ipi vcpu %d to server %lu mfrr %#lx\n",
+ vcpu->vcpu_id, server, mfrr);
+
+ icp = vcpu->arch.icp;
+ local = icp->server_num == server;
+ if (!local) {
+ icp = kvmppc_xics_find_server(vcpu->kvm, server);
+ if (!icp)
+ return H_PARAMETER;
+ }
+
+ /*
+ * ICP state: Set_MFRR
+ *
+ * If the CPPR is more favored than the new MFRR, then
+ * nothing needs to be rejected as there can be no XISR to
+ * reject. If the MFRR is being made less favored then
+ * there might be a previously-rejected interrupt needing
+ * to be resent.
+ *
+ * If the CPPR is less favored, then we might be replacing
+ * an interrupt, and thus need to possibly reject it as in
+ *
+ * ICP state: Check_IPI
+ */
+ do {
+ old_state = new_state = ACCESS_ONCE(icp->state);
+
+ /* Set_MFRR */
+ new_state.mfrr = mfrr;
+
+ /* Check_IPI */
+ reject = 0;
+ resend = false;
+ if (mfrr < new_state.cppr) {
+ /* Reject a pending interrupt if not an IPI */
+ if (mfrr <= new_state.pending_pri)
+ reject = new_state.xisr;
+ new_state.pending_pri = mfrr;
+ new_state.xisr = XICS_IPI;
+ }
+
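+ /*
+ * If the MFRR is being made less favored and will not itself be
+ * presented (it is less favored than the CPPR), latch need_resend
+ * so deferred interrupts are retried after the update.
+ */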
+ if (mfrr > old_state.mfrr && mfrr > new_state.cppr) {
+ resend = new_state.need_resend;
+ new_state.need_resend = 0;
+ }
+ } while (!icp_try_update(icp, old_state, new_state, local));
+
+ /* Handle reject */
+ if (reject && reject != XICS_IPI)
+ icp_deliver_irq(xics, icp, reject);
+
+ /* Handle resend */
+ if (resend)
+ icp_check_resend(xics, icp);
+
+ return H_SUCCESS;
+}
+
+static noinline void kvmppc_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
+{
+ union kvmppc_icp_state old_state, new_state;
+ struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
+ struct kvmppc_icp *icp = vcpu->arch.icp;
+ u32 reject;
+
+ XICS_DBG("h_cppr vcpu %d cppr %#lx\n", vcpu->vcpu_id, cppr);
+
+ /*
+ * ICP State: Set_CPPR
+ *
+ * We can safely compare the new value with the current
+ * value outside of the transaction as the CPPR is only
+ * ever changed by the processor on itself
+ */
+ if (cppr > icp->state.cppr)
+ icp_down_cppr(xics, icp, cppr);
+ else if (cppr == icp->state.cppr)
+ return;
+
+ /*
+ * ICP State: Up_CPPR
+ *
+ * The processor is raising its priority, this can result
+ * in a rejection of a pending interrupt:
+ *
+ * ICP State: Reject_Current
+ *
+ * We can remove EE from the current processor, the update
+ * transaction will set it again if needed
+ */
+ kvmppc_book3s_dequeue_irqprio(icp->vcpu,
+ BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
+
+ do {
+ old_state = new_state = ACCESS_ONCE(icp->state);
+
+ reject = 0;
+ new_state.cppr = cppr;
+
+ if (cppr <= new_state.pending_pri) {
+ reject = new_state.xisr;
+ new_state.xisr = 0;
+ new_state.pending_pri = 0xff;
+ }
+
+ } while (!icp_try_update(icp, old_state, new_state, true));
+
+ /*
+ * Check for rejects. They are handled by doing a new delivery
+ * attempt (see comments in icp_deliver_irq).
+ */
+ if (reject && reject != XICS_IPI)
+ icp_deliver_irq(xics, icp, reject);
+}
+
+static noinline int kvmppc_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
+{
+ struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
+ struct kvmppc_icp *icp = vcpu->arch.icp;
+ struct kvmppc_ics *ics;
+ struct ics_irq_state *state;
+ u32 irq = xirr & 0x00ffffff;
+ u16 src;
+
+ XICS_DBG("h_eoi vcpu %d eoi %#lx\n", vcpu->vcpu_id, xirr);
+
+ /*
+ * ICP State: EOI
+ *
+ * Note: If EOI is incorrectly used by SW to lower the CPPR
+ * value (i.e. more favored), we do not check for rejection of
+ * a pending interrupt; this is a SW error and PAPR specifies
+ * that we don't have to deal with it.
+ *
+ * The sending of an EOI to the ICS is handled after the
+ * CPPR update
+ *
+ * ICP State: Down_CPPR which we handle
+ * in a separate function as it's shared with H_CPPR.
+ */
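+ /* The top byte of the EOI'd XIRR carries the CPPR value to restore */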
+ icp_down_cppr(xics, icp, xirr >> 24);
+
+ /* IPIs have no EOI */
+ if (irq == XICS_IPI)
+ return H_SUCCESS;
+ /*
+ * EOI handling: If the interrupt is still asserted, we need to
+ * resend it. We can take a lockless "peek" at the ICS state here.
+ *
+ * "Message" interrupts will never have "asserted" set
+ */
+ ics = kvmppc_xics_find_ics(xics, irq, &src);
+ if (!ics) {
+ XICS_DBG("h_eoi: IRQ 0x%06x not found !\n", irq);
+ return H_PARAMETER;
+ }
+ state = &ics->irq_state[src];
+
+ /* Still asserted, resend it */
+ if (state->asserted)
+ icp_deliver_irq(xics, icp, irq);
+
+ return H_SUCCESS;
+}
+
+static noinline int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
+{
+ struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
+ struct kvmppc_icp *icp = vcpu->arch.icp;
+
+ XICS_DBG("XICS_RM: H_%x completing, act: %x state: %lx tgt: %p\n",
+ hcall, icp->rm_action, icp->rm_dbgstate.raw, icp->rm_dbgtgt);
+
+ if (icp->rm_action & XICS_RM_KICK_VCPU)
+ kvmppc_fast_vcpu_kick(icp->rm_kick_target);
+ if (icp->rm_action & XICS_RM_CHECK_RESEND)
+ icp_check_resend(xics, icp);
+ if (icp->rm_action & XICS_RM_REJECT)
+ icp_deliver_irq(xics, icp, icp->rm_reject);
+
+ icp->rm_action = 0;
+
+ return H_SUCCESS;
+}
+
+int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
+{
+ struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
+ unsigned long res;
+ int rc = H_SUCCESS;
+
+ /* Check if we have an ICP */
+ if (!xics || !vcpu->arch.icp)
+ return H_HARDWARE;
+
+ /* Check for real mode returning too hard */
+ if (xics->real_mode)
+ return kvmppc_xics_rm_complete(vcpu, req);
+
+ switch (req) {
+ case H_XIRR:
+ res = kvmppc_h_xirr(vcpu);
+ kvmppc_set_gpr(vcpu, 4, res);
+ break;
+ case H_CPPR:
+ kvmppc_h_cppr(vcpu, kvmppc_get_gpr(vcpu, 4));
+ break;
+ case H_EOI:
+ rc = kvmppc_h_eoi(vcpu, kvmppc_get_gpr(vcpu, 4));
+ break;
+ case H_IPI:
+ rc = kvmppc_h_ipi(vcpu, kvmppc_get_gpr(vcpu, 4),
+ kvmppc_get_gpr(vcpu, 5));
+ break;
+ }
+
+ return rc;
+}
+
+
+/* -- Initialisation code etc. -- */
+
+static int xics_debug_show(struct seq_file *m, void *private)
+{
+ struct kvmppc_xics *xics = m->private;
+ struct kvm *kvm = xics->kvm;
+ struct kvm_vcpu *vcpu;
+ int icsid, i;
+
+ if (!kvm)
+ return 0;
+
+ seq_printf(m, "=========\nICP state\n=========\n");
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ struct kvmppc_icp *icp = vcpu->arch.icp;
+ union kvmppc_icp_state state;
+
+ if (!icp)
+ continue;
+
+ state.raw = ACCESS_ONCE(icp->state.raw);
+ seq_printf(m, "cpu server %#lx XIRR:%#x PPRI:%#x CPPR:%#x MFRR:%#x OUT:%d NR:%d\n",
+ icp->server_num, state.xisr,
+ state.pending_pri, state.cppr, state.mfrr,
+ state.out_ee, state.need_resend);
+ }
+
+ for (icsid = 0; icsid <= KVMPPC_XICS_MAX_ICS_ID; icsid++) {
+ struct kvmppc_ics *ics = xics->ics[icsid];
+
+ if (!ics)
+ continue;
+
+ seq_printf(m, "=========\nICS state for ICS 0x%x\n=========\n",
+ icsid);
+
+ mutex_lock(&ics->lock);
+
+ for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
+ struct ics_irq_state *irq = &ics->irq_state[i];
+
+ seq_printf(m, "irq 0x%06x: server %#x prio %#x save prio %#x asserted %d resend %d masked pending %d\n",
+ irq->number, irq->server, irq->priority,
+ irq->saved_priority, irq->asserted,
+ irq->resend, irq->masked_pending);
+
+ }
+ mutex_unlock(&ics->lock);
+ }
+ return 0;
+}
+
+static int xics_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, xics_debug_show, inode->i_private);
+}
+
+static const struct file_operations xics_debug_fops = {
+ .open = xics_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void xics_debugfs_init(struct kvmppc_xics *xics)
+{
+ char *name;
+
+ name = kasprintf(GFP_KERNEL, "kvm-xics-%p", xics);
+ if (!name) {
+ pr_err("%s: no memory for name\n", __func__);
+ return;
+ }
+
+ xics->dentry = debugfs_create_file(name, S_IRUGO, powerpc_debugfs_root,
+ xics, &xics_debug_fops);
+
+ pr_debug("%s: created %s\n", __func__, name);
+ kfree(name);
+}
+
+static struct kvmppc_ics *kvmppc_xics_create_ics(struct kvm *kvm,
+ struct kvmppc_xics *xics, int irq)
+{
+ struct kvmppc_ics *ics;
+ int i, icsid;
+
+ icsid = irq >> KVMPPC_XICS_ICS_SHIFT;
+
+ mutex_lock(&kvm->lock);
+
+ /* ICS already exists - somebody else got here first */
+ if (xics->ics[icsid])
+ goto out;
+
+ /* Create the ICS */
+ ics = kzalloc(sizeof(struct kvmppc_ics), GFP_KERNEL);
+ if (!ics)
+ goto out;
+
+ mutex_init(&ics->lock);
+ ics->icsid = icsid;
+
+ for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
+ ics->irq_state[i].number = (icsid << KVMPPC_XICS_ICS_SHIFT) | i;
+ ics->irq_state[i].priority = MASKED;
+ ics->irq_state[i].saved_priority = MASKED;
+ }
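+ /* Make sure the new ICS is fully initialized before publishing it */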
+ smp_wmb();
+ xics->ics[icsid] = ics;
+
+ if (icsid > xics->max_icsid)
+ xics->max_icsid = icsid;
+
+ out:
+ mutex_unlock(&kvm->lock);
+ return xics->ics[icsid];
+}
+
+int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu, unsigned long server_num)
+{
+ struct kvmppc_icp *icp;
+
+ if (!vcpu->kvm->arch.xics)
+ return -ENODEV;
+
+ if (kvmppc_xics_find_server(vcpu->kvm, server_num))
+ return -EEXIST;
+
+ icp = kzalloc(sizeof(struct kvmppc_icp), GFP_KERNEL);
+ if (!icp)
+ return -ENOMEM;
+
+ icp->vcpu = vcpu;
+ icp->server_num = server_num;
+ icp->state.mfrr = MASKED;
+ icp->state.pending_pri = MASKED;
+ vcpu->arch.icp = icp;
+
+ XICS_DBG("created server for vcpu %d\n", vcpu->vcpu_id);
+
+ return 0;
+}
+
+u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_icp *icp = vcpu->arch.icp;
+ union kvmppc_icp_state state;
+
+ if (!icp)
+ return 0;
+ state = icp->state;
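+ /* Pack CPPR, XISR, MFRR and pending priority into the one-reg layout */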
+ return ((u64)state.cppr << KVM_REG_PPC_ICP_CPPR_SHIFT) |
+ ((u64)state.xisr << KVM_REG_PPC_ICP_XISR_SHIFT) |
+ ((u64)state.mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT) |
+ ((u64)state.pending_pri << KVM_REG_PPC_ICP_PPRI_SHIFT);
+}
+
+int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
+{
+ struct kvmppc_icp *icp = vcpu->arch.icp;
+ struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
+ union kvmppc_icp_state old_state, new_state;
+ struct kvmppc_ics *ics;
+ u8 cppr, mfrr, pending_pri;
+ u32 xisr;
+ u16 src;
+ bool resend;
+
+ if (!icp || !xics)
+ return -ENOENT;
+
+ cppr = icpval >> KVM_REG_PPC_ICP_CPPR_SHIFT;
+ xisr = (icpval >> KVM_REG_PPC_ICP_XISR_SHIFT) &
+ KVM_REG_PPC_ICP_XISR_MASK;
+ mfrr = icpval >> KVM_REG_PPC_ICP_MFRR_SHIFT;
+ pending_pri = icpval >> KVM_REG_PPC_ICP_PPRI_SHIFT;
+
+ /* Require the new state to be internally consistent */
+ if (xisr == 0) {
+ if (pending_pri != 0xff)
+ return -EINVAL;
+ } else if (xisr == XICS_IPI) {
+ if (pending_pri != mfrr || pending_pri >= cppr)
+ return -EINVAL;
+ } else {
+ if (pending_pri >= mfrr || pending_pri >= cppr)
+ return -EINVAL;
+ ics = kvmppc_xics_find_ics(xics, xisr, &src);
+ if (!ics)
+ return -EINVAL;
+ }
+
+ new_state.raw = 0;
+ new_state.cppr = cppr;
+ new_state.xisr = xisr;
+ new_state.mfrr = mfrr;
+ new_state.pending_pri = pending_pri;
+
+ /*
+ * Deassert the CPU interrupt request.
+ * icp_try_update will reassert it if necessary.
+ */
+ kvmppc_book3s_dequeue_irqprio(icp->vcpu,
+ BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
+
+ /*
+ * Note that if we displace an interrupt from old_state.xisr,
+ * we don't mark it as rejected. We expect userspace to set
+ * the state of the interrupt sources to be consistent with
+ * the ICP states (either before or afterwards, which doesn't
+ * matter). We do handle resends due to CPPR becoming less
+ * favoured because that is necessary to end up with a
+ * consistent state in the situation where userspace restores
+ * the ICS states before the ICP states.
+ */
+ do {
+ old_state = ACCESS_ONCE(icp->state);
+
+ if (new_state.mfrr <= old_state.mfrr) {
+ resend = false;
+ new_state.need_resend = old_state.need_resend;
+ } else {
+ resend = old_state.need_resend;
+ new_state.need_resend = 0;
+ }
+ } while (!icp_try_update(icp, old_state, new_state, false));
+
+ if (resend)
+ icp_check_resend(xics, icp);
+
+ return 0;
+}
+
+static int xics_get_source(struct kvmppc_xics *xics, long irq, u64 addr)
+{
+ int ret;
+ struct kvmppc_ics *ics;
+ struct ics_irq_state *irqp;
+ u64 __user *ubufp = (u64 __user *) addr;
+ u16 idx;
+ u64 val, prio;
+
+ ics = kvmppc_xics_find_ics(xics, irq, &idx);
+ if (!ics)
+ return -ENOENT;
+
+ irqp = &ics->irq_state[idx];
+ mutex_lock(&ics->lock);
+ ret = -ENOENT;
+ if (irqp->exists) {
+ val = irqp->server;
+ prio = irqp->priority;
+ if (prio == MASKED) {
+ val |= KVM_XICS_MASKED;
+ prio = irqp->saved_priority;
+ }
+ val |= prio << KVM_XICS_PRIORITY_SHIFT;
+ if (irqp->asserted)
+ val |= KVM_XICS_LEVEL_SENSITIVE | KVM_XICS_PENDING;
+ else if (irqp->masked_pending || irqp->resend)
+ val |= KVM_XICS_PENDING;
+ ret = 0;
+ }
+ mutex_unlock(&ics->lock);
+
+ if (!ret && put_user(val, ubufp))
+ ret = -EFAULT;
+
+ return ret;
+}
+
+static int xics_set_source(struct kvmppc_xics *xics, long irq, u64 addr)
+{
+ struct kvmppc_ics *ics;
+ struct ics_irq_state *irqp;
+ u64 __user *ubufp = (u64 __user *) addr;
+ u16 idx;
+ u64 val;
+ u8 prio;
+ u32 server;
+
+ if (irq < KVMPPC_XICS_FIRST_IRQ || irq >= KVMPPC_XICS_NR_IRQS)
+ return -ENOENT;
+
+ ics = kvmppc_xics_find_ics(xics, irq, &idx);
+ if (!ics) {
+ ics = kvmppc_xics_create_ics(xics->kvm, xics, irq);
+ if (!ics)
+ return -ENOMEM;
+ }
+ irqp = &ics->irq_state[idx];
+ if (get_user(val, ubufp))
+ return -EFAULT;
+
+ server = val & KVM_XICS_DESTINATION_MASK;
+ prio = val >> KVM_XICS_PRIORITY_SHIFT;
+ if (prio != MASKED &&
+ kvmppc_xics_find_server(xics->kvm, server) == NULL)
+ return -EINVAL;
+
+ mutex_lock(&ics->lock);
+ irqp->server = server;
+ irqp->saved_priority = prio;
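+ /* If the source is masked, saved_priority keeps the restore value while the active priority is set to MASKED */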
+ if (val & KVM_XICS_MASKED)
+ prio = MASKED;
+ irqp->priority = prio;
+ irqp->resend = 0;
+ irqp->masked_pending = 0;
+ irqp->asserted = 0;
+ if ((val & KVM_XICS_PENDING) && (val & KVM_XICS_LEVEL_SENSITIVE))
+ irqp->asserted = 1;
+ irqp->exists = 1;
+ mutex_unlock(&ics->lock);
+
+ if (val & KVM_XICS_PENDING)
+ icp_deliver_irq(xics, NULL, irqp->number);
+
+ return 0;
+}
+
+int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
+ bool line_status)
+{
+ struct kvmppc_xics *xics = kvm->arch.xics;
+
+ return ics_deliver_irq(xics, irq, level, line_status);
+}
+
+static int xics_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
+{
+ struct kvmppc_xics *xics = dev->private;
+
+ switch (attr->group) {
+ case KVM_DEV_XICS_GRP_SOURCES:
+ return xics_set_source(xics, attr->attr, attr->addr);
+ }
+ return -ENXIO;
+}
+
+static int xics_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
+{
+ struct kvmppc_xics *xics = dev->private;
+
+ switch (attr->group) {
+ case KVM_DEV_XICS_GRP_SOURCES:
+ return xics_get_source(xics, attr->attr, attr->addr);
+ }
+ return -ENXIO;
+}
+
+static int xics_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
+{
+ switch (attr->group) {
+ case KVM_DEV_XICS_GRP_SOURCES:
+ if (attr->attr >= KVMPPC_XICS_FIRST_IRQ &&
+ attr->attr < KVMPPC_XICS_NR_IRQS)
+ return 0;
+ break;
+ }
+ return -ENXIO;
+}
+
+static void kvmppc_xics_free(struct kvm_device *dev)
+{
+ struct kvmppc_xics *xics = dev->private;
+ int i;
+ struct kvm *kvm = xics->kvm;
+
+ debugfs_remove(xics->dentry);
+
+ if (kvm)
+ kvm->arch.xics = NULL;
+
+ for (i = 0; i <= xics->max_icsid; i++)
+ kfree(xics->ics[i]);
+ kfree(xics);
+ kfree(dev);
+}
+
+static int kvmppc_xics_create(struct kvm_device *dev, u32 type)
+{
+ struct kvmppc_xics *xics;
+ struct kvm *kvm = dev->kvm;
+ int ret = 0;
+
+ xics = kzalloc(sizeof(*xics), GFP_KERNEL);
+ if (!xics)
+ return -ENOMEM;
+
+ dev->private = xics;
+ xics->dev = dev;
+ xics->kvm = kvm;
+
+ /* Already there ? */
+ mutex_lock(&kvm->lock);
+ if (kvm->arch.xics)
+ ret = -EEXIST;
+ else
+ kvm->arch.xics = xics;
+ mutex_unlock(&kvm->lock);
+
+ if (ret)
+ return ret;
+
+ xics_debugfs_init(xics);
+
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+ if (cpu_has_feature(CPU_FTR_ARCH_206)) {
+ /* Enable real mode support */
+ xics->real_mode = ENABLE_REALMODE;
+ xics->real_mode_dbg = DEBUG_REALMODE;
+ }
+#endif /* CONFIG_KVM_BOOK3S_64_HV */
+
+ return 0;
+}
+
+struct kvm_device_ops kvm_xics_ops = {
+ .name = "kvm-xics",
+ .create = kvmppc_xics_create,
+ .destroy = kvmppc_xics_free,
+ .set_attr = xics_set_attr,
+ .get_attr = xics_get_attr,
+ .has_attr = xics_has_attr,
+};
+
+int kvmppc_xics_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
+ u32 xcpu)
+{
+ struct kvmppc_xics *xics = dev->private;
+ int r = -EBUSY;
+
+ if (dev->ops != &kvm_xics_ops)
+ return -EPERM;
+ if (xics->kvm != vcpu->kvm)
+ return -EPERM;
+ if (vcpu->arch.irq_type)
+ return -EBUSY;
+
+ r = kvmppc_xics_create_icp(vcpu, xcpu);
+ if (!r)
+ vcpu->arch.irq_type = KVMPPC_IRQ_XICS;
+
+ return r;
+}
+
+void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu)
+{
+ if (!vcpu->arch.icp)
+ return;
+ kfree(vcpu->arch.icp);
+ vcpu->arch.icp = NULL;
+ vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;
+}
diff --git a/arch/powerpc/kvm/book3s_xics.h b/arch/powerpc/kvm/book3s_xics.h
new file mode 100644
index 000000000000..dd9326c5c19b
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_xics.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright 2012 Michael Ellerman, IBM Corporation.
+ * Copyright 2012 Benjamin Herrenschmidt, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _KVM_PPC_BOOK3S_XICS_H
+#define _KVM_PPC_BOOK3S_XICS_H
+
+/*
+ * We use a two-level tree to store interrupt source information.
+ * There are up to 1024 ICS nodes, each of which can represent
+ * 1024 sources.
+ */
+#define KVMPPC_XICS_MAX_ICS_ID 1023
+#define KVMPPC_XICS_ICS_SHIFT 10
+#define KVMPPC_XICS_IRQ_PER_ICS (1 << KVMPPC_XICS_ICS_SHIFT)
+#define KVMPPC_XICS_SRC_MASK (KVMPPC_XICS_IRQ_PER_ICS - 1)
+
+/*
+ * Interrupt source numbers below this are reserved, for example
+ * 0 is "no interrupt", and 2 is used for IPIs.
+ */
+#define KVMPPC_XICS_FIRST_IRQ 16
+#define KVMPPC_XICS_NR_IRQS ((KVMPPC_XICS_MAX_ICS_ID + 1) * \
+ KVMPPC_XICS_IRQ_PER_ICS)
+
+/* Priority value to use for disabling an interrupt */
+#define MASKED 0xff
+
+/* State for one irq source */
+struct ics_irq_state {
+ u32 number;
+ u32 server;
+ u8 priority;
+ u8 saved_priority;
+ u8 resend;
+ u8 masked_pending;
+ u8 asserted; /* Only for LSI */
+ u8 exists;
+};
+
+/* Atomic ICP state, updated with a single compare & swap */
+union kvmppc_icp_state {
+ unsigned long raw;
+ struct {
+ u8 out_ee:1;
+ u8 need_resend:1;
+ u8 cppr;
+ u8 mfrr;
+ u8 pending_pri;
+ u32 xisr;
+ };
+};
+
+/* One bit per ICS */
+#define ICP_RESEND_MAP_SIZE (KVMPPC_XICS_MAX_ICS_ID / BITS_PER_LONG + 1)
+
+struct kvmppc_icp {
+ struct kvm_vcpu *vcpu;
+ unsigned long server_num;
+ union kvmppc_icp_state state;
+ unsigned long resend_map[ICP_RESEND_MAP_SIZE];
+
+ /* Real mode might find something too hard; here's the action
+ * it might request from virtual mode
+ */
+#define XICS_RM_KICK_VCPU 0x1
+#define XICS_RM_CHECK_RESEND 0x2
+#define XICS_RM_REJECT 0x4
+ u32 rm_action;
+ struct kvm_vcpu *rm_kick_target;
+ u32 rm_reject;
+
+ /* Debug stuff for real mode */
+ union kvmppc_icp_state rm_dbgstate;
+ struct kvm_vcpu *rm_dbgtgt;
+};
+
+struct kvmppc_ics {
+ struct mutex lock;
+ u16 icsid;
+ struct ics_irq_state irq_state[KVMPPC_XICS_IRQ_PER_ICS];
+};
+
+struct kvmppc_xics {
+ struct kvm *kvm;
+ struct kvm_device *dev;
+ struct dentry *dentry;
+ u32 max_icsid;
+ bool real_mode;
+ bool real_mode_dbg;
+ struct kvmppc_ics *ics[KVMPPC_XICS_MAX_ICS_ID + 1];
+};
+
+static inline struct kvmppc_icp *kvmppc_xics_find_server(struct kvm *kvm,
+ u32 nr)
+{
+ struct kvm_vcpu *vcpu = NULL;
+ int i;
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ if (vcpu->arch.icp && nr == vcpu->arch.icp->server_num)
+ return vcpu->arch.icp;
+ }
+ return NULL;
+}
+
+static inline struct kvmppc_ics *kvmppc_xics_find_ics(struct kvmppc_xics *xics,
+ u32 irq, u16 *source)
+{
+ u32 icsid = irq >> KVMPPC_XICS_ICS_SHIFT;
+ u16 src = irq & KVMPPC_XICS_SRC_MASK;
+ struct kvmppc_ics *ics;
+
+ if (source)
+ *source = src;
+ if (icsid > KVMPPC_XICS_MAX_ICS_ID)
+ return NULL;
+ ics = xics->ics[icsid];
+ if (!ics)
+ return NULL;
+ return ics;
+}
+
+
+#endif /* _KVM_PPC_BOOK3S_XICS_H */
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 020923e43134..1020119226db 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -222,8 +222,7 @@ void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
kvmppc_booke_queue_irqprio(vcpu, prio);
}
-void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
- struct kvm_interrupt *irq)
+void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
{
clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
@@ -347,7 +346,7 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
keep_irq = true;
}
- if ((priority == BOOKE_IRQPRIO_EXTERNAL) && vcpu->arch.epr_enabled)
+ if ((priority == BOOKE_IRQPRIO_EXTERNAL) && vcpu->arch.epr_flags)
update_epr = true;
switch (priority) {
@@ -428,8 +427,14 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
set_guest_esr(vcpu, vcpu->arch.queued_esr);
if (update_dear == true)
set_guest_dear(vcpu, vcpu->arch.queued_dear);
- if (update_epr == true)
- kvm_make_request(KVM_REQ_EPR_EXIT, vcpu);
+ if (update_epr == true) {
+ if (vcpu->arch.epr_flags & KVMPPC_EPR_USER)
+ kvm_make_request(KVM_REQ_EPR_EXIT, vcpu);
+ else if (vcpu->arch.epr_flags & KVMPPC_EPR_KERNEL) {
+ BUG_ON(vcpu->arch.irq_type != KVMPPC_IRQ_MPIC);
+ kvmppc_mpic_set_epr(vcpu);
+ }
+ }
new_msr &= msr_mask;
#if defined(CONFIG_64BIT)
@@ -746,6 +751,9 @@ static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
kvmppc_core_queue_program(vcpu, ESR_PIL);
return RESUME_HOST;
+ case EMULATE_EXIT_USER:
+ return RESUME_HOST;
+
default:
BUG();
}
@@ -1148,6 +1156,18 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
return r;
}
+static void kvmppc_set_tsr(struct kvm_vcpu *vcpu, u32 new_tsr)
+{
+ u32 old_tsr = vcpu->arch.tsr;
+
+ vcpu->arch.tsr = new_tsr;
+
+ if ((old_tsr ^ vcpu->arch.tsr) & (TSR_ENW | TSR_WIS))
+ arm_next_watchdog(vcpu);
+
+ update_timer_ints(vcpu);
+}
+
/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
@@ -1287,16 +1307,8 @@ static int set_sregs_base(struct kvm_vcpu *vcpu,
kvmppc_emulate_dec(vcpu);
}
- if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR) {
- u32 old_tsr = vcpu->arch.tsr;
-
- vcpu->arch.tsr = sregs->u.e.tsr;
-
- if ((old_tsr ^ vcpu->arch.tsr) & (TSR_ENW | TSR_WIS))
- arm_next_watchdog(vcpu);
-
- update_timer_ints(vcpu);
- }
+ if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR)
+ kvmppc_set_tsr(vcpu, sregs->u.e.tsr);
return 0;
}
@@ -1409,84 +1421,134 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
- int r = -EINVAL;
+ int r = 0;
+ union kvmppc_one_reg val;
+ int size;
+ long int i;
+
+ size = one_reg_size(reg->id);
+ if (size > sizeof(val))
+ return -EINVAL;
switch (reg->id) {
case KVM_REG_PPC_IAC1:
case KVM_REG_PPC_IAC2:
case KVM_REG_PPC_IAC3:
- case KVM_REG_PPC_IAC4: {
- int iac = reg->id - KVM_REG_PPC_IAC1;
- r = copy_to_user((u64 __user *)(long)reg->addr,
- &vcpu->arch.dbg_reg.iac[iac], sizeof(u64));
+ case KVM_REG_PPC_IAC4:
+ i = reg->id - KVM_REG_PPC_IAC1;
+ val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac[i]);
break;
- }
case KVM_REG_PPC_DAC1:
- case KVM_REG_PPC_DAC2: {
- int dac = reg->id - KVM_REG_PPC_DAC1;
- r = copy_to_user((u64 __user *)(long)reg->addr,
- &vcpu->arch.dbg_reg.dac[dac], sizeof(u64));
+ case KVM_REG_PPC_DAC2:
+ i = reg->id - KVM_REG_PPC_DAC1;
+ val = get_reg_val(reg->id, vcpu->arch.dbg_reg.dac[i]);
break;
- }
case KVM_REG_PPC_EPR: {
u32 epr = get_guest_epr(vcpu);
- r = put_user(epr, (u32 __user *)(long)reg->addr);
+ val = get_reg_val(reg->id, epr);
break;
}
#if defined(CONFIG_64BIT)
case KVM_REG_PPC_EPCR:
- r = put_user(vcpu->arch.epcr, (u32 __user *)(long)reg->addr);
+ val = get_reg_val(reg->id, vcpu->arch.epcr);
break;
#endif
+ case KVM_REG_PPC_TCR:
+ val = get_reg_val(reg->id, vcpu->arch.tcr);
+ break;
+ case KVM_REG_PPC_TSR:
+ val = get_reg_val(reg->id, vcpu->arch.tsr);
+ break;
+ case KVM_REG_PPC_DEBUG_INST:
+ val = get_reg_val(reg->id, KVMPPC_INST_EHPRIV);
+ break;
default:
+ r = kvmppc_get_one_reg(vcpu, reg->id, &val);
break;
}
+
+ if (r)
+ return r;
+
+ if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
+ r = -EFAULT;
+
return r;
}
int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
- int r = -EINVAL;
+ int r = 0;
+ union kvmppc_one_reg val;
+ int size;
+ long int i;
+
+ size = one_reg_size(reg->id);
+ if (size > sizeof(val))
+ return -EINVAL;
+
+ if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
+ return -EFAULT;
switch (reg->id) {
case KVM_REG_PPC_IAC1:
case KVM_REG_PPC_IAC2:
case KVM_REG_PPC_IAC3:
- case KVM_REG_PPC_IAC4: {
- int iac = reg->id - KVM_REG_PPC_IAC1;
- r = copy_from_user(&vcpu->arch.dbg_reg.iac[iac],
- (u64 __user *)(long)reg->addr, sizeof(u64));
+ case KVM_REG_PPC_IAC4:
+ i = reg->id - KVM_REG_PPC_IAC1;
+ vcpu->arch.dbg_reg.iac[i] = set_reg_val(reg->id, val);
break;
- }
case KVM_REG_PPC_DAC1:
- case KVM_REG_PPC_DAC2: {
- int dac = reg->id - KVM_REG_PPC_DAC1;
- r = copy_from_user(&vcpu->arch.dbg_reg.dac[dac],
- (u64 __user *)(long)reg->addr, sizeof(u64));
+ case KVM_REG_PPC_DAC2:
+ i = reg->id - KVM_REG_PPC_DAC1;
+ vcpu->arch.dbg_reg.dac[i] = set_reg_val(reg->id, val);
break;
- }
case KVM_REG_PPC_EPR: {
- u32 new_epr;
- r = get_user(new_epr, (u32 __user *)(long)reg->addr);
- if (!r)
- kvmppc_set_epr(vcpu, new_epr);
+ u32 new_epr = set_reg_val(reg->id, val);
+ kvmppc_set_epr(vcpu, new_epr);
break;
}
#if defined(CONFIG_64BIT)
case KVM_REG_PPC_EPCR: {
- u32 new_epcr;
- r = get_user(new_epcr, (u32 __user *)(long)reg->addr);
- if (r == 0)
- kvmppc_set_epcr(vcpu, new_epcr);
+ u32 new_epcr = set_reg_val(reg->id, val);
+ kvmppc_set_epcr(vcpu, new_epcr);
break;
}
#endif
+ case KVM_REG_PPC_OR_TSR: {
+ u32 tsr_bits = set_reg_val(reg->id, val);
+ kvmppc_set_tsr_bits(vcpu, tsr_bits);
+ break;
+ }
+ case KVM_REG_PPC_CLEAR_TSR: {
+ u32 tsr_bits = set_reg_val(reg->id, val);
+ kvmppc_clr_tsr_bits(vcpu, tsr_bits);
+ break;
+ }
+ case KVM_REG_PPC_TSR: {
+ u32 tsr = set_reg_val(reg->id, val);
+ kvmppc_set_tsr(vcpu, tsr);
+ break;
+ }
+ case KVM_REG_PPC_TCR: {
+ u32 tcr = set_reg_val(reg->id, val);
+ kvmppc_set_tcr(vcpu, tcr);
+ break;
+ }
default:
+ r = kvmppc_set_one_reg(vcpu, reg->id, &val);
break;
}
+
return r;
}
+int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
+ struct kvm_guest_debug *dbg)
+{
+ return -EINVAL;
+}
+
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
return -ENOTSUPP;
@@ -1531,7 +1593,7 @@ int kvmppc_core_prepare_memory_region(struct kvm *kvm,
void kvmppc_core_commit_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
- struct kvm_memory_slot old)
+ const struct kvm_memory_slot *old)
{
}
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
index f4bb55c96517..2c6deb5ef2fe 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -54,8 +54,7 @@
(1<<BOOKE_INTERRUPT_DTLB_MISS) | \
(1<<BOOKE_INTERRUPT_ALIGNMENT))
-.macro KVM_HANDLER ivor_nr scratch srr0
-_GLOBAL(kvmppc_handler_\ivor_nr)
+.macro __KVM_HANDLER ivor_nr scratch srr0
/* Get pointer to vcpu and record exit number. */
mtspr \scratch , r4
mfspr r4, SPRN_SPRG_THREAD
@@ -76,6 +75,43 @@ _GLOBAL(kvmppc_handler_\ivor_nr)
bctr
.endm
+.macro KVM_HANDLER ivor_nr scratch srr0
+_GLOBAL(kvmppc_handler_\ivor_nr)
+ __KVM_HANDLER \ivor_nr \scratch \srr0
+.endm
+
+.macro KVM_DBG_HANDLER ivor_nr scratch srr0
+_GLOBAL(kvmppc_handler_\ivor_nr)
+ mtspr \scratch, r4
+ mfspr r4, SPRN_SPRG_THREAD
+ lwz r4, THREAD_KVM_VCPU(r4)
+ stw r3, VCPU_CRIT_SAVE(r4)
+ mfcr r3
+ mfspr r4, SPRN_CSRR1
+ andi. r4, r4, MSR_PR
+ bne 1f
+ /* debug interrupt happened in enter/exit path */
+ mfspr r4, SPRN_CSRR1
+ rlwinm r4, r4, 0, ~MSR_DE
+ mtspr SPRN_CSRR1, r4
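+ /* Clear all debug events; DBSR bits are write-one-to-clear */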
+ lis r4, 0xffff
+ ori r4, r4, 0xffff
+ mtspr SPRN_DBSR, r4
+ mfspr r4, SPRN_SPRG_THREAD
+ lwz r4, THREAD_KVM_VCPU(r4)
+ mtcr r3
+ lwz r3, VCPU_CRIT_SAVE(r4)
+ mfspr r4, \scratch
+ rfci
+1: /* debug interrupt happened in guest */
+ mtcr r3
+ mfspr r4, SPRN_SPRG_THREAD
+ lwz r4, THREAD_KVM_VCPU(r4)
+ lwz r3, VCPU_CRIT_SAVE(r4)
+ mfspr r4, \scratch
+ __KVM_HANDLER \ivor_nr \scratch \srr0
+.endm
+
.macro KVM_HANDLER_ADDR ivor_nr
.long kvmppc_handler_\ivor_nr
.endm
@@ -100,7 +136,7 @@ KVM_HANDLER BOOKE_INTERRUPT_FIT SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_WATCHDOG SPRN_SPRG_RSCRATCH_CRIT SPRN_CSRR0
KVM_HANDLER BOOKE_INTERRUPT_DTLB_MISS SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_ITLB_MISS SPRN_SPRG_RSCRATCH0 SPRN_SRR0
-KVM_HANDLER BOOKE_INTERRUPT_DEBUG SPRN_SPRG_RSCRATCH_CRIT SPRN_CSRR0
+KVM_DBG_HANDLER BOOKE_INTERRUPT_DEBUG SPRN_SPRG_RSCRATCH_CRIT SPRN_CSRR0
KVM_HANDLER BOOKE_INTERRUPT_SPE_UNAVAIL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_SPE_FP_DATA SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_SPE_FP_ROUND SPRN_SPRG_RSCRATCH0 SPRN_SRR0
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index 6dd4de7802bf..ce6b73c29612 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -425,6 +425,20 @@ int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
return kvmppc_set_sregs_ivor(vcpu, sregs);
}
+int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
+ union kvmppc_one_reg *val)
+{
+ int r = kvmppc_get_one_reg_e500_tlb(vcpu, id, val);
+ return r;
+}
+
+int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
+ union kvmppc_one_reg *val)
+{
+ int r = kvmppc_set_one_reg_e500_tlb(vcpu, id, val);
+ return r;
+}
+
struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
struct kvmppc_vcpu_e500 *vcpu_e500;
diff --git a/arch/powerpc/kvm/e500.h b/arch/powerpc/kvm/e500.h
index 41cefd43655f..c2e5e98453a6 100644
--- a/arch/powerpc/kvm/e500.h
+++ b/arch/powerpc/kvm/e500.h
@@ -23,20 +23,27 @@
#include <asm/mmu-book3e.h>
#include <asm/tlb.h>
+enum vcpu_ftr {
+ VCPU_FTR_MMU_V2
+};
+
#define E500_PID_NUM 3
#define E500_TLB_NUM 2
-#define E500_TLB_VALID 1
-#define E500_TLB_BITMAP 2
+/* entry is mapped somewhere in host TLB */
+#define E500_TLB_VALID (1 << 0)
+/* TLB1 entry is mapped by host TLB1, tracked by bitmaps */
+#define E500_TLB_BITMAP (1 << 1)
+/* TLB1 entry is mapped by host TLB0 */
#define E500_TLB_TLB0 (1 << 2)
struct tlbe_ref {
- pfn_t pfn;
- unsigned int flags; /* E500_TLB_* */
+ pfn_t pfn; /* valid only for TLB0, except briefly */
+ unsigned int flags; /* E500_TLB_* */
};
struct tlbe_priv {
- struct tlbe_ref ref; /* TLB0 only -- TLB1 uses tlb_refs */
+ struct tlbe_ref ref;
};
#ifdef CONFIG_KVM_E500V2
@@ -63,17 +70,6 @@ struct kvmppc_vcpu_e500 {
unsigned int gtlb_nv[E500_TLB_NUM];
- /*
- * information associated with each host TLB entry --
- * TLB1 only for now. If/when guest TLB1 entries can be
- * mapped with host TLB0, this will be used for that too.
- *
- * We don't want to use this for guest TLB0 because then we'd
- * have the overhead of doing the translation again even if
- * the entry is still in the guest TLB (e.g. we swapped out
- * and back, and our host TLB entries got evicted).
- */
- struct tlbe_ref *tlb_refs[E500_TLB_NUM];
unsigned int host_tlb1_nv;
u32 svr;
@@ -139,6 +135,10 @@ void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500);
void kvmppc_get_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_set_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+int kvmppc_get_one_reg_e500_tlb(struct kvm_vcpu *vcpu, u64 id,
+ union kvmppc_one_reg *val);
+int kvmppc_set_one_reg_e500_tlb(struct kvm_vcpu *vcpu, u64 id,
+ union kvmppc_one_reg *val);
#ifdef CONFIG_KVM_E500V2
unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 *vcpu_e500,
@@ -303,4 +303,18 @@ static inline unsigned int get_tlbmiss_tid(struct kvm_vcpu *vcpu)
#define get_tlb_sts(gtlbe) (MAS1_TS)
#endif /* !BOOKE_HV */
+static inline bool has_feature(const struct kvm_vcpu *vcpu,
+ enum vcpu_ftr ftr)
+{
+ bool has_ftr;
+ switch (ftr) {
+ case VCPU_FTR_MMU_V2:
+ has_ftr = ((vcpu->arch.mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V2);
+ break;
+ default:
+ return false;
+ }
+ return has_ftr;
+}
+
#endif /* KVM_E500_H */
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
index e78f353a836a..b10a01243abd 100644
--- a/arch/powerpc/kvm/e500_emulate.c
+++ b/arch/powerpc/kvm/e500_emulate.c
@@ -284,6 +284,16 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
case SPRN_TLB1CFG:
*spr_val = vcpu->arch.tlbcfg[1];
break;
+ case SPRN_TLB0PS:
+ if (!has_feature(vcpu, VCPU_FTR_MMU_V2))
+ return EMULATE_FAIL;
+ *spr_val = vcpu->arch.tlbps[0];
+ break;
+ case SPRN_TLB1PS:
+ if (!has_feature(vcpu, VCPU_FTR_MMU_V2))
+ return EMULATE_FAIL;
+ *spr_val = vcpu->arch.tlbps[1];
+ break;
case SPRN_L1CSR0:
*spr_val = vcpu_e500->l1csr0;
break;
@@ -307,6 +317,15 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
case SPRN_MMUCFG:
*spr_val = vcpu->arch.mmucfg;
break;
+ case SPRN_EPTCFG:
+ if (!has_feature(vcpu, VCPU_FTR_MMU_V2))
+ return EMULATE_FAIL;
+ /*
+ * Legacy Linux guests access EPTCFG register even if the E.PT
+ * category is disabled in the VM. Give them a chance to live.
+ */
+ *spr_val = vcpu->arch.eptcfg;
+ break;
/* extra exceptions */
case SPRN_IVOR32:
diff --git a/arch/powerpc/kvm/e500_mmu.c b/arch/powerpc/kvm/e500_mmu.c
index 5c4475983f78..c41a5a96b558 100644
--- a/arch/powerpc/kvm/e500_mmu.c
+++ b/arch/powerpc/kvm/e500_mmu.c
@@ -596,6 +596,140 @@ int kvmppc_set_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
return 0;
}
+int kvmppc_get_one_reg_e500_tlb(struct kvm_vcpu *vcpu, u64 id,
+ union kvmppc_one_reg *val)
+{
+ int r = 0;
+ long int i;
+
+ switch (id) {
+ case KVM_REG_PPC_MAS0:
+ *val = get_reg_val(id, vcpu->arch.shared->mas0);
+ break;
+ case KVM_REG_PPC_MAS1:
+ *val = get_reg_val(id, vcpu->arch.shared->mas1);
+ break;
+ case KVM_REG_PPC_MAS2:
+ *val = get_reg_val(id, vcpu->arch.shared->mas2);
+ break;
+ case KVM_REG_PPC_MAS7_3:
+ *val = get_reg_val(id, vcpu->arch.shared->mas7_3);
+ break;
+ case KVM_REG_PPC_MAS4:
+ *val = get_reg_val(id, vcpu->arch.shared->mas4);
+ break;
+ case KVM_REG_PPC_MAS6:
+ *val = get_reg_val(id, vcpu->arch.shared->mas6);
+ break;
+ case KVM_REG_PPC_MMUCFG:
+ *val = get_reg_val(id, vcpu->arch.mmucfg);
+ break;
+ case KVM_REG_PPC_EPTCFG:
+ *val = get_reg_val(id, vcpu->arch.eptcfg);
+ break;
+ case KVM_REG_PPC_TLB0CFG:
+ case KVM_REG_PPC_TLB1CFG:
+ case KVM_REG_PPC_TLB2CFG:
+ case KVM_REG_PPC_TLB3CFG:
+ i = id - KVM_REG_PPC_TLB0CFG;
+ *val = get_reg_val(id, vcpu->arch.tlbcfg[i]);
+ break;
+ case KVM_REG_PPC_TLB0PS:
+ case KVM_REG_PPC_TLB1PS:
+ case KVM_REG_PPC_TLB2PS:
+ case KVM_REG_PPC_TLB3PS:
+ i = id - KVM_REG_PPC_TLB0PS;
+ *val = get_reg_val(id, vcpu->arch.tlbps[i]);
+ break;
+ default:
+ r = -EINVAL;
+ break;
+ }
+
+ return r;
+}
+
+int kvmppc_set_one_reg_e500_tlb(struct kvm_vcpu *vcpu, u64 id,
+ union kvmppc_one_reg *val)
+{
+ int r = 0;
+ long int i;
+
+ switch (id) {
+ case KVM_REG_PPC_MAS0:
+ vcpu->arch.shared->mas0 = set_reg_val(id, *val);
+ break;
+ case KVM_REG_PPC_MAS1:
+ vcpu->arch.shared->mas1 = set_reg_val(id, *val);
+ break;
+ case KVM_REG_PPC_MAS2:
+ vcpu->arch.shared->mas2 = set_reg_val(id, *val);
+ break;
+ case KVM_REG_PPC_MAS7_3:
+ vcpu->arch.shared->mas7_3 = set_reg_val(id, *val);
+ break;
+ case KVM_REG_PPC_MAS4:
+ vcpu->arch.shared->mas4 = set_reg_val(id, *val);
+ break;
+ case KVM_REG_PPC_MAS6:
+ vcpu->arch.shared->mas6 = set_reg_val(id, *val);
+ break;
+ /* Only allow MMU registers to be set to the config supported by KVM */
+ case KVM_REG_PPC_MMUCFG: {
+ u32 reg = set_reg_val(id, *val);
+ if (reg != vcpu->arch.mmucfg)
+ r = -EINVAL;
+ break;
+ }
+ case KVM_REG_PPC_EPTCFG: {
+ u32 reg = set_reg_val(id, *val);
+ if (reg != vcpu->arch.eptcfg)
+ r = -EINVAL;
+ break;
+ }
+ case KVM_REG_PPC_TLB0CFG:
+ case KVM_REG_PPC_TLB1CFG:
+ case KVM_REG_PPC_TLB2CFG:
+ case KVM_REG_PPC_TLB3CFG: {
+ /* MMU geometry (N_ENTRY/ASSOC) can be set only using SW_TLB */
+ u32 reg = set_reg_val(id, *val);
+ i = id - KVM_REG_PPC_TLB0CFG;
+ if (reg != vcpu->arch.tlbcfg[i])
+ r = -EINVAL;
+ break;
+ }
+ case KVM_REG_PPC_TLB0PS:
+ case KVM_REG_PPC_TLB1PS:
+ case KVM_REG_PPC_TLB2PS:
+ case KVM_REG_PPC_TLB3PS: {
+ u32 reg = set_reg_val(id, *val);
+ i = id - KVM_REG_PPC_TLB0PS;
+ if (reg != vcpu->arch.tlbps[i])
+ r = -EINVAL;
+ break;
+ }
+ default:
+ r = -EINVAL;
+ break;
+ }
+
+ return r;
+}
+
+static int vcpu_mmu_geometry_update(struct kvm_vcpu *vcpu,
+ struct kvm_book3e_206_tlb_params *params)
+{
+ vcpu->arch.tlbcfg[0] &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
+ if (params->tlb_sizes[0] <= 2048)
+ vcpu->arch.tlbcfg[0] |= params->tlb_sizes[0];
+ vcpu->arch.tlbcfg[0] |= params->tlb_ways[0] << TLBnCFG_ASSOC_SHIFT;
+
+ vcpu->arch.tlbcfg[1] &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
+ vcpu->arch.tlbcfg[1] |= params->tlb_sizes[1];
+ vcpu->arch.tlbcfg[1] |= params->tlb_ways[1] << TLBnCFG_ASSOC_SHIFT;
+ return 0;
+}
+
int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
struct kvm_config_tlb *cfg)
{
@@ -692,16 +826,8 @@ int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
vcpu_e500->gtlb_offset[0] = 0;
vcpu_e500->gtlb_offset[1] = params.tlb_sizes[0];
- vcpu->arch.mmucfg = mfspr(SPRN_MMUCFG) & ~MMUCFG_LPIDSIZE;
-
- vcpu->arch.tlbcfg[0] &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
- if (params.tlb_sizes[0] <= 2048)
- vcpu->arch.tlbcfg[0] |= params.tlb_sizes[0];
- vcpu->arch.tlbcfg[0] |= params.tlb_ways[0] << TLBnCFG_ASSOC_SHIFT;
-
- vcpu->arch.tlbcfg[1] &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
- vcpu->arch.tlbcfg[1] |= params.tlb_sizes[1];
- vcpu->arch.tlbcfg[1] |= params.tlb_ways[1] << TLBnCFG_ASSOC_SHIFT;
+ /* Update vcpu's MMU geometry based on SW_TLB input */
+ vcpu_mmu_geometry_update(vcpu, &params);
vcpu_e500->shared_tlb_pages = pages;
vcpu_e500->num_shared_tlb_pages = num_pages;
@@ -737,6 +863,39 @@ int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
return 0;
}
+/* Vcpu's MMU default configuration */
+static int vcpu_mmu_init(struct kvm_vcpu *vcpu,
+ struct kvmppc_e500_tlb_params *params)
+{
+ /* Initialize RASIZE, PIDSIZE, NTLBS and MAVN fields with host values */
+ vcpu->arch.mmucfg = mfspr(SPRN_MMUCFG) & ~MMUCFG_LPIDSIZE;
+
+ /* Initialize TLBnCFG fields with host values and SW_TLB geometry */
+ vcpu->arch.tlbcfg[0] = mfspr(SPRN_TLB0CFG) &
+ ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
+ vcpu->arch.tlbcfg[0] |= params[0].entries;
+ vcpu->arch.tlbcfg[0] |= params[0].ways << TLBnCFG_ASSOC_SHIFT;
+
+ vcpu->arch.tlbcfg[1] = mfspr(SPRN_TLB1CFG) &
+ ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
+ vcpu->arch.tlbcfg[1] |= params[1].entries;
+ vcpu->arch.tlbcfg[1] |= params[1].ways << TLBnCFG_ASSOC_SHIFT;
+
+ if (has_feature(vcpu, VCPU_FTR_MMU_V2)) {
+ vcpu->arch.tlbps[0] = mfspr(SPRN_TLB0PS);
+ vcpu->arch.tlbps[1] = mfspr(SPRN_TLB1PS);
+
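+ /* The LRAT is not emulated, so hide it from the guest's MMUCFG */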
+ vcpu->arch.mmucfg &= ~MMUCFG_LRAT;
+
+ /* Guest mmu emulation currently doesn't handle E.PT */
+ vcpu->arch.eptcfg = 0;
+ vcpu->arch.tlbcfg[0] &= ~TLBnCFG_PT;
+ vcpu->arch.tlbcfg[1] &= ~TLBnCFG_IND;
+ }
+
+ return 0;
+}
+
int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
{
struct kvm_vcpu *vcpu = &vcpu_e500->vcpu;
@@ -781,18 +940,7 @@ int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
if (!vcpu_e500->g2h_tlb1_map)
goto err;
- /* Init TLB configuration register */
- vcpu->arch.tlbcfg[0] = mfspr(SPRN_TLB0CFG) &
- ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
- vcpu->arch.tlbcfg[0] |= vcpu_e500->gtlb_params[0].entries;
- vcpu->arch.tlbcfg[0] |=
- vcpu_e500->gtlb_params[0].ways << TLBnCFG_ASSOC_SHIFT;
-
- vcpu->arch.tlbcfg[1] = mfspr(SPRN_TLB1CFG) &
- ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
- vcpu->arch.tlbcfg[1] |= vcpu_e500->gtlb_params[1].entries;
- vcpu->arch.tlbcfg[1] |=
- vcpu_e500->gtlb_params[1].ways << TLBnCFG_ASSOC_SHIFT;
+ vcpu_mmu_init(vcpu, vcpu_e500->gtlb_params);
kvmppc_recalc_tlb1map_range(vcpu_e500);
return 0;
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index a222edfb9a9b..1c6a9d729df4 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -193,8 +193,11 @@ void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[tlbsel][esel].ref;
/* Don't bother with unmapped entries */
- if (!(ref->flags & E500_TLB_VALID))
- return;
+ if (!(ref->flags & E500_TLB_VALID)) {
+ WARN(ref->flags & (E500_TLB_BITMAP | E500_TLB_TLB0),
+ "%s: flags %x\n", __func__, ref->flags);
+ WARN_ON(tlbsel == 1 && vcpu_e500->g2h_tlb1_map[esel]);
+ }
if (tlbsel == 1 && ref->flags & E500_TLB_BITMAP) {
u64 tmp = vcpu_e500->g2h_tlb1_map[esel];
@@ -248,7 +251,7 @@ static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
pfn_t pfn)
{
ref->pfn = pfn;
- ref->flags = E500_TLB_VALID;
+ ref->flags |= E500_TLB_VALID;
if (tlbe_is_writable(gtlbe))
kvm_set_pfn_dirty(pfn);
@@ -257,6 +260,7 @@ static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
{
if (ref->flags & E500_TLB_VALID) {
+ /* FIXME: don't log bogus pfn for TLB1 */
trace_kvm_booke206_ref_release(ref->pfn, ref->flags);
ref->flags = 0;
}
@@ -274,36 +278,23 @@ static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500)
static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
{
- int tlbsel = 0;
- int i;
-
- for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
- struct tlbe_ref *ref =
- &vcpu_e500->gtlb_priv[tlbsel][i].ref;
- kvmppc_e500_ref_release(ref);
- }
-}
-
-static void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500)
-{
- int stlbsel = 1;
+ int tlbsel;
int i;
- kvmppc_e500_tlbil_all(vcpu_e500);
-
- for (i = 0; i < host_tlb_params[stlbsel].entries; i++) {
- struct tlbe_ref *ref =
- &vcpu_e500->tlb_refs[stlbsel][i];
- kvmppc_e500_ref_release(ref);
+ for (tlbsel = 0; tlbsel <= 1; tlbsel++) {
+ for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
+ struct tlbe_ref *ref =
+ &vcpu_e500->gtlb_priv[tlbsel][i].ref;
+ kvmppc_e500_ref_release(ref);
+ }
}
-
- clear_tlb_privs(vcpu_e500);
}
void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
- clear_tlb_refs(vcpu_e500);
+ kvmppc_e500_tlbil_all(vcpu_e500);
+ clear_tlb_privs(vcpu_e500);
clear_tlb1_bitmap(vcpu_e500);
}
@@ -458,8 +449,6 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
}
- /* Drop old ref and setup new one. */
- kvmppc_e500_ref_release(ref);
kvmppc_e500_ref_setup(ref, gtlbe, pfn);
kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
@@ -507,14 +496,15 @@ static int kvmppc_e500_tlb1_map_tlb1(struct kvmppc_vcpu_e500 *vcpu_e500,
if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size()))
vcpu_e500->host_tlb1_nv = 0;
- vcpu_e500->tlb_refs[1][sesel] = *ref;
- vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel;
- vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
if (vcpu_e500->h2g_tlb1_rmap[sesel]) {
- unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel];
+ unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel] - 1;
vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel);
}
- vcpu_e500->h2g_tlb1_rmap[sesel] = esel;
+
+ vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
+ vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel;
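+ /* h2g_tlb1_rmap stores esel + 1 so that 0 means "no guest mapping" */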
+ vcpu_e500->h2g_tlb1_rmap[sesel] = esel + 1;
+ WARN_ON(!(ref->flags & E500_TLB_VALID));
return sesel;
}
@@ -526,13 +516,12 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
struct kvm_book3e_206_tlb_entry *stlbe, int esel)
{
- struct tlbe_ref ref;
+ struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[1][esel].ref;
int sesel;
int r;
- ref.flags = 0;
r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe,
- &ref);
+ ref);
if (r)
return r;
@@ -544,7 +533,7 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
}
/* Otherwise map into TLB1 */
- sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, &ref, esel);
+ sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, ref, esel);
write_stlbe(vcpu_e500, gtlbe, stlbe, 1, sesel);
return 0;
@@ -565,7 +554,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
case 0:
priv = &vcpu_e500->gtlb_priv[tlbsel][esel];
- /* Triggers after clear_tlb_refs or on initial mapping */
+ /* Triggers after clear_tlb_privs or on initial mapping */
if (!(priv->ref.flags & E500_TLB_VALID)) {
kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
} else {
@@ -665,35 +654,16 @@ int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500)
host_tlb_params[0].entries / host_tlb_params[0].ways;
host_tlb_params[1].sets = 1;
- vcpu_e500->tlb_refs[0] =
- kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[0].entries,
- GFP_KERNEL);
- if (!vcpu_e500->tlb_refs[0])
- goto err;
-
- vcpu_e500->tlb_refs[1] =
- kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[1].entries,
- GFP_KERNEL);
- if (!vcpu_e500->tlb_refs[1])
- goto err;
-
vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) *
host_tlb_params[1].entries,
GFP_KERNEL);
if (!vcpu_e500->h2g_tlb1_rmap)
- goto err;
+ return -EINVAL;
return 0;
-
-err:
- kfree(vcpu_e500->tlb_refs[0]);
- kfree(vcpu_e500->tlb_refs[1]);
- return -EINVAL;
}
void e500_mmu_host_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
{
kfree(vcpu_e500->h2g_tlb1_rmap);
- kfree(vcpu_e500->tlb_refs[0]);
- kfree(vcpu_e500->tlb_refs[1]);
}
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index 1f89d26e65fb..753cc99eff2b 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -108,6 +108,8 @@ void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
{
}
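+/* Last vcpu that ran on each physical CPU; the shadow TLB is flushed on vcpu load when it changes */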
+static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu_on_cpu);
+
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
@@ -136,8 +138,11 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
mtspr(SPRN_GDEAR, vcpu->arch.shared->dar);
mtspr(SPRN_GESR, vcpu->arch.shared->esr);
- if (vcpu->arch.oldpir != mfspr(SPRN_PIR))
+ if (vcpu->arch.oldpir != mfspr(SPRN_PIR) ||
+ __get_cpu_var(last_vcpu_on_cpu) != vcpu) {
kvmppc_e500_tlbil_all(vcpu_e500);
+ __get_cpu_var(last_vcpu_on_cpu) = vcpu;
+ }
kvmppc_load_guest_fp(vcpu);
}
@@ -172,6 +177,8 @@ int kvmppc_core_check_processor_compat(void)
r = 0;
else if (strcmp(cur_cpu_spec->cpu_name, "e5500") == 0)
r = 0;
+ else if (strcmp(cur_cpu_spec->cpu_name, "e6500") == 0)
+ r = 0;
else
r = -ENOTSUPP;
@@ -255,6 +262,20 @@ int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
return kvmppc_set_sregs_ivor(vcpu, sregs);
}
+int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
+ union kvmppc_one_reg *val)
+{
+ int r = kvmppc_get_one_reg_e500_tlb(vcpu, id, val);
+ return r;
+}
+
+int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
+ union kvmppc_one_reg *val)
+{
+ int r = kvmppc_set_one_reg_e500_tlb(vcpu, id, val);
+ return r;
+}
+
struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
struct kvmppc_vcpu_e500 *vcpu_e500;
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index 7a73b6f72a8b..631a2650e4e4 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -38,6 +38,7 @@
#define OP_31_XOP_TRAP 4
#define OP_31_XOP_LWZX 23
+#define OP_31_XOP_DCBST 54
#define OP_31_XOP_TRAP_64 68
#define OP_31_XOP_DCBF 86
#define OP_31_XOP_LBZX 87
@@ -370,6 +371,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
emulated = kvmppc_emulate_mtspr(vcpu, sprn, rs);
break;
+ case OP_31_XOP_DCBST:
case OP_31_XOP_DCBF:
case OP_31_XOP_DCBI:
/* Do nothing. The guest is performing dcbi because
diff --git a/arch/powerpc/kvm/irq.h b/arch/powerpc/kvm/irq.h
new file mode 100644
index 000000000000..5a9a10b90762
--- /dev/null
+++ b/arch/powerpc/kvm/irq.h
@@ -0,0 +1,20 @@
+#ifndef __IRQ_H
+#define __IRQ_H
+
+#include <linux/kvm_host.h>
+
+static inline int irqchip_in_kernel(struct kvm *kvm)
+{
+ int ret = 0;
+
+#ifdef CONFIG_KVM_MPIC
+ ret = ret || (kvm->arch.mpic != NULL);
+#endif
+#ifdef CONFIG_KVM_XICS
+ ret = ret || (kvm->arch.xics != NULL);
+#endif
+ smp_rmb();
+ return ret;
+}
+
+#endif
diff --git a/arch/powerpc/kvm/mpic.c b/arch/powerpc/kvm/mpic.c
new file mode 100644
index 000000000000..2861ae9eaae6
--- /dev/null
+++ b/arch/powerpc/kvm/mpic.c
@@ -0,0 +1,1853 @@
+/*
+ * OpenPIC emulation
+ *
+ * Copyright (c) 2004 Jocelyn Mayer
+ * 2011 Alexander Graf
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/kvm_host.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/anon_inodes.h>
+#include <asm/uaccess.h>
+#include <asm/mpic.h>
+#include <asm/kvm_para.h>
+#include <asm/kvm_host.h>
+#include <asm/kvm_ppc.h>
+#include "iodev.h"
+
+#define MAX_CPU 32
+#define MAX_SRC 256
+#define MAX_TMR 4
+#define MAX_IPI 4
+#define MAX_MSI 8
+#define MAX_IRQ (MAX_SRC + MAX_IPI + MAX_TMR)
+#define VID 0x03 /* MPIC version ID */
+
+/* OpenPIC capability flags */
+#define OPENPIC_FLAG_IDR_CRIT (1 << 0)
+#define OPENPIC_FLAG_ILR (2 << 0)
+
+/* OpenPIC address map */
+#define OPENPIC_REG_SIZE 0x40000
+#define OPENPIC_GLB_REG_START 0x0
+#define OPENPIC_GLB_REG_SIZE 0x10F0
+#define OPENPIC_TMR_REG_START 0x10F0
+#define OPENPIC_TMR_REG_SIZE 0x220
+#define OPENPIC_MSI_REG_START 0x1600
+#define OPENPIC_MSI_REG_SIZE 0x200
+#define OPENPIC_SUMMARY_REG_START 0x3800
+#define OPENPIC_SUMMARY_REG_SIZE 0x800
+#define OPENPIC_SRC_REG_START 0x10000
+#define OPENPIC_SRC_REG_SIZE (MAX_SRC * 0x20)
+#define OPENPIC_CPU_REG_START 0x20000
+#define OPENPIC_CPU_REG_SIZE (0x100 + ((MAX_CPU - 1) * 0x1000))
+
+struct fsl_mpic_info {
+ int max_ext;
+};
+
+static struct fsl_mpic_info fsl_mpic_20 = {
+ .max_ext = 12,
+};
+
+static struct fsl_mpic_info fsl_mpic_42 = {
+ .max_ext = 12,
+};
+
+#define FRR_NIRQ_SHIFT 16
+#define FRR_NCPU_SHIFT 8
+#define FRR_VID_SHIFT 0
+
+#define VID_REVISION_1_2 2
+#define VID_REVISION_1_3 3
+
+#define VIR_GENERIC 0x00000000 /* Generic Vendor ID */
+
+#define GCR_RESET 0x80000000
+#define GCR_MODE_PASS 0x00000000
+#define GCR_MODE_MIXED 0x20000000
+#define GCR_MODE_PROXY 0x60000000
+
+#define TBCR_CI 0x80000000 /* count inhibit */
+#define TCCR_TOG 0x80000000 /* toggles when the count decrements to zero */
+
+#define IDR_EP_SHIFT 31
+#define IDR_EP_MASK (1 << IDR_EP_SHIFT)
+#define IDR_CI0_SHIFT 30
+#define IDR_CI1_SHIFT 29
+#define IDR_P1_SHIFT 1
+#define IDR_P0_SHIFT 0
+
+#define ILR_INTTGT_MASK 0x000000ff
+#define ILR_INTTGT_INT 0x00
+#define ILR_INTTGT_CINT 0x01 /* critical */
+#define ILR_INTTGT_MCP 0x02 /* machine check */
+#define NUM_OUTPUTS 3
+
+#define MSIIR_OFFSET 0x140
+#define MSIIR_SRS_SHIFT 29
+#define MSIIR_SRS_MASK (0x7 << MSIIR_SRS_SHIFT)
+#define MSIIR_IBS_SHIFT 24
+#define MSIIR_IBS_MASK (0x1f << MSIIR_IBS_SHIFT)
+
+static int get_current_cpu(void)
+{
+#if defined(CONFIG_KVM) && defined(CONFIG_BOOKE)
+ struct kvm_vcpu *vcpu = current->thread.kvm_vcpu;
+ return vcpu ? vcpu->arch.irq_cpu_id : -1;
+#else
+ /* XXX */
+ return -1;
+#endif
+}
+
+static int openpic_cpu_write_internal(void *opaque, gpa_t addr,
+ u32 val, int idx);
+static int openpic_cpu_read_internal(void *opaque, gpa_t addr,
+ u32 *ptr, int idx);
+
+enum irq_type {
+ IRQ_TYPE_NORMAL = 0,
+ IRQ_TYPE_FSLINT, /* FSL internal interrupt -- level only */
+ IRQ_TYPE_FSLSPECIAL, /* FSL timer/IPI interrupt, edge, no polarity */
+};
+
+struct irq_queue {
+ /* Round up to the nearest 64 IRQs so that the queue length
+ * won't change when moving between 32 and 64 bit hosts.
+ */
+ unsigned long queue[BITS_TO_LONGS((MAX_IRQ + 63) & ~63)];
+ int next;
+ int priority;
+};
+
+struct irq_source {
+ uint32_t ivpr; /* IRQ vector/priority register */
+ uint32_t idr; /* IRQ destination register */
+ uint32_t destmask; /* bitmap of CPU destinations */
+ int last_cpu;
+ int output; /* IRQ level, e.g. ILR_INTTGT_INT */
+ int pending; /* TRUE if IRQ is pending */
+ enum irq_type type;
+ bool level:1; /* level-triggered */
+ bool nomask:1; /* critical interrupts ignore mask on some FSL MPICs */
+};
+
+#define IVPR_MASK_SHIFT 31
+#define IVPR_MASK_MASK (1 << IVPR_MASK_SHIFT)
+#define IVPR_ACTIVITY_SHIFT 30
+#define IVPR_ACTIVITY_MASK (1 << IVPR_ACTIVITY_SHIFT)
+#define IVPR_MODE_SHIFT 29
+#define IVPR_MODE_MASK (1 << IVPR_MODE_SHIFT)
+#define IVPR_POLARITY_SHIFT 23
+#define IVPR_POLARITY_MASK (1 << IVPR_POLARITY_SHIFT)
+#define IVPR_SENSE_SHIFT 22
+#define IVPR_SENSE_MASK (1 << IVPR_SENSE_SHIFT)
+
+#define IVPR_PRIORITY_MASK (0xF << 16)
+#define IVPR_PRIORITY(_ivprr_) ((int)(((_ivprr_) & IVPR_PRIORITY_MASK) >> 16))
+#define IVPR_VECTOR(opp, _ivprr_) ((_ivprr_) & (opp)->vector_mask)
+
+/* IDR[EP/CI] are only for FSL MPIC prior to v4.0 */
+#define IDR_EP 0x80000000 /* external pin */
+#define IDR_CI 0x40000000 /* critical interrupt */
+
+struct irq_dest {
+ struct kvm_vcpu *vcpu;
+
+ int32_t ctpr; /* CPU current task priority */
+ struct irq_queue raised;
+ struct irq_queue servicing;
+
+ /* Count of IRQ sources asserting on non-INT outputs */
+ uint32_t outputs_active[NUM_OUTPUTS];
+};
+
+#define MAX_MMIO_REGIONS 10
+
+struct openpic {
+ struct kvm *kvm;
+ struct kvm_device *dev;
+ struct kvm_io_device mmio;
+ const struct mem_reg *mmio_regions[MAX_MMIO_REGIONS];
+ int num_mmio_regions;
+
+ gpa_t reg_base;
+ spinlock_t lock;
+
+ /* Behavior control */
+ struct fsl_mpic_info *fsl;
+ uint32_t model;
+ uint32_t flags;
+ uint32_t nb_irqs;
+ uint32_t vid;
+ uint32_t vir; /* Vendor identification register */
+ uint32_t vector_mask;
+ uint32_t tfrr_reset;
+ uint32_t ivpr_reset;
+ uint32_t idr_reset;
+ uint32_t brr1;
+ uint32_t mpic_mode_mask;
+
+ /* Global registers */
+ uint32_t frr; /* Feature reporting register */
+ uint32_t gcr; /* Global configuration register */
+ uint32_t pir; /* Processor initialization register */
+ uint32_t spve; /* Spurious vector register */
+ uint32_t tfrr; /* Timer frequency reporting register */
+ /* Source registers */
+ struct irq_source src[MAX_IRQ];
+ /* Local registers per output pin */
+ struct irq_dest dst[MAX_CPU];
+ uint32_t nb_cpus;
+ /* Timer registers */
+ struct {
+ uint32_t tccr; /* Global timer current count register */
+ uint32_t tbcr; /* Global timer base count register */
+ } timers[MAX_TMR];
+ /* Shared MSI registers */
+ struct {
+ uint32_t msir; /* Shared Message Signaled Interrupt Register */
+ } msi[MAX_MSI];
+ uint32_t max_irq;
+ uint32_t irq_ipi0;
+ uint32_t irq_tim0;
+ uint32_t irq_msi;
+};
+
+
+static void mpic_irq_raise(struct openpic *opp, struct irq_dest *dst,
+ int output)
+{
+ struct kvm_interrupt irq = {
+ .irq = KVM_INTERRUPT_SET_LEVEL,
+ };
+
+ if (!dst->vcpu) {
+ pr_debug("%s: destination cpu %d does not exist\n",
+ __func__, (int)(dst - &opp->dst[0]));
+ return;
+ }
+
+ pr_debug("%s: cpu %d output %d\n", __func__, dst->vcpu->arch.irq_cpu_id,
+ output);
+
+ if (output != ILR_INTTGT_INT) /* TODO */
+ return;
+
+ kvm_vcpu_ioctl_interrupt(dst->vcpu, &irq);
+}
+
+static void mpic_irq_lower(struct openpic *opp, struct irq_dest *dst,
+ int output)
+{
+ if (!dst->vcpu) {
+ pr_debug("%s: destination cpu %d does not exist\n",
+ __func__, (int)(dst - &opp->dst[0]));
+ return;
+ }
+
+ pr_debug("%s: cpu %d output %d\n", __func__, dst->vcpu->arch.irq_cpu_id,
+ output);
+
+ if (output != ILR_INTTGT_INT) /* TODO */
+ return;
+
+ kvmppc_core_dequeue_external(dst->vcpu);
+}
+
+static inline void IRQ_setbit(struct irq_queue *q, int n_IRQ)
+{
+ set_bit(n_IRQ, q->queue);
+}
+
+static inline void IRQ_resetbit(struct irq_queue *q, int n_IRQ)
+{
+ clear_bit(n_IRQ, q->queue);
+}
+
+static inline int IRQ_testbit(struct irq_queue *q, int n_IRQ)
+{
+ return test_bit(n_IRQ, q->queue);
+}
+
+static void IRQ_check(struct openpic *opp, struct irq_queue *q)
+{
+ int irq = -1;
+ int next = -1;
+ int priority = -1;
+
+ for (;;) {
+ irq = find_next_bit(q->queue, opp->max_irq, irq + 1);
+ if (irq == opp->max_irq)
+ break;
+
+ pr_debug("IRQ_check: irq %d set ivpr_pr=%d pr=%d\n",
+ irq, IVPR_PRIORITY(opp->src[irq].ivpr), priority);
+
+ if (IVPR_PRIORITY(opp->src[irq].ivpr) > priority) {
+ next = irq;
+ priority = IVPR_PRIORITY(opp->src[irq].ivpr);
+ }
+ }
+
+ q->next = next;
+ q->priority = priority;
+}
+
+static int IRQ_get_next(struct openpic *opp, struct irq_queue *q)
+{
+ /* XXX: optimize */
+ IRQ_check(opp, q);
+
+ return q->next;
+}
+
+static void IRQ_local_pipe(struct openpic *opp, int n_CPU, int n_IRQ,
+ bool active, bool was_active)
+{
+ struct irq_dest *dst;
+ struct irq_source *src;
+ int priority;
+
+ dst = &opp->dst[n_CPU];
+ src = &opp->src[n_IRQ];
+
+ pr_debug("%s: IRQ %d active %d was %d\n",
+ __func__, n_IRQ, active, was_active);
+
+ if (src->output != ILR_INTTGT_INT) {
+ pr_debug("%s: output %d irq %d active %d was %d count %d\n",
+ __func__, src->output, n_IRQ, active, was_active,
+ dst->outputs_active[src->output]);
+
+ /* On Freescale MPIC, critical interrupts ignore priority,
+ * IACK, EOI, etc. Before MPIC v4.1 they also ignore
+ * masking.
+ */
+ if (active) {
+ if (!was_active &&
+ dst->outputs_active[src->output]++ == 0) {
+ pr_debug("%s: Raise OpenPIC output %d cpu %d irq %d\n",
+ __func__, src->output, n_CPU, n_IRQ);
+ mpic_irq_raise(opp, dst, src->output);
+ }
+ } else {
+ if (was_active &&
+ --dst->outputs_active[src->output] == 0) {
+ pr_debug("%s: Lower OpenPIC output %d cpu %d irq %d\n",
+ __func__, src->output, n_CPU, n_IRQ);
+ mpic_irq_lower(opp, dst, src->output);
+ }
+ }
+
+ return;
+ }
+
+ priority = IVPR_PRIORITY(src->ivpr);
+
+ /* Even if the interrupt doesn't have enough priority,
+ * it is still raised, in case ctpr is lowered later.
+ */
+ if (active)
+ IRQ_setbit(&dst->raised, n_IRQ);
+ else
+ IRQ_resetbit(&dst->raised, n_IRQ);
+
+ IRQ_check(opp, &dst->raised);
+
+ if (active && priority <= dst->ctpr) {
+ pr_debug("%s: IRQ %d priority %d too low for ctpr %d on CPU %d\n",
+ __func__, n_IRQ, priority, dst->ctpr, n_CPU);
+ active = 0;
+ }
+
+ if (active) {
+ if (IRQ_get_next(opp, &dst->servicing) >= 0 &&
+ priority <= dst->servicing.priority) {
+ pr_debug("%s: IRQ %d is hidden by servicing IRQ %d on CPU %d\n",
+ __func__, n_IRQ, dst->servicing.next, n_CPU);
+ } else {
+ pr_debug("%s: Raise OpenPIC INT output cpu %d irq %d/%d\n",
+ __func__, n_CPU, n_IRQ, dst->raised.next);
+ mpic_irq_raise(opp, dst, ILR_INTTGT_INT);
+ }
+ } else {
+ IRQ_get_next(opp, &dst->servicing);
+ if (dst->raised.priority > dst->ctpr &&
+ dst->raised.priority > dst->servicing.priority) {
+ pr_debug("%s: IRQ %d inactive, IRQ %d prio %d above %d/%d, CPU %d\n",
+ __func__, n_IRQ, dst->raised.next,
+ dst->raised.priority, dst->ctpr,
+ dst->servicing.priority, n_CPU);
+ /* IRQ line stays asserted */
+ } else {
+ pr_debug("%s: IRQ %d inactive, current prio %d/%d, CPU %d\n",
+ __func__, n_IRQ, dst->ctpr,
+ dst->servicing.priority, n_CPU);
+ mpic_irq_lower(opp, dst, ILR_INTTGT_INT);
+ }
+ }
+}
+
+/* update pic state because registers for n_IRQ have changed value */
+static void openpic_update_irq(struct openpic *opp, int n_IRQ)
+{
+ struct irq_source *src;
+ bool active, was_active;
+ int i;
+
+ src = &opp->src[n_IRQ];
+ active = src->pending;
+
+ if ((src->ivpr & IVPR_MASK_MASK) && !src->nomask) {
+ /* Interrupt source is disabled */
+ pr_debug("%s: IRQ %d is disabled\n", __func__, n_IRQ);
+ active = false;
+ }
+
+ was_active = !!(src->ivpr & IVPR_ACTIVITY_MASK);
+
+ /*
+ * We don't have a similar check for already-active because
+ * ctpr may have changed and we need to withdraw the interrupt.
+ */
+ if (!active && !was_active) {
+ pr_debug("%s: IRQ %d is already inactive\n", __func__, n_IRQ);
+ return;
+ }
+
+ if (active)
+ src->ivpr |= IVPR_ACTIVITY_MASK;
+ else
+ src->ivpr &= ~IVPR_ACTIVITY_MASK;
+
+ if (src->destmask == 0) {
+ /* No target */
+ pr_debug("%s: IRQ %d has no target\n", __func__, n_IRQ);
+ return;
+ }
+
+ if (src->destmask == (1 << src->last_cpu)) {
+ /* Only one CPU is allowed to receive this IRQ */
+ IRQ_local_pipe(opp, src->last_cpu, n_IRQ, active, was_active);
+ } else if (!(src->ivpr & IVPR_MODE_MASK)) {
+ /* Directed delivery mode */
+ for (i = 0; i < opp->nb_cpus; i++) {
+ if (src->destmask & (1 << i)) {
+ IRQ_local_pipe(opp, i, n_IRQ, active,
+ was_active);
+ }
+ }
+ } else {
+ /* Distributed delivery mode */
+ for (i = src->last_cpu + 1; i != src->last_cpu; i++) {
+ if (i == opp->nb_cpus)
+ i = 0;
+
+ if (src->destmask & (1 << i)) {
+ IRQ_local_pipe(opp, i, n_IRQ, active,
+ was_active);
+ src->last_cpu = i;
+ break;
+ }
+ }
+ }
+}
+
+static void openpic_set_irq(void *opaque, int n_IRQ, int level)
+{
+ struct openpic *opp = opaque;
+ struct irq_source *src;
+
+ if (n_IRQ >= MAX_IRQ) {
+ WARN_ONCE(1, "%s: IRQ %d out of range\n", __func__, n_IRQ);
+ return;
+ }
+
+ src = &opp->src[n_IRQ];
+ pr_debug("openpic: set irq %d = %d ivpr=0x%08x\n",
+ n_IRQ, level, src->ivpr);
+ if (src->level) {
+ /* level-sensitive irq */
+ src->pending = level;
+ openpic_update_irq(opp, n_IRQ);
+ } else {
+ /* edge-sensitive irq */
+ if (level) {
+ src->pending = 1;
+ openpic_update_irq(opp, n_IRQ);
+ }
+
+ if (src->output != ILR_INTTGT_INT) {
+ /* Edge-triggered interrupts shouldn't be used
+ * with non-INT delivery, but just in case,
+ * try to make it do something sane rather than
+ * cause an interrupt storm. This is close to
+ * what you'd probably see happen in real hardware.
+ */
+ src->pending = 0;
+ openpic_update_irq(opp, n_IRQ);
+ }
+ }
+}
+
+static void openpic_reset(struct openpic *opp)
+{
+ int i;
+
+ opp->gcr = GCR_RESET;
+ /* Initialise controller registers */
+ opp->frr = ((opp->nb_irqs - 1) << FRR_NIRQ_SHIFT) |
+ (opp->vid << FRR_VID_SHIFT);
+
+ opp->pir = 0;
+ opp->spve = -1 & opp->vector_mask;
+ opp->tfrr = opp->tfrr_reset;
+ /* Initialise IRQ sources */
+ for (i = 0; i < opp->max_irq; i++) {
+ opp->src[i].ivpr = opp->ivpr_reset;
+ opp->src[i].idr = opp->idr_reset;
+
+ switch (opp->src[i].type) {
+ case IRQ_TYPE_NORMAL:
+ opp->src[i].level =
+ !!(opp->ivpr_reset & IVPR_SENSE_MASK);
+ break;
+
+ case IRQ_TYPE_FSLINT:
+ opp->src[i].ivpr |= IVPR_POLARITY_MASK;
+ break;
+
+ case IRQ_TYPE_FSLSPECIAL:
+ break;
+ }
+ }
+ /* Initialise IRQ destinations */
+ for (i = 0; i < MAX_CPU; i++) {
+ opp->dst[i].ctpr = 15;
+ memset(&opp->dst[i].raised, 0, sizeof(struct irq_queue));
+ opp->dst[i].raised.next = -1;
+ memset(&opp->dst[i].servicing, 0, sizeof(struct irq_queue));
+ opp->dst[i].servicing.next = -1;
+ }
+ /* Initialise timers */
+ for (i = 0; i < MAX_TMR; i++) {
+ opp->timers[i].tccr = 0;
+ opp->timers[i].tbcr = TBCR_CI;
+ }
+ /* Go out of RESET state */
+ opp->gcr = 0;
+}
+
+static inline uint32_t read_IRQreg_idr(struct openpic *opp, int n_IRQ)
+{
+ return opp->src[n_IRQ].idr;
+}
+
+static inline uint32_t read_IRQreg_ilr(struct openpic *opp, int n_IRQ)
+{
+ if (opp->flags & OPENPIC_FLAG_ILR)
+ return opp->src[n_IRQ].output;
+
+ return 0xffffffff;
+}
+
+static inline uint32_t read_IRQreg_ivpr(struct openpic *opp, int n_IRQ)
+{
+ return opp->src[n_IRQ].ivpr;
+}
+
+static inline void write_IRQreg_idr(struct openpic *opp, int n_IRQ,
+ uint32_t val)
+{
+ struct irq_source *src = &opp->src[n_IRQ];
+ uint32_t normal_mask = (1UL << opp->nb_cpus) - 1;
+ uint32_t crit_mask = 0;
+ uint32_t mask = normal_mask;
+ int crit_shift = IDR_EP_SHIFT - opp->nb_cpus;
+ int i;
+
+ if (opp->flags & OPENPIC_FLAG_IDR_CRIT) {
+ crit_mask = mask << crit_shift;
+ mask |= crit_mask | IDR_EP;
+ }
+
+ src->idr = val & mask;
+ pr_debug("Set IDR %d to 0x%08x\n", n_IRQ, src->idr);
+
+ if (opp->flags & OPENPIC_FLAG_IDR_CRIT) {
+ if (src->idr & crit_mask) {
+ if (src->idr & normal_mask) {
+ pr_debug("%s: IRQ configured for multiple output types, using critical\n",
+ __func__);
+ }
+
+ src->output = ILR_INTTGT_CINT;
+ src->nomask = true;
+ src->destmask = 0;
+
+ for (i = 0; i < opp->nb_cpus; i++) {
+ int n_ci = IDR_CI0_SHIFT - i;
+
+ if (src->idr & (1UL << n_ci))
+ src->destmask |= 1UL << i;
+ }
+ } else {
+ src->output = ILR_INTTGT_INT;
+ src->nomask = false;
+ src->destmask = src->idr & normal_mask;
+ }
+ } else {
+ src->destmask = src->idr;
+ }
+}
+
+static inline void write_IRQreg_ilr(struct openpic *opp, int n_IRQ,
+ uint32_t val)
+{
+ if (opp->flags & OPENPIC_FLAG_ILR) {
+ struct irq_source *src = &opp->src[n_IRQ];
+
+ src->output = val & ILR_INTTGT_MASK;
+ pr_debug("Set ILR %d to 0x%08x, output %d\n", n_IRQ, src->idr,
+ src->output);
+
+ /* TODO: on MPIC v4.0 only, set nomask for non-INT */
+ }
+}
+
+static inline void write_IRQreg_ivpr(struct openpic *opp, int n_IRQ,
+ uint32_t val)
+{
+ uint32_t mask;
+
+ /* NOTE when implementing newer FSL MPIC models: starting with v4.0,
+ * the polarity bit is read-only on internal interrupts.
+ */
+ mask = IVPR_MASK_MASK | IVPR_PRIORITY_MASK | IVPR_SENSE_MASK |
+ IVPR_POLARITY_MASK | opp->vector_mask;
+
+ /* ACTIVITY bit is read-only */
+ opp->src[n_IRQ].ivpr =
+ (opp->src[n_IRQ].ivpr & IVPR_ACTIVITY_MASK) | (val & mask);
+
+ /* For FSL internal interrupts, the sense bit is reserved and zero,
+ * and the interrupt is always level-triggered. Timers and IPIs
+ * have no sense or polarity bits, and are edge-triggered.
+ */
+ switch (opp->src[n_IRQ].type) {
+ case IRQ_TYPE_NORMAL:
+ opp->src[n_IRQ].level =
+ !!(opp->src[n_IRQ].ivpr & IVPR_SENSE_MASK);
+ break;
+
+ case IRQ_TYPE_FSLINT:
+ opp->src[n_IRQ].ivpr &= ~IVPR_SENSE_MASK;
+ break;
+
+ case IRQ_TYPE_FSLSPECIAL:
+ opp->src[n_IRQ].ivpr &= ~(IVPR_POLARITY_MASK | IVPR_SENSE_MASK);
+ break;
+ }
+
+ openpic_update_irq(opp, n_IRQ);
+ pr_debug("Set IVPR %d to 0x%08x -> 0x%08x\n", n_IRQ, val,
+ opp->src[n_IRQ].ivpr);
+}
+
+static void openpic_gcr_write(struct openpic *opp, uint64_t val)
+{
+ if (val & GCR_RESET) {
+ openpic_reset(opp);
+ return;
+ }
+
+ opp->gcr &= ~opp->mpic_mode_mask;
+ opp->gcr |= val & opp->mpic_mode_mask;
+}
+
+static int openpic_gbl_write(void *opaque, gpa_t addr, u32 val)
+{
+ struct openpic *opp = opaque;
+ int err = 0;
+
+ pr_debug("%s: addr %#llx <= %08x\n", __func__, addr, val);
+ if (addr & 0xF)
+ return 0;
+
+ switch (addr) {
+ case 0x00: /* Block Revision Register1 (BRR1) is Readonly */
+ break;
+ case 0x40:
+ case 0x50:
+ case 0x60:
+ case 0x70:
+ case 0x80:
+ case 0x90:
+ case 0xA0:
+ case 0xB0:
+ err = openpic_cpu_write_internal(opp, addr, val,
+ get_current_cpu());
+ break;
+ case 0x1000: /* FRR */
+ break;
+ case 0x1020: /* GCR */
+ openpic_gcr_write(opp, val);
+ break;
+ case 0x1080: /* VIR */
+ break;
+ case 0x1090: /* PIR */
+ /*
+ * This register is used to reset a CPU core --
+ * let userspace handle it.
+ */
+ err = -ENXIO;
+ break;
+ case 0x10A0: /* IPI_IVPR */
+ case 0x10B0:
+ case 0x10C0:
+ case 0x10D0: {
+ int idx;
+ idx = (addr - 0x10A0) >> 4;
+ write_IRQreg_ivpr(opp, opp->irq_ipi0 + idx, val);
+ break;
+ }
+ case 0x10E0: /* SPVE */
+ opp->spve = val & opp->vector_mask;
+ break;
+ default:
+ break;
+ }
+
+ return err;
+}
+
+static int openpic_gbl_read(void *opaque, gpa_t addr, u32 *ptr)
+{
+ struct openpic *opp = opaque;
+ u32 retval;
+ int err = 0;
+
+ pr_debug("%s: addr %#llx\n", __func__, addr);
+ retval = 0xFFFFFFFF;
+ if (addr & 0xF)
+ goto out;
+
+ switch (addr) {
+ case 0x1000: /* FRR */
+ retval = opp->frr;
+ retval |= (opp->nb_cpus - 1) << FRR_NCPU_SHIFT;
+ break;
+ case 0x1020: /* GCR */
+ retval = opp->gcr;
+ break;
+ case 0x1080: /* VIR */
+ retval = opp->vir;
+ break;
+ case 0x1090: /* PIR */
+ retval = 0x00000000;
+ break;
+ case 0x00: /* Block Revision Register1 (BRR1) */
+ retval = opp->brr1;
+ break;
+ case 0x40:
+ case 0x50:
+ case 0x60:
+ case 0x70:
+ case 0x80:
+ case 0x90:
+ case 0xA0:
+ case 0xB0:
+ err = openpic_cpu_read_internal(opp, addr,
+ &retval, get_current_cpu());
+ break;
+ case 0x10A0: /* IPI_IVPR */
+ case 0x10B0:
+ case 0x10C0:
+ case 0x10D0:
+ {
+ int idx;
+ idx = (addr - 0x10A0) >> 4;
+ retval = read_IRQreg_ivpr(opp, opp->irq_ipi0 + idx);
+ }
+ break;
+ case 0x10E0: /* SPVE */
+ retval = opp->spve;
+ break;
+ default:
+ break;
+ }
+
+out:
+ pr_debug("%s: => 0x%08x\n", __func__, retval);
+ *ptr = retval;
+ return err;
+}
+
+static int openpic_tmr_write(void *opaque, gpa_t addr, u32 val)
+{
+ struct openpic *opp = opaque;
+ int idx;
+
+ addr += 0x10f0;
+
+ pr_debug("%s: addr %#llx <= %08x\n", __func__, addr, val);
+ if (addr & 0xF)
+ return 0;
+
+ if (addr == 0x10f0) {
+ /* TFRR */
+ opp->tfrr = val;
+ return 0;
+ }
+
+ idx = (addr >> 6) & 0x3;
+ addr = addr & 0x30;
+
+ switch (addr & 0x30) {
+ case 0x00: /* TCCR */
+ break;
+ case 0x10: /* TBCR */
+ if ((opp->timers[idx].tccr & TCCR_TOG) != 0 &&
+ (val & TBCR_CI) == 0 &&
+ (opp->timers[idx].tbcr & TBCR_CI) != 0)
+ opp->timers[idx].tccr &= ~TCCR_TOG;
+
+ opp->timers[idx].tbcr = val;
+ break;
+ case 0x20: /* TVPR */
+ write_IRQreg_ivpr(opp, opp->irq_tim0 + idx, val);
+ break;
+ case 0x30: /* TDR */
+ write_IRQreg_idr(opp, opp->irq_tim0 + idx, val);
+ break;
+ }
+
+ return 0;
+}
+
+static int openpic_tmr_read(void *opaque, gpa_t addr, u32 *ptr)
+{
+ struct openpic *opp = opaque;
+ uint32_t retval = -1;
+ int idx;
+
+ pr_debug("%s: addr %#llx\n", __func__, addr);
+ if (addr & 0xF)
+ goto out;
+
+ idx = (addr >> 6) & 0x3;
+ if (addr == 0x0) {
+ /* TFRR */
+ retval = opp->tfrr;
+ goto out;
+ }
+
+ switch (addr & 0x30) {
+ case 0x00: /* TCCR */
+ retval = opp->timers[idx].tccr;
+ break;
+ case 0x10: /* TBCR */
+ retval = opp->timers[idx].tbcr;
+ break;
+ case 0x20: /* TIPV */
+ retval = read_IRQreg_ivpr(opp, opp->irq_tim0 + idx);
+ break;
+ case 0x30: /* TIDE (TIDR) */
+ retval = read_IRQreg_idr(opp, opp->irq_tim0 + idx);
+ break;
+ }
+
+out:
+ pr_debug("%s: => 0x%08x\n", __func__, retval);
+ *ptr = retval;
+ return 0;
+}
+
+static int openpic_src_write(void *opaque, gpa_t addr, u32 val)
+{
+ struct openpic *opp = opaque;
+ int idx;
+
+ pr_debug("%s: addr %#llx <= %08x\n", __func__, addr, val);
+
+ addr = addr & 0xffff;
+ idx = addr >> 5;
+
+ switch (addr & 0x1f) {
+ case 0x00:
+ write_IRQreg_ivpr(opp, idx, val);
+ break;
+ case 0x10:
+ write_IRQreg_idr(opp, idx, val);
+ break;
+ case 0x18:
+ write_IRQreg_ilr(opp, idx, val);
+ break;
+ }
+
+ return 0;
+}
+
+static int openpic_src_read(void *opaque, gpa_t addr, u32 *ptr)
+{
+ struct openpic *opp = opaque;
+ uint32_t retval;
+ int idx;
+
+ pr_debug("%s: addr %#llx\n", __func__, addr);
+ retval = 0xFFFFFFFF;
+
+ addr = addr & 0xffff;
+ idx = addr >> 5;
+
+ switch (addr & 0x1f) {
+ case 0x00:
+ retval = read_IRQreg_ivpr(opp, idx);
+ break;
+ case 0x10:
+ retval = read_IRQreg_idr(opp, idx);
+ break;
+ case 0x18:
+ retval = read_IRQreg_ilr(opp, idx);
+ break;
+ }
+
+ pr_debug("%s: => 0x%08x\n", __func__, retval);
+ *ptr = retval;
+ return 0;
+}
+
+static int openpic_msi_write(void *opaque, gpa_t addr, u32 val)
+{
+ struct openpic *opp = opaque;
+ int idx = opp->irq_msi;
+ int srs, ibs;
+
+ pr_debug("%s: addr %#llx <= 0x%08x\n", __func__, addr, val);
+ if (addr & 0xF)
+ return 0;
+
+ switch (addr) {
+ case MSIIR_OFFSET:
+ srs = val >> MSIIR_SRS_SHIFT;
+ idx += srs;
+ ibs = (val & MSIIR_IBS_MASK) >> MSIIR_IBS_SHIFT;
+ opp->msi[srs].msir |= 1 << ibs;
+ openpic_set_irq(opp, idx, 1);
+ break;
+ default:
+ /* most registers are read-only, thus ignored */
+ break;
+ }
+
+ return 0;
+}
+
+static int openpic_msi_read(void *opaque, gpa_t addr, u32 *ptr)
+{
+ struct openpic *opp = opaque;
+ uint32_t r = 0;
+ int i, srs;
+
+ pr_debug("%s: addr %#llx\n", __func__, addr);
+ if (addr & 0xF)
+ return -ENXIO;
+
+ srs = addr >> 4;
+
+ switch (addr) {
+ case 0x00:
+ case 0x10:
+ case 0x20:
+ case 0x30:
+ case 0x40:
+ case 0x50:
+ case 0x60:
+ case 0x70: /* MSIRs */
+ r = opp->msi[srs].msir;
+ /* Clear on read */
+ opp->msi[srs].msir = 0;
+ openpic_set_irq(opp, opp->irq_msi + srs, 0);
+ break;
+ case 0x120: /* MSISR */
+ for (i = 0; i < MAX_MSI; i++)
+ r |= (opp->msi[i].msir ? 1 : 0) << i;
+ break;
+ }
+
+ pr_debug("%s: => 0x%08x\n", __func__, r);
+ *ptr = r;
+ return 0;
+}
+
+static int openpic_summary_read(void *opaque, gpa_t addr, u32 *ptr)
+{
+ uint32_t r = 0;
+
+ pr_debug("%s: addr %#llx\n", __func__, addr);
+
+ /* TODO: EISR/EIMR */
+
+ *ptr = r;
+ return 0;
+}
+
+static int openpic_summary_write(void *opaque, gpa_t addr, u32 val)
+{
+ pr_debug("%s: addr %#llx <= 0x%08x\n", __func__, addr, val);
+
+ /* TODO: EISR/EIMR */
+ return 0;
+}
+
+static int openpic_cpu_write_internal(void *opaque, gpa_t addr,
+ u32 val, int idx)
+{
+ struct openpic *opp = opaque;
+ struct irq_source *src;
+ struct irq_dest *dst;
+ int s_IRQ, n_IRQ;
+
+ pr_debug("%s: cpu %d addr %#llx <= 0x%08x\n", __func__, idx,
+ addr, val);
+
+ if (idx < 0)
+ return 0;
+
+ if (addr & 0xF)
+ return 0;
+
+ dst = &opp->dst[idx];
+ addr &= 0xFF0;
+ switch (addr) {
+ case 0x40: /* IPIDR */
+ case 0x50:
+ case 0x60:
+ case 0x70:
+ idx = (addr - 0x40) >> 4;
+ /* we still use IDE as a mask of which CPUs to deliver the IPI to. */
+ opp->src[opp->irq_ipi0 + idx].destmask |= val;
+ openpic_set_irq(opp, opp->irq_ipi0 + idx, 1);
+ openpic_set_irq(opp, opp->irq_ipi0 + idx, 0);
+ break;
+ case 0x80: /* CTPR */
+ dst->ctpr = val & 0x0000000F;
+
+ pr_debug("%s: set CPU %d ctpr to %d, raised %d servicing %d\n",
+ __func__, idx, dst->ctpr, dst->raised.priority,
+ dst->servicing.priority);
+
+ if (dst->raised.priority <= dst->ctpr) {
+ pr_debug("%s: Lower OpenPIC INT output cpu %d due to ctpr\n",
+ __func__, idx);
+ mpic_irq_lower(opp, dst, ILR_INTTGT_INT);
+ } else if (dst->raised.priority > dst->servicing.priority) {
+ pr_debug("%s: Raise OpenPIC INT output cpu %d irq %d\n",
+ __func__, idx, dst->raised.next);
+ mpic_irq_raise(opp, dst, ILR_INTTGT_INT);
+ }
+
+ break;
+ case 0x90: /* WHOAMI */
+ /* Read-only register */
+ break;
+ case 0xA0: /* IACK */
+ /* Read-only register */
+ break;
+ case 0xB0: { /* EOI */
+ int notify_eoi;
+
+ pr_debug("EOI\n");
+ s_IRQ = IRQ_get_next(opp, &dst->servicing);
+
+ if (s_IRQ < 0) {
+ pr_debug("%s: EOI with no interrupt in service\n",
+ __func__);
+ break;
+ }
+
+ IRQ_resetbit(&dst->servicing, s_IRQ);
+ /* Notify listeners that the IRQ is over */
+ notify_eoi = s_IRQ;
+ /* Set up next servicing IRQ */
+ s_IRQ = IRQ_get_next(opp, &dst->servicing);
+ /* Check queued interrupts. */
+ n_IRQ = IRQ_get_next(opp, &dst->raised);
+ src = &opp->src[n_IRQ];
+ if (n_IRQ != -1 &&
+ (s_IRQ == -1 ||
+ IVPR_PRIORITY(src->ivpr) > dst->servicing.priority)) {
+ pr_debug("Raise OpenPIC INT output cpu %d irq %d\n",
+ idx, n_IRQ);
+ mpic_irq_raise(opp, dst, ILR_INTTGT_INT);
+ }
+
+ spin_unlock(&opp->lock);
+ kvm_notify_acked_irq(opp->kvm, 0, notify_eoi);
+ spin_lock(&opp->lock);
+
+ break;
+ }
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int openpic_cpu_write(void *opaque, gpa_t addr, u32 val)
+{
+ struct openpic *opp = opaque;
+
+ return openpic_cpu_write_internal(opp, addr, val,
+ (addr & 0x1f000) >> 12);
+}
+
+static uint32_t openpic_iack(struct openpic *opp, struct irq_dest *dst,
+ int cpu)
+{
+ struct irq_source *src;
+ int retval, irq;
+
+ pr_debug("Lower OpenPIC INT output\n");
+ mpic_irq_lower(opp, dst, ILR_INTTGT_INT);
+
+ irq = IRQ_get_next(opp, &dst->raised);
+ pr_debug("IACK: irq=%d\n", irq);
+
+ if (irq == -1)
+ /* No more interrupt pending */
+ return opp->spve;
+
+ src = &opp->src[irq];
+ if (!(src->ivpr & IVPR_ACTIVITY_MASK) ||
+ !(IVPR_PRIORITY(src->ivpr) > dst->ctpr)) {
+ pr_err("%s: bad raised IRQ %d ctpr %d ivpr 0x%08x\n",
+ __func__, irq, dst->ctpr, src->ivpr);
+ openpic_update_irq(opp, irq);
+ retval = opp->spve;
+ } else {
+ /* IRQ enter servicing state */
+ IRQ_setbit(&dst->servicing, irq);
+ retval = IVPR_VECTOR(opp, src->ivpr);
+ }
+
+ if (!src->level) {
+ /* edge-sensitive IRQ */
+ src->ivpr &= ~IVPR_ACTIVITY_MASK;
+ src->pending = 0;
+ IRQ_resetbit(&dst->raised, irq);
+ }
+
+ if ((irq >= opp->irq_ipi0) && (irq < (opp->irq_ipi0 + MAX_IPI))) {
+ src->destmask &= ~(1 << cpu);
+ if (src->destmask && !src->level) {
+ /* trigger on CPUs that didn't know about it yet */
+ openpic_set_irq(opp, irq, 1);
+ openpic_set_irq(opp, irq, 0);
+ /* if all CPUs knew about it, set active bit again */
+ src->ivpr |= IVPR_ACTIVITY_MASK;
+ }
+ }
+
+ return retval;
+}
+
+void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu)
+{
+ struct openpic *opp = vcpu->arch.mpic;
+ int cpu = vcpu->arch.irq_cpu_id;
+ unsigned long flags;
+
+ spin_lock_irqsave(&opp->lock, flags);
+
+ if ((opp->gcr & opp->mpic_mode_mask) == GCR_MODE_PROXY)
+ kvmppc_set_epr(vcpu, openpic_iack(opp, &opp->dst[cpu], cpu));
+
+ spin_unlock_irqrestore(&opp->lock, flags);
+}
+
+static int openpic_cpu_read_internal(void *opaque, gpa_t addr,
+ u32 *ptr, int idx)
+{
+ struct openpic *opp = opaque;
+ struct irq_dest *dst;
+ uint32_t retval;
+
+ pr_debug("%s: cpu %d addr %#llx\n", __func__, idx, addr);
+ retval = 0xFFFFFFFF;
+
+ if (idx < 0)
+ goto out;
+
+ if (addr & 0xF)
+ goto out;
+
+ dst = &opp->dst[idx];
+ addr &= 0xFF0;
+ switch (addr) {
+ case 0x80: /* CTPR */
+ retval = dst->ctpr;
+ break;
+ case 0x90: /* WHOAMI */
+ retval = idx;
+ break;
+ case 0xA0: /* IACK */
+ retval = openpic_iack(opp, dst, idx);
+ break;
+ case 0xB0: /* EOI */
+ retval = 0;
+ break;
+ default:
+ break;
+ }
+ pr_debug("%s: => 0x%08x\n", __func__, retval);
+
+out:
+ *ptr = retval;
+ return 0;
+}
+
+static int openpic_cpu_read(void *opaque, gpa_t addr, u32 *ptr)
+{
+ struct openpic *opp = opaque;
+
+ return openpic_cpu_read_internal(opp, addr, ptr,
+ (addr & 0x1f000) >> 12);
+}
+
+struct mem_reg {
+ int (*read)(void *opaque, gpa_t addr, u32 *ptr);
+ int (*write)(void *opaque, gpa_t addr, u32 val);
+ gpa_t start_addr;
+ int size;
+};
+
+static const struct mem_reg openpic_gbl_mmio = {
+ .write = openpic_gbl_write,
+ .read = openpic_gbl_read,
+ .start_addr = OPENPIC_GLB_REG_START,
+ .size = OPENPIC_GLB_REG_SIZE,
+};
+
+static const struct mem_reg openpic_tmr_mmio = {
+ .write = openpic_tmr_write,
+ .read = openpic_tmr_read,
+ .start_addr = OPENPIC_TMR_REG_START,
+ .size = OPENPIC_TMR_REG_SIZE,
+};
+
+static const struct mem_reg openpic_cpu_mmio = {
+ .write = openpic_cpu_write,
+ .read = openpic_cpu_read,
+ .start_addr = OPENPIC_CPU_REG_START,
+ .size = OPENPIC_CPU_REG_SIZE,
+};
+
+static const struct mem_reg openpic_src_mmio = {
+ .write = openpic_src_write,
+ .read = openpic_src_read,
+ .start_addr = OPENPIC_SRC_REG_START,
+ .size = OPENPIC_SRC_REG_SIZE,
+};
+
+static const struct mem_reg openpic_msi_mmio = {
+ .read = openpic_msi_read,
+ .write = openpic_msi_write,
+ .start_addr = OPENPIC_MSI_REG_START,
+ .size = OPENPIC_MSI_REG_SIZE,
+};
+
+static const struct mem_reg openpic_summary_mmio = {
+ .read = openpic_summary_read,
+ .write = openpic_summary_write,
+ .start_addr = OPENPIC_SUMMARY_REG_START,
+ .size = OPENPIC_SUMMARY_REG_SIZE,
+};
+
+static void add_mmio_region(struct openpic *opp, const struct mem_reg *mr)
+{
+ if (opp->num_mmio_regions >= MAX_MMIO_REGIONS) {
+ WARN(1, "kvm mpic: too many mmio regions\n");
+ return;
+ }
+
+ opp->mmio_regions[opp->num_mmio_regions++] = mr;
+}
+
+static void fsl_common_init(struct openpic *opp)
+{
+ int i;
+ int virq = MAX_SRC;
+
+ add_mmio_region(opp, &openpic_msi_mmio);
+ add_mmio_region(opp, &openpic_summary_mmio);
+
+ opp->vid = VID_REVISION_1_2;
+ opp->vir = VIR_GENERIC;
+ opp->vector_mask = 0xFFFF;
+ opp->tfrr_reset = 0;
+ opp->ivpr_reset = IVPR_MASK_MASK;
+ opp->idr_reset = 1 << 0;
+ opp->max_irq = MAX_IRQ;
+
+ opp->irq_ipi0 = virq;
+ virq += MAX_IPI;
+ opp->irq_tim0 = virq;
+ virq += MAX_TMR;
+
+ BUG_ON(virq > MAX_IRQ);
+
+ opp->irq_msi = 224;
+
+ for (i = 0; i < opp->fsl->max_ext; i++)
+ opp->src[i].level = false;
+
+ /* Internal interrupts, including message and MSI */
+ for (i = 16; i < MAX_SRC; i++) {
+ opp->src[i].type = IRQ_TYPE_FSLINT;
+ opp->src[i].level = true;
+ }
+
+ /* timers and IPIs */
+ for (i = MAX_SRC; i < virq; i++) {
+ opp->src[i].type = IRQ_TYPE_FSLSPECIAL;
+ opp->src[i].level = false;
+ }
+}
+
+static int kvm_mpic_read_internal(struct openpic *opp, gpa_t addr, u32 *ptr)
+{
+ int i;
+
+ for (i = 0; i < opp->num_mmio_regions; i++) {
+ const struct mem_reg *mr = opp->mmio_regions[i];
+
+ if (mr->start_addr > addr || addr >= mr->start_addr + mr->size)
+ continue;
+
+ return mr->read(opp, addr - mr->start_addr, ptr);
+ }
+
+ return -ENXIO;
+}
+
+static int kvm_mpic_write_internal(struct openpic *opp, gpa_t addr, u32 val)
+{
+ int i;
+
+ for (i = 0; i < opp->num_mmio_regions; i++) {
+ const struct mem_reg *mr = opp->mmio_regions[i];
+
+ if (mr->start_addr > addr || addr >= mr->start_addr + mr->size)
+ continue;
+
+ return mr->write(opp, addr - mr->start_addr, val);
+ }
+
+ return -ENXIO;
+}
+
+static int kvm_mpic_read(struct kvm_io_device *this, gpa_t addr,
+ int len, void *ptr)
+{
+ struct openpic *opp = container_of(this, struct openpic, mmio);
+ int ret;
+ union {
+ u32 val;
+ u8 bytes[4];
+ } u;
+
+ if (addr & (len - 1)) {
+ pr_debug("%s: bad alignment %llx/%d\n",
+ __func__, addr, len);
+ return -EINVAL;
+ }
+
+ spin_lock_irq(&opp->lock);
+ ret = kvm_mpic_read_internal(opp, addr - opp->reg_base, &u.val);
+ spin_unlock_irq(&opp->lock);
+
+ /*
+ * Technically only 32-bit accesses are allowed, but be nice to
+ * people dumping registers a byte at a time -- it works in real
+ * hardware (reads only, not writes).
+ */
+ if (len == 4) {
+ *(u32 *)ptr = u.val;
+ pr_debug("%s: addr %llx ret %d len 4 val %x\n",
+ __func__, addr, ret, u.val);
+ } else if (len == 1) {
+ *(u8 *)ptr = u.bytes[addr & 3];
+ pr_debug("%s: addr %llx ret %d len 1 val %x\n",
+ __func__, addr, ret, u.bytes[addr & 3]);
+ } else {
+ pr_debug("%s: bad length %d\n", __func__, len);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int kvm_mpic_write(struct kvm_io_device *this, gpa_t addr,
+ int len, const void *ptr)
+{
+ struct openpic *opp = container_of(this, struct openpic, mmio);
+ int ret;
+
+ if (len != 4) {
+ pr_debug("%s: bad length %d\n", __func__, len);
+ return -EOPNOTSUPP;
+ }
+ if (addr & 3) {
+ pr_debug("%s: bad alignment %llx/%d\n", __func__, addr, len);
+ return -EOPNOTSUPP;
+ }
+
+ spin_lock_irq(&opp->lock);
+ ret = kvm_mpic_write_internal(opp, addr - opp->reg_base,
+ *(const u32 *)ptr);
+ spin_unlock_irq(&opp->lock);
+
+ pr_debug("%s: addr %llx ret %d val %x\n",
+ __func__, addr, ret, *(const u32 *)ptr);
+
+ return ret;
+}
+
+static const struct kvm_io_device_ops mpic_mmio_ops = {
+ .read = kvm_mpic_read,
+ .write = kvm_mpic_write,
+};
+
+static void map_mmio(struct openpic *opp)
+{
+ kvm_iodevice_init(&opp->mmio, &mpic_mmio_ops);
+
+ kvm_io_bus_register_dev(opp->kvm, KVM_MMIO_BUS,
+ opp->reg_base, OPENPIC_REG_SIZE,
+ &opp->mmio);
+}
+
+static void unmap_mmio(struct openpic *opp)
+{
+ kvm_io_bus_unregister_dev(opp->kvm, KVM_MMIO_BUS, &opp->mmio);
+}
+
+static int set_base_addr(struct openpic *opp, struct kvm_device_attr *attr)
+{
+ u64 base;
+
+ if (copy_from_user(&base, (u64 __user *)(long)attr->addr, sizeof(u64)))
+ return -EFAULT;
+
+ if (base & 0x3ffff) {
+ pr_debug("kvm mpic %s: KVM_DEV_MPIC_BASE_ADDR %08llx not aligned\n",
+ __func__, base);
+ return -EINVAL;
+ }
+
+ if (base == opp->reg_base)
+ return 0;
+
+ mutex_lock(&opp->kvm->slots_lock);
+
+ unmap_mmio(opp);
+ opp->reg_base = base;
+
+ pr_debug("kvm mpic %s: KVM_DEV_MPIC_BASE_ADDR %08llx\n",
+ __func__, base);
+
+ if (base == 0)
+ goto out;
+
+ map_mmio(opp);
+
+out:
+ mutex_unlock(&opp->kvm->slots_lock);
+ return 0;
+}
+
+#define ATTR_SET 0
+#define ATTR_GET 1
+
+static int access_reg(struct openpic *opp, gpa_t addr, u32 *val, int type)
+{
+ int ret;
+
+ if (addr & 3)
+ return -ENXIO;
+
+ spin_lock_irq(&opp->lock);
+
+ if (type == ATTR_SET)
+ ret = kvm_mpic_write_internal(opp, addr, *val);
+ else
+ ret = kvm_mpic_read_internal(opp, addr, val);
+
+ spin_unlock_irq(&opp->lock);
+
+ pr_debug("%s: type %d addr %llx val %x\n", __func__, type, addr, *val);
+
+ return ret;
+}
+
+static int mpic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
+{
+ struct openpic *opp = dev->private;
+ u32 attr32;
+
+ switch (attr->group) {
+ case KVM_DEV_MPIC_GRP_MISC:
+ switch (attr->attr) {
+ case KVM_DEV_MPIC_BASE_ADDR:
+ return set_base_addr(opp, attr);
+ }
+
+ break;
+
+ case KVM_DEV_MPIC_GRP_REGISTER:
+ if (get_user(attr32, (u32 __user *)(long)attr->addr))
+ return -EFAULT;
+
+ return access_reg(opp, attr->attr, &attr32, ATTR_SET);
+
+ case KVM_DEV_MPIC_GRP_IRQ_ACTIVE:
+ if (attr->attr > MAX_SRC)
+ return -EINVAL;
+
+ if (get_user(attr32, (u32 __user *)(long)attr->addr))
+ return -EFAULT;
+
+ if (attr32 != 0 && attr32 != 1)
+ return -EINVAL;
+
+ spin_lock_irq(&opp->lock);
+ openpic_set_irq(opp, attr->attr, attr32);
+ spin_unlock_irq(&opp->lock);
+ return 0;
+ }
+
+ return -ENXIO;
+}
+
+static int mpic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
+{
+ struct openpic *opp = dev->private;
+ u64 attr64;
+ u32 attr32;
+ int ret;
+
+ switch (attr->group) {
+ case KVM_DEV_MPIC_GRP_MISC:
+ switch (attr->attr) {
+ case KVM_DEV_MPIC_BASE_ADDR:
+ mutex_lock(&opp->kvm->slots_lock);
+ attr64 = opp->reg_base;
+ mutex_unlock(&opp->kvm->slots_lock);
+
+ if (copy_to_user((u64 __user *)(long)attr->addr,
+ &attr64, sizeof(u64)))
+ return -EFAULT;
+
+ return 0;
+ }
+
+ break;
+
+ case KVM_DEV_MPIC_GRP_REGISTER:
+ ret = access_reg(opp, attr->attr, &attr32, ATTR_GET);
+ if (ret)
+ return ret;
+
+ if (put_user(attr32, (u32 __user *)(long)attr->addr))
+ return -EFAULT;
+
+ return 0;
+
+ case KVM_DEV_MPIC_GRP_IRQ_ACTIVE:
+ if (attr->attr > MAX_SRC)
+ return -EINVAL;
+
+ spin_lock_irq(&opp->lock);
+ attr32 = opp->src[attr->attr].pending;
+ spin_unlock_irq(&opp->lock);
+
+ if (put_user(attr32, (u32 __user *)(long)attr->addr))
+ return -EFAULT;
+
+ return 0;
+ }
+
+ return -ENXIO;
+}
+
+static int mpic_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
+{
+ switch (attr->group) {
+ case KVM_DEV_MPIC_GRP_MISC:
+ switch (attr->attr) {
+ case KVM_DEV_MPIC_BASE_ADDR:
+ return 0;
+ }
+
+ break;
+
+ case KVM_DEV_MPIC_GRP_REGISTER:
+ return 0;
+
+ case KVM_DEV_MPIC_GRP_IRQ_ACTIVE:
+ if (attr->attr > MAX_SRC)
+ break;
+
+ return 0;
+ }
+
+ return -ENXIO;
+}
+
+static void mpic_destroy(struct kvm_device *dev)
+{
+ struct openpic *opp = dev->private;
+
+ dev->kvm->arch.mpic = NULL;
+ kfree(opp);
+}
+
+static int mpic_set_default_irq_routing(struct openpic *opp)
+{
+ struct kvm_irq_routing_entry *routing;
+
+ /* Create a nop default map, so that dereferencing it still works */
+ routing = kzalloc((sizeof(*routing)), GFP_KERNEL);
+ if (!routing)
+ return -ENOMEM;
+
+ kvm_set_irq_routing(opp->kvm, routing, 0, 0);
+
+ kfree(routing);
+ return 0;
+}
+
+static int mpic_create(struct kvm_device *dev, u32 type)
+{
+ struct openpic *opp;
+ int ret;
+
+ /* We only support one MPIC at a time for now */
+ if (dev->kvm->arch.mpic)
+ return -EINVAL;
+
+ opp = kzalloc(sizeof(struct openpic), GFP_KERNEL);
+ if (!opp)
+ return -ENOMEM;
+
+ dev->private = opp;
+ opp->kvm = dev->kvm;
+ opp->dev = dev;
+ opp->model = type;
+ spin_lock_init(&opp->lock);
+
+ add_mmio_region(opp, &openpic_gbl_mmio);
+ add_mmio_region(opp, &openpic_tmr_mmio);
+ add_mmio_region(opp, &openpic_src_mmio);
+ add_mmio_region(opp, &openpic_cpu_mmio);
+
+ switch (opp->model) {
+ case KVM_DEV_TYPE_FSL_MPIC_20:
+ opp->fsl = &fsl_mpic_20;
+ opp->brr1 = 0x00400200;
+ opp->flags |= OPENPIC_FLAG_IDR_CRIT;
+ opp->nb_irqs = 80;
+ opp->mpic_mode_mask = GCR_MODE_MIXED;
+
+ fsl_common_init(opp);
+
+ break;
+
+ case KVM_DEV_TYPE_FSL_MPIC_42:
+ opp->fsl = &fsl_mpic_42;
+ opp->brr1 = 0x00400402;
+ opp->flags |= OPENPIC_FLAG_ILR;
+ opp->nb_irqs = 196;
+ opp->mpic_mode_mask = GCR_MODE_PROXY;
+
+ fsl_common_init(opp);
+
+ break;
+
+ default:
+ ret = -ENODEV;
+ goto err;
+ }
+
+ ret = mpic_set_default_irq_routing(opp);
+ if (ret)
+ goto err;
+
+ openpic_reset(opp);
+
+ smp_wmb();
+ dev->kvm->arch.mpic = opp;
+
+ return 0;
+
+err:
+ kfree(opp);
+ return ret;
+}
+
+struct kvm_device_ops kvm_mpic_ops = {
+ .name = "kvm-mpic",
+ .create = mpic_create,
+ .destroy = mpic_destroy,
+ .set_attr = mpic_set_attr,
+ .get_attr = mpic_get_attr,
+ .has_attr = mpic_has_attr,
+};
+
+int kvmppc_mpic_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
+ u32 cpu)
+{
+ struct openpic *opp = dev->private;
+ int ret = 0;
+
+ if (dev->ops != &kvm_mpic_ops)
+ return -EPERM;
+ if (opp->kvm != vcpu->kvm)
+ return -EPERM;
+ if (cpu < 0 || cpu >= MAX_CPU)
+ return -EPERM;
+
+ spin_lock_irq(&opp->lock);
+
+ if (opp->dst[cpu].vcpu) {
+ ret = -EEXIST;
+ goto out;
+ }
+ if (vcpu->arch.irq_type) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ opp->dst[cpu].vcpu = vcpu;
+ opp->nb_cpus = max(opp->nb_cpus, cpu + 1);
+
+ vcpu->arch.mpic = opp;
+ vcpu->arch.irq_cpu_id = cpu;
+ vcpu->arch.irq_type = KVMPPC_IRQ_MPIC;
+
+ /* This might need to be changed if GCR gets extended */
+ if (opp->mpic_mode_mask == GCR_MODE_PROXY)
+ vcpu->arch.epr_flags |= KVMPPC_EPR_KERNEL;
+
+out:
+ spin_unlock_irq(&opp->lock);
+ return ret;
+}
+
+/*
+ * This should only happen immediately before the mpic is destroyed,
+ * so we shouldn't need to worry about anything still trying to
+ * access the vcpu pointer.
+ */
+void kvmppc_mpic_disconnect_vcpu(struct openpic *opp, struct kvm_vcpu *vcpu)
+{
+ BUG_ON(!opp->dst[vcpu->arch.irq_cpu_id].vcpu);
+
+ opp->dst[vcpu->arch.irq_cpu_id].vcpu = NULL;
+}
+
+/*
+ * Return value:
+ * < 0 Interrupt was ignored (masked or not delivered for other reasons)
+ * = 0 Interrupt was coalesced (previous irq is still pending)
+ * > 0 Number of CPUs interrupt was delivered to
+ */
+static int mpic_set_irq(struct kvm_kernel_irq_routing_entry *e,
+ struct kvm *kvm, int irq_source_id, int level,
+ bool line_status)
+{
+ u32 irq = e->irqchip.pin;
+ struct openpic *opp = kvm->arch.mpic;
+ unsigned long flags;
+
+ spin_lock_irqsave(&opp->lock, flags);
+ openpic_set_irq(opp, irq, level);
+ spin_unlock_irqrestore(&opp->lock, flags);
+
+ /* All code paths we care about don't check for the return value */
+ return 0;
+}
+
+int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
+ struct kvm *kvm, int irq_source_id, int level, bool line_status)
+{
+ struct openpic *opp = kvm->arch.mpic;
+ unsigned long flags;
+
+ spin_lock_irqsave(&opp->lock, flags);
+
+ /*
+ * XXX We ignore the target address for now, as we only support
+ * a single MSI bank.
+ */
+ openpic_msi_write(kvm->arch.mpic, MSIIR_OFFSET, e->msi.data);
+ spin_unlock_irqrestore(&opp->lock, flags);
+
+ /* All code paths we care about don't check for the return value */
+ return 0;
+}
+
+int kvm_set_routing_entry(struct kvm_irq_routing_table *rt,
+ struct kvm_kernel_irq_routing_entry *e,
+ const struct kvm_irq_routing_entry *ue)
+{
+ int r = -EINVAL;
+
+ switch (ue->type) {
+ case KVM_IRQ_ROUTING_IRQCHIP:
+ e->set = mpic_set_irq;
+ e->irqchip.irqchip = ue->u.irqchip.irqchip;
+ e->irqchip.pin = ue->u.irqchip.pin;
+ if (e->irqchip.pin >= KVM_IRQCHIP_NUM_PINS)
+ goto out;
+ rt->chip[ue->u.irqchip.irqchip][e->irqchip.pin] = ue->gsi;
+ break;
+ case KVM_IRQ_ROUTING_MSI:
+ e->set = kvm_set_msi;
+ e->msi.address_lo = ue->u.msi.address_lo;
+ e->msi.address_hi = ue->u.msi.address_hi;
+ e->msi.data = ue->u.msi.data;
+ break;
+ default:
+ goto out;
+ }
+
+ r = 0;
+out:
+ return r;
+}
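The in-kernel MPIC added above is driven entirely through the generic KVM device API. As a rough illustration only (not part of this patch, and assuming the usual /dev/kvm VM fd setup), userspace would create the device and place its 256KiB register block roughly like this:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch only: create an emulated FSL MPIC v2.0 and set its base address.
 * reg_base must be 256KiB-aligned, matching the check in set_base_addr(). */
static int create_mpic(int vm_fd, uint64_t reg_base)
{
	struct kvm_create_device cd = {
		.type = KVM_DEV_TYPE_FSL_MPIC_20,	/* or ..._42 */
	};
	struct kvm_device_attr attr = {
		.group = KVM_DEV_MPIC_GRP_MISC,
		.attr = KVM_DEV_MPIC_BASE_ADDR,
		.addr = (uint64_t)(uintptr_t)&reg_base,	/* pointer to a u64 */
	};

	if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd) < 0)
		return -1;
	if (ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr) < 0)
		return -1;

	return cd.fd;	/* device fd, later handed to KVM_CAP_IRQ_MPIC */
}

The device fd returned here is what kvm_device_from_filp() resolves in the KVM_CAP_IRQ_MPIC handler in powerpc.c below.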
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 934413cd3a1b..6316ee336e88 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -25,6 +25,7 @@
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/slab.h>
+#include <linux/file.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
@@ -32,6 +33,7 @@
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include "timing.h"
+#include "irq.h"
#include "../mm/mmu_decl.h"
#define CREATE_TRACE_POINTS
@@ -317,6 +319,7 @@ int kvm_dev_ioctl_check_extension(long ext)
case KVM_CAP_ENABLE_CAP:
case KVM_CAP_ONE_REG:
case KVM_CAP_IOEVENTFD:
+ case KVM_CAP_DEVICE_CTRL:
r = 1;
break;
#ifndef CONFIG_KVM_BOOK3S_64_HV
@@ -326,6 +329,9 @@ int kvm_dev_ioctl_check_extension(long ext)
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
case KVM_CAP_SW_TLB:
#endif
+#ifdef CONFIG_KVM_MPIC
+ case KVM_CAP_IRQ_MPIC:
+#endif
r = 1;
break;
case KVM_CAP_COALESCED_MMIO:
@@ -335,6 +341,10 @@ int kvm_dev_ioctl_check_extension(long ext)
#ifdef CONFIG_PPC_BOOK3S_64
case KVM_CAP_SPAPR_TCE:
case KVM_CAP_PPC_ALLOC_HTAB:
+ case KVM_CAP_PPC_RTAS:
+#ifdef CONFIG_KVM_XICS
+ case KVM_CAP_IRQ_XICS:
+#endif
r = 1;
break;
#endif /* CONFIG_PPC_BOOK3S_64 */
@@ -411,18 +421,17 @@ int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
}
int kvm_arch_prepare_memory_region(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
- struct kvm_memory_slot old,
- struct kvm_userspace_memory_region *mem,
- bool user_alloc)
+ struct kvm_memory_slot *memslot,
+ struct kvm_userspace_memory_region *mem,
+ enum kvm_mr_change change)
{
return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
}
void kvm_arch_commit_memory_region(struct kvm *kvm,
- struct kvm_userspace_memory_region *mem,
- struct kvm_memory_slot old,
- bool user_alloc)
+ struct kvm_userspace_memory_region *mem,
+ const struct kvm_memory_slot *old,
+ enum kvm_mr_change change)
{
kvmppc_core_commit_memory_region(kvm, mem, old);
}
@@ -460,6 +469,16 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
tasklet_kill(&vcpu->arch.tasklet);
kvmppc_remove_vcpu_debugfs(vcpu);
+
+ switch (vcpu->arch.irq_type) {
+ case KVMPPC_IRQ_MPIC:
+ kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
+ break;
+ case KVMPPC_IRQ_XICS:
+ kvmppc_xics_free_icp(vcpu);
+ break;
+ }
+
kvmppc_core_vcpu_free(vcpu);
}
@@ -532,12 +551,6 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
#endif
}
-int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
- struct kvm_guest_debug *dbg)
-{
- return -EINVAL;
-}
-
static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
struct kvm_run *run)
{
@@ -612,6 +625,8 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int rt, unsigned int bytes, int is_bigendian)
{
+ int idx, ret;
+
if (bytes > sizeof(run->mmio.data)) {
printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
run->mmio.len);
@@ -627,8 +642,14 @@ int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu->mmio_is_write = 0;
vcpu->arch.mmio_sign_extend = 0;
- if (!kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr,
- bytes, &run->mmio.data)) {
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
+
+ ret = kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr,
+ bytes, &run->mmio.data);
+
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
+
+ if (!ret) {
kvmppc_complete_mmio_load(vcpu, run);
vcpu->mmio_needed = 0;
return EMULATE_DONE;
@@ -653,6 +674,7 @@ int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
u64 val, unsigned int bytes, int is_bigendian)
{
void *data = run->mmio.data;
+ int idx, ret;
if (bytes > sizeof(run->mmio.data)) {
printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
@@ -682,9 +704,14 @@ int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
}
}
- if (!kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr,
- bytes, &run->mmio.data)) {
- kvmppc_complete_mmio_load(vcpu, run);
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
+
+ ret = kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr,
+ bytes, &run->mmio.data);
+
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
+
+ if (!ret) {
vcpu->mmio_needed = 0;
return EMULATE_DONE;
}
@@ -740,7 +767,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
if (irq->irq == KVM_INTERRUPT_UNSET) {
- kvmppc_core_dequeue_external(vcpu, irq);
+ kvmppc_core_dequeue_external(vcpu);
return 0;
}
@@ -770,7 +797,10 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
break;
case KVM_CAP_PPC_EPR:
r = 0;
- vcpu->arch.epr_enabled = cap->args[0];
+ if (cap->args[0])
+ vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
+ else
+ vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
break;
#ifdef CONFIG_BOOKE
case KVM_CAP_PPC_BOOKE_WATCHDOG:
@@ -791,6 +821,44 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
break;
}
#endif
+#ifdef CONFIG_KVM_MPIC
+ case KVM_CAP_IRQ_MPIC: {
+ struct file *filp;
+ struct kvm_device *dev;
+
+ r = -EBADF;
+ filp = fget(cap->args[0]);
+ if (!filp)
+ break;
+
+ r = -EPERM;
+ dev = kvm_device_from_filp(filp);
+ if (dev)
+ r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);
+
+ fput(filp);
+ break;
+ }
+#endif
+#ifdef CONFIG_KVM_XICS
+ case KVM_CAP_IRQ_XICS: {
+ struct file *filp;
+ struct kvm_device *dev;
+
+ r = -EBADF;
+ filp = fget(cap->args[0]);
+ if (!filp)
+ break;
+
+ r = -EPERM;
+ dev = kvm_device_from_filp(filp);
+ if (dev)
+ r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);
+
+ fput(filp);
+ break;
+ }
+#endif /* CONFIG_KVM_XICS */
default:
r = -EINVAL;
break;
@@ -913,9 +981,22 @@ static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
return 0;
}
+int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
+ bool line_status)
+{
+ if (!irqchip_in_kernel(kvm))
+ return -ENXIO;
+
+ irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
+ irq_event->irq, irq_event->level,
+ line_status);
+ return 0;
+}
+
long kvm_arch_vm_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
+ struct kvm *kvm __maybe_unused = filp->private_data;
void __user *argp = (void __user *)arg;
long r;
@@ -934,7 +1015,6 @@ long kvm_arch_vm_ioctl(struct file *filp,
#ifdef CONFIG_PPC_BOOK3S_64
case KVM_CREATE_SPAPR_TCE: {
struct kvm_create_spapr_tce create_tce;
- struct kvm *kvm = filp->private_data;
r = -EFAULT;
if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
@@ -946,8 +1026,8 @@ long kvm_arch_vm_ioctl(struct file *filp,
#ifdef CONFIG_KVM_BOOK3S_64_HV
case KVM_ALLOCATE_RMA: {
- struct kvm *kvm = filp->private_data;
struct kvm_allocate_rma rma;
+ struct kvm *kvm = filp->private_data;
r = kvm_vm_ioctl_allocate_rma(kvm, &rma);
if (r >= 0 && copy_to_user(argp, &rma, sizeof(rma)))
@@ -956,7 +1036,6 @@ long kvm_arch_vm_ioctl(struct file *filp,
}
case KVM_PPC_ALLOCATE_HTAB: {
- struct kvm *kvm = filp->private_data;
u32 htab_order;
r = -EFAULT;
@@ -973,7 +1052,6 @@ long kvm_arch_vm_ioctl(struct file *filp,
}
case KVM_PPC_GET_HTAB_FD: {
- struct kvm *kvm = filp->private_data;
struct kvm_get_htab_fd ghf;
r = -EFAULT;
@@ -986,7 +1064,6 @@ long kvm_arch_vm_ioctl(struct file *filp,
#ifdef CONFIG_PPC_BOOK3S_64
case KVM_PPC_GET_SMMU_INFO: {
- struct kvm *kvm = filp->private_data;
struct kvm_ppc_smmu_info info;
memset(&info, 0, sizeof(info));
@@ -995,6 +1072,12 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = -EFAULT;
break;
}
+ case KVM_PPC_RTAS_DEFINE_TOKEN: {
+ struct kvm *kvm = filp->private_data;
+
+ r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
+ break;
+ }
#endif /* CONFIG_PPC_BOOK3S_64 */
default:
r = -ENOTTY;
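A vcpu is then bound to the in-kernel MPIC through KVM_ENABLE_CAP, and interrupt lines can be driven through the KVM_IRQ_LINE path added above. Again this is only a hedged sketch reusing the hypothetical fds from the earlier example:

/* Sketch only: args[0] is the MPIC device fd, args[1] the MPIC cpu number,
 * as consumed by kvm_vcpu_ioctl_enable_cap() above. */
static int connect_vcpu_to_mpic(int vcpu_fd, int mpic_fd, int cpu)
{
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_IRQ_MPIC,
		.args = { mpic_fd, cpu },
	};

	return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
}

/* Sketch only: once irq routing is in place, a source pin can be raised or
 * lowered from userspace; this lands in kvm_vm_ioctl_irq_line() above. */
static int set_irq_line(int vm_fd, unsigned int pin, int level)
{
	struct kvm_irq_level irq = {
		.irq = pin,
		.level = level,
	};

	return ioctl(vm_fd, KVM_IRQ_LINE, &irq);
}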
diff --git a/arch/powerpc/mm/gup.c b/arch/powerpc/mm/gup.c
index d7efdbf640c7..4b921affa495 100644
--- a/arch/powerpc/mm/gup.c
+++ b/arch/powerpc/mm/gup.c
@@ -68,7 +68,11 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
next = pmd_addr_end(addr, end);
if (pmd_none(pmd))
return 0;
- if (is_hugepd(pmdp)) {
+ if (pmd_huge(pmd)) {
+ if (!gup_hugepte((pte_t *)pmdp, PMD_SIZE, addr, next,
+ write, pages, nr))
+ return 0;
+ } else if (is_hugepd(pmdp)) {
if (!gup_hugepd((hugepd_t *)pmdp, PMD_SHIFT,
addr, next, write, pages, nr))
return 0;
@@ -92,7 +96,11 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
next = pud_addr_end(addr, end);
if (pud_none(pud))
return 0;
- if (is_hugepd(pudp)) {
+ if (pud_huge(pud)) {
+ if (!gup_hugepte((pte_t *)pudp, PUD_SIZE, addr, next,
+ write, pages, nr))
+ return 0;
+ } else if (is_hugepd(pudp)) {
if (!gup_hugepd((hugepd_t *)pudp, PUD_SHIFT,
addr, next, write, pages, nr))
return 0;
@@ -153,7 +161,11 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
next = pgd_addr_end(addr, end);
if (pgd_none(pgd))
goto slow;
- if (is_hugepd(pgdp)) {
+ if (pgd_huge(pgd)) {
+ if (!gup_hugepte((pte_t *)pgdp, PGDIR_SIZE, addr, next,
+ write, pages, &nr))
+ goto slow;
+ } else if (is_hugepd(pgdp)) {
if (!gup_hugepd((hugepd_t *)pgdp, PGDIR_SHIFT,
addr, next, write, pages, &nr))
goto slow;
diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S
index 7443481a315c..0e980acae67c 100644
--- a/arch/powerpc/mm/hash_low_64.S
+++ b/arch/powerpc/mm/hash_low_64.S
@@ -196,7 +196,8 @@ htab_insert_pte:
mr r4,r29 /* Retrieve vpn */
li r7,0 /* !bolted, !secondary */
li r8,MMU_PAGE_4K /* page size */
- ld r9,STK_PARAM(R9)(r1) /* segment size */
+ li r9,MMU_PAGE_4K /* actual page size */
+ ld r10,STK_PARAM(R9)(r1) /* segment size */
_GLOBAL(htab_call_hpte_insert1)
bl . /* Patched by htab_finish_init() */
cmpdi 0,r3,0
@@ -219,7 +220,8 @@ _GLOBAL(htab_call_hpte_insert1)
mr r4,r29 /* Retrieve vpn */
li r7,HPTE_V_SECONDARY /* !bolted, secondary */
li r8,MMU_PAGE_4K /* page size */
- ld r9,STK_PARAM(R9)(r1) /* segment size */
+ li r9,MMU_PAGE_4K /* actual page size */
+ ld r10,STK_PARAM(R9)(r1) /* segment size */
_GLOBAL(htab_call_hpte_insert2)
bl . /* Patched by htab_finish_init() */
cmpdi 0,r3,0
@@ -490,7 +492,7 @@ END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE)
beq htab_inval_old_hpte
ld r6,STK_PARAM(R6)(r1)
- ori r26,r6,0x8000 /* Load the hidx mask */
+ ori r26,r6,PTE_PAGE_HIDX_OFFSET /* Load the hidx mask. */
ld r26,0(r26)
addi r5,r25,36 /* Check actual HPTE_SUB bit, this */
rldcr. r0,r31,r5,0 /* must match pgtable.h definition */
@@ -515,7 +517,8 @@ htab_special_pfn:
mr r4,r29 /* Retrieve vpn */
li r7,0 /* !bolted, !secondary */
li r8,MMU_PAGE_4K /* page size */
- ld r9,STK_PARAM(R9)(r1) /* segment size */
+ li r9,MMU_PAGE_4K /* actual page size */
+ ld r10,STK_PARAM(R9)(r1) /* segment size */
_GLOBAL(htab_call_hpte_insert1)
bl . /* patched by htab_finish_init() */
cmpdi 0,r3,0
@@ -542,7 +545,8 @@ _GLOBAL(htab_call_hpte_insert1)
mr r4,r29 /* Retrieve vpn */
li r7,HPTE_V_SECONDARY /* !bolted, secondary */
li r8,MMU_PAGE_4K /* page size */
- ld r9,STK_PARAM(R9)(r1) /* segment size */
+ li r9,MMU_PAGE_4K /* actual page size */
+ ld r10,STK_PARAM(R9)(r1) /* segment size */
_GLOBAL(htab_call_hpte_insert2)
bl . /* patched by htab_finish_init() */
cmpdi 0,r3,0
@@ -607,7 +611,7 @@ htab_pte_insert_ok:
sld r4,r4,r5
andc r26,r26,r4
or r26,r26,r3
- ori r5,r6,0x8000
+ ori r5,r6,PTE_PAGE_HIDX_OFFSET
std r26,0(r5)
lwsync
std r30,0(r6)
@@ -840,7 +844,8 @@ ht64_insert_pte:
mr r4,r29 /* Retrieve vpn */
li r7,0 /* !bolted, !secondary */
li r8,MMU_PAGE_64K
- ld r9,STK_PARAM(R9)(r1) /* segment size */
+ li r9,MMU_PAGE_64K /* actual page size */
+ ld r10,STK_PARAM(R9)(r1) /* segment size */
_GLOBAL(ht64_call_hpte_insert1)
bl . /* patched by htab_finish_init() */
cmpdi 0,r3,0
@@ -863,7 +868,8 @@ _GLOBAL(ht64_call_hpte_insert1)
mr r4,r29 /* Retrieve vpn */
li r7,HPTE_V_SECONDARY /* !bolted, secondary */
li r8,MMU_PAGE_64K
- ld r9,STK_PARAM(R9)(r1) /* segment size */
+ li r9,MMU_PAGE_64K /* actual page size */
+ ld r10,STK_PARAM(R9)(r1) /* segment size */
_GLOBAL(ht64_call_hpte_insert2)
bl . /* patched by htab_finish_init() */
cmpdi 0,r3,0
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index ffc1e00f7a22..6a2aead5b0e5 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -39,7 +39,7 @@
DEFINE_RAW_SPINLOCK(native_tlbie_lock);
-static inline void __tlbie(unsigned long vpn, int psize, int ssize)
+static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
{
unsigned long va;
unsigned int penc;
@@ -61,17 +61,31 @@ static inline void __tlbie(unsigned long vpn, int psize, int ssize)
switch (psize) {
case MMU_PAGE_4K:
+ /* clear out bits after (52) [0....52.....63] */
+ va &= ~((1ul << (64 - 52)) - 1);
va |= ssize << 8;
+ va |= mmu_psize_defs[apsize].sllp << 6;
asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
: : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
: "memory");
break;
default:
/* We need 14 to 14 + i bits of va */
- penc = mmu_psize_defs[psize].penc;
- va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
+ penc = mmu_psize_defs[psize].penc[apsize];
+ va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
va |= penc << 12;
va |= ssize << 8;
+ /* Add AVAL part */
+ if (psize != apsize) {
+ /*
+ * MPSS, 64K base page size and 16MB large page size.
+ * We don't need all the bits, but the rest of the bits
+ * must be ignored by the processor.
+ * vpn covers up to 65 bits of va (0...65) and we need
+ * 58..64 bits of va.
+ */
+ va |= (vpn & 0xfe);
+ }
va |= 1; /* L */
asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2)
: : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
@@ -80,7 +94,7 @@ static inline void __tlbie(unsigned long vpn, int psize, int ssize)
}
}
-static inline void __tlbiel(unsigned long vpn, int psize, int ssize)
+static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
{
unsigned long va;
unsigned int penc;
@@ -96,16 +110,30 @@ static inline void __tlbiel(unsigned long vpn, int psize, int ssize)
switch (psize) {
case MMU_PAGE_4K:
+ /* clear out bits after (52) [0....52.....63] */
+ va &= ~((1ul << (64 - 52)) - 1);
va |= ssize << 8;
+ va |= mmu_psize_defs[apsize].sllp << 6;
asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)"
: : "r"(va) : "memory");
break;
default:
/* We need 14 to 14 + i bits of va */
- penc = mmu_psize_defs[psize].penc;
- va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
+ penc = mmu_psize_defs[psize].penc[apsize];
+ va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
va |= penc << 12;
va |= ssize << 8;
+ /* Add AVAL part */
+ if (psize != apsize) {
+ /*
+ * MPSS: 64K base page size and 16MB large page size.
+ * We don't need all the bits, but the rest of the bits
+ * must be ignored by the processor.
+ * vpn covers up to 65 bits of va (0...65) and we need
+ * bits 58..64 of va.
+ */
+ va |= (vpn & 0xfe);
+ }
va |= 1; /* L */
asm volatile(".long 0x7c000224 | (%0 << 11) | (1 << 21)"
: : "r"(va) : "memory");
@@ -114,7 +142,8 @@ static inline void __tlbiel(unsigned long vpn, int psize, int ssize)
}
-static inline void tlbie(unsigned long vpn, int psize, int ssize, int local)
+static inline void tlbie(unsigned long vpn, int psize, int apsize,
+ int ssize, int local)
{
unsigned int use_local = local && mmu_has_feature(MMU_FTR_TLBIEL);
int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
@@ -125,10 +154,10 @@ static inline void tlbie(unsigned long vpn, int psize, int ssize, int local)
raw_spin_lock(&native_tlbie_lock);
asm volatile("ptesync": : :"memory");
if (use_local) {
- __tlbiel(vpn, psize, ssize);
+ __tlbiel(vpn, psize, apsize, ssize);
asm volatile("ptesync": : :"memory");
} else {
- __tlbie(vpn, psize, ssize);
+ __tlbie(vpn, psize, apsize, ssize);
asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
if (lock_tlbie && !use_local)
@@ -156,7 +185,7 @@ static inline void native_unlock_hpte(struct hash_pte *hptep)
static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
unsigned long pa, unsigned long rflags,
- unsigned long vflags, int psize, int ssize)
+ unsigned long vflags, int psize, int apsize, int ssize)
{
struct hash_pte *hptep = htab_address + hpte_group;
unsigned long hpte_v, hpte_r;
@@ -183,8 +212,8 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
if (i == HPTES_PER_GROUP)
return -1;
- hpte_v = hpte_encode_v(vpn, psize, ssize) | vflags | HPTE_V_VALID;
- hpte_r = hpte_encode_r(pa, psize) | rflags;
+ hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
+ hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;
if (!(vflags & HPTE_V_BOLTED)) {
DBG_LOW(" i=%x hpte_v=%016lx, hpte_r=%016lx\n",
@@ -244,6 +273,51 @@ static long native_hpte_remove(unsigned long hpte_group)
return i;
}
+static inline int __hpte_actual_psize(unsigned int lp, int psize)
+{
+ int i, shift;
+ unsigned int mask;
+
+ /* start from 1 ignoring MMU_PAGE_4K */
+ for (i = 1; i < MMU_PAGE_COUNT; i++) {
+
+ /* invalid penc */
+ if (mmu_psize_defs[psize].penc[i] == -1)
+ continue;
+ /*
+ * encoding bits per actual page size
+ * PTE LP actual page size
+ * rrrr rrrz >=8KB
+ * rrrr rrzz >=16KB
+ * rrrr rzzz >=32KB
+ * rrrr zzzz >=64KB
+ * .......
+ */
+ shift = mmu_psize_defs[i].shift - LP_SHIFT;
+ if (shift > LP_BITS)
+ shift = LP_BITS;
+ mask = (1 << shift) - 1;
+ if ((lp & mask) == mmu_psize_defs[psize].penc[i])
+ return i;
+ }
+ return -1;
+}
+
+static inline int hpte_actual_psize(struct hash_pte *hptep, int psize)
+{
+ /* Look at the 8 bit LP value */
+ unsigned int lp = (hptep->r >> LP_SHIFT) & ((1 << LP_BITS) - 1);
+
+ if (!(hptep->v & HPTE_V_VALID))
+ return -1;
+
+ /* First check if it is a large page */
+ if (!(hptep->v & HPTE_V_LARGE))
+ return MMU_PAGE_4K;
+
+ return __hpte_actual_psize(lp, psize);
+}
+
static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
unsigned long vpn, int psize, int ssize,
int local)
@@ -251,8 +325,9 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
struct hash_pte *hptep = htab_address + slot;
unsigned long hpte_v, want_v;
int ret = 0;
+ int actual_psize;
- want_v = hpte_encode_v(vpn, psize, ssize);
+ want_v = hpte_encode_avpn(vpn, psize, ssize);
DBG_LOW(" update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)",
vpn, want_v & HPTE_V_AVPN, slot, newpp);
@@ -260,9 +335,13 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
native_lock_hpte(hptep);
hpte_v = hptep->v;
-
+ actual_psize = hpte_actual_psize(hptep, psize);
+ if (actual_psize < 0) {
+ native_unlock_hpte(hptep);
+ return -1;
+ }
/* Even if we miss, we need to invalidate the TLB */
- if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) {
+ if (!HPTE_V_COMPARE(hpte_v, want_v)) {
DBG_LOW(" -> miss\n");
ret = -1;
} else {
@@ -274,7 +353,7 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
native_unlock_hpte(hptep);
/* Ensure it is out of the tlb too. */
- tlbie(vpn, psize, ssize, local);
+ tlbie(vpn, psize, actual_psize, ssize, local);
return ret;
}
@@ -288,7 +367,7 @@ static long native_hpte_find(unsigned long vpn, int psize, int ssize)
unsigned long want_v, hpte_v;
hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
- want_v = hpte_encode_v(vpn, psize, ssize);
+ want_v = hpte_encode_avpn(vpn, psize, ssize);
/* Bolted mappings are only ever in the primary group */
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
@@ -315,6 +394,7 @@ static long native_hpte_find(unsigned long vpn, int psize, int ssize)
static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
int psize, int ssize)
{
+ int actual_psize;
unsigned long vpn;
unsigned long vsid;
long slot;
@@ -327,13 +407,16 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
if (slot == -1)
panic("could not find page to bolt\n");
hptep = htab_address + slot;
+ actual_psize = hpte_actual_psize(hptep, psize);
+ if (actual_psize < 0)
+ return;
/* Update the HPTE */
hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
(newpp & (HPTE_R_PP | HPTE_R_N));
/* Ensure it is out of the tlb too. */
- tlbie(vpn, psize, ssize, 0);
+ tlbie(vpn, psize, actual_psize, ssize, 0);
}
static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
@@ -343,64 +426,60 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
unsigned long hpte_v;
unsigned long want_v;
unsigned long flags;
+ int actual_psize;
local_irq_save(flags);
DBG_LOW(" invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot);
- want_v = hpte_encode_v(vpn, psize, ssize);
+ want_v = hpte_encode_avpn(vpn, psize, ssize);
native_lock_hpte(hptep);
hpte_v = hptep->v;
+ actual_psize = hpte_actual_psize(hptep, psize);
+ if (actual_psize < 0) {
+ native_unlock_hpte(hptep);
+ local_irq_restore(flags);
+ return;
+ }
/* Even if we miss, we need to invalidate the TLB */
- if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
+ if (!HPTE_V_COMPARE(hpte_v, want_v))
native_unlock_hpte(hptep);
else
/* Invalidate the hpte. NOTE: this also unlocks it */
hptep->v = 0;
/* Invalidate the TLB */
- tlbie(vpn, psize, ssize, local);
+ tlbie(vpn, psize, actual_psize, ssize, local);
local_irq_restore(flags);
}
-#define LP_SHIFT 12
-#define LP_BITS 8
-#define LP_MASK(i) ((0xFF >> (i)) << LP_SHIFT)
-
static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
- int *psize, int *ssize, unsigned long *vpn)
+ int *psize, int *apsize, int *ssize, unsigned long *vpn)
{
unsigned long avpn, pteg, vpi;
- unsigned long hpte_r = hpte->r;
unsigned long hpte_v = hpte->v;
unsigned long vsid, seg_off;
- int i, size, shift, penc;
+ int size, a_size, shift;
+ /* Look at the 8 bit LP value */
+ unsigned int lp = (hpte->r >> LP_SHIFT) & ((1 << LP_BITS) - 1);
- if (!(hpte_v & HPTE_V_LARGE))
- size = MMU_PAGE_4K;
- else {
- for (i = 0; i < LP_BITS; i++) {
- if ((hpte_r & LP_MASK(i+1)) == LP_MASK(i+1))
- break;
- }
- penc = LP_MASK(i+1) >> LP_SHIFT;
+ if (!(hpte_v & HPTE_V_LARGE)) {
+ size = MMU_PAGE_4K;
+ a_size = MMU_PAGE_4K;
+ } else {
for (size = 0; size < MMU_PAGE_COUNT; size++) {
- /* 4K pages are not represented by LP */
- if (size == MMU_PAGE_4K)
- continue;
-
/* valid entries have a shift value */
if (!mmu_psize_defs[size].shift)
continue;
- if (penc == mmu_psize_defs[size].penc)
+ a_size = __hpte_actual_psize(lp, size);
+ if (a_size != -1)
break;
}
}
-
/* This works for all page sizes, and for 256M and 1T segments */
*ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
shift = mmu_psize_defs[size].shift;
@@ -433,7 +512,8 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
default:
*vpn = size = 0;
}
- *psize = size;
+ *psize = size;
+ *apsize = a_size;
}
/*
@@ -451,7 +531,7 @@ static void native_hpte_clear(void)
struct hash_pte *hptep = htab_address;
unsigned long hpte_v;
unsigned long pteg_count;
- int psize, ssize;
+ int psize, apsize, ssize;
pteg_count = htab_hash_mask + 1;
@@ -477,9 +557,9 @@ static void native_hpte_clear(void)
* already hold the native_tlbie_lock.
*/
if (hpte_v & HPTE_V_VALID) {
- hpte_decode(hptep, slot, &psize, &ssize, &vpn);
+ hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn);
hptep->v = 0;
- __tlbie(vpn, psize, ssize);
+ __tlbie(vpn, psize, apsize, ssize);
}
}
@@ -520,7 +600,7 @@ static void native_flush_hash_range(unsigned long number, int local)
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
slot += hidx & _PTEIDX_GROUP_IX;
hptep = htab_address + slot;
- want_v = hpte_encode_v(vpn, psize, ssize);
+ want_v = hpte_encode_avpn(vpn, psize, ssize);
native_lock_hpte(hptep);
hpte_v = hptep->v;
if (!HPTE_V_COMPARE(hpte_v, want_v) ||
@@ -540,7 +620,7 @@ static void native_flush_hash_range(unsigned long number, int local)
pte_iterate_hashed_subpages(pte, psize,
vpn, index, shift) {
- __tlbiel(vpn, psize, ssize);
+ __tlbiel(vpn, psize, psize, ssize);
} pte_iterate_hashed_end();
}
asm volatile("ptesync":::"memory");
@@ -557,7 +637,7 @@ static void native_flush_hash_range(unsigned long number, int local)
pte_iterate_hashed_subpages(pte, psize,
vpn, index, shift) {
- __tlbie(vpn, psize, ssize);
+ __tlbie(vpn, psize, psize, ssize);
} pte_iterate_hashed_end();
}
asm volatile("eieio; tlbsync; ptesync":::"memory");
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index f410c3e12c1e..88ac0eeaadde 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -126,7 +126,7 @@ static struct mmu_psize_def mmu_psize_defaults_old[] = {
[MMU_PAGE_4K] = {
.shift = 12,
.sllp = 0,
- .penc = 0,
+ .penc = {[MMU_PAGE_4K] = 0, [1 ... MMU_PAGE_COUNT - 1] = -1},
.avpnm = 0,
.tlbiel = 0,
},
@@ -140,14 +140,15 @@ static struct mmu_psize_def mmu_psize_defaults_gp[] = {
[MMU_PAGE_4K] = {
.shift = 12,
.sllp = 0,
- .penc = 0,
+ .penc = {[MMU_PAGE_4K] = 0, [1 ... MMU_PAGE_COUNT - 1] = -1},
.avpnm = 0,
.tlbiel = 1,
},
[MMU_PAGE_16M] = {
.shift = 24,
.sllp = SLB_VSID_L,
- .penc = 0,
+ .penc = {[0 ... MMU_PAGE_16M - 1] = -1, [MMU_PAGE_16M] = 0,
+ [MMU_PAGE_16M + 1 ... MMU_PAGE_COUNT - 1] = -1 },
.avpnm = 0x1UL,
.tlbiel = 0,
},
@@ -209,7 +210,7 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
BUG_ON(!ppc_md.hpte_insert);
ret = ppc_md.hpte_insert(hpteg, vpn, paddr, tprot,
- HPTE_V_BOLTED, psize, ssize);
+ HPTE_V_BOLTED, psize, psize, ssize);
if (ret < 0)
break;
@@ -276,6 +277,30 @@ static void __init htab_init_seg_sizes(void)
of_scan_flat_dt(htab_dt_scan_seg_sizes, NULL);
}
+static int __init get_idx_from_shift(unsigned int shift)
+{
+ int idx = -1;
+
+ switch (shift) {
+ case 0xc:
+ idx = MMU_PAGE_4K;
+ break;
+ case 0x10:
+ idx = MMU_PAGE_64K;
+ break;
+ case 0x14:
+ idx = MMU_PAGE_1M;
+ break;
+ case 0x18:
+ idx = MMU_PAGE_16M;
+ break;
+ case 0x22:
+ idx = MMU_PAGE_16G;
+ break;
+ }
+ return idx;
+}
+
static int __init htab_dt_scan_page_sizes(unsigned long node,
const char *uname, int depth,
void *data)
@@ -291,64 +316,65 @@ static int __init htab_dt_scan_page_sizes(unsigned long node,
prop = (u32 *)of_get_flat_dt_prop(node,
"ibm,segment-page-sizes", &size);
if (prop != NULL) {
- DBG("Page sizes from device-tree:\n");
+ pr_info("Page sizes from device-tree:\n");
size /= 4;
cur_cpu_spec->mmu_features &= ~(MMU_FTR_16M_PAGE);
while(size > 0) {
- unsigned int shift = prop[0];
+ unsigned int base_shift = prop[0];
unsigned int slbenc = prop[1];
unsigned int lpnum = prop[2];
- unsigned int lpenc = 0;
struct mmu_psize_def *def;
- int idx = -1;
+ int idx, base_idx;
size -= 3; prop += 3;
- while(size > 0 && lpnum) {
- if (prop[0] == shift)
- lpenc = prop[1];
- prop += 2; size -= 2;
- lpnum--;
+ base_idx = get_idx_from_shift(base_shift);
+ if (base_idx < 0) {
+ /*
+ * skip the pte encoding also
+ */
+ prop += lpnum * 2; size -= lpnum * 2;
+ continue;
}
- switch(shift) {
- case 0xc:
- idx = MMU_PAGE_4K;
- break;
- case 0x10:
- idx = MMU_PAGE_64K;
- break;
- case 0x14:
- idx = MMU_PAGE_1M;
- break;
- case 0x18:
- idx = MMU_PAGE_16M;
+ def = &mmu_psize_defs[base_idx];
+ if (base_idx == MMU_PAGE_16M)
cur_cpu_spec->mmu_features |= MMU_FTR_16M_PAGE;
- break;
- case 0x22:
- idx = MMU_PAGE_16G;
- break;
- }
- if (idx < 0)
- continue;
- def = &mmu_psize_defs[idx];
- def->shift = shift;
- if (shift <= 23)
+
+ def->shift = base_shift;
+ if (base_shift <= 23)
def->avpnm = 0;
else
- def->avpnm = (1 << (shift - 23)) - 1;
+ def->avpnm = (1 << (base_shift - 23)) - 1;
def->sllp = slbenc;
- def->penc = lpenc;
- /* We don't know for sure what's up with tlbiel, so
+ /*
+ * We don't know for sure what's up with tlbiel, so
* for now we only set it for 4K and 64K pages
*/
- if (idx == MMU_PAGE_4K || idx == MMU_PAGE_64K)
+ if (base_idx == MMU_PAGE_4K || base_idx == MMU_PAGE_64K)
def->tlbiel = 1;
else
def->tlbiel = 0;
- DBG(" %d: shift=%02x, sllp=%04lx, avpnm=%08lx, "
- "tlbiel=%d, penc=%d\n",
- idx, shift, def->sllp, def->avpnm, def->tlbiel,
- def->penc);
+ while (size > 0 && lpnum) {
+ unsigned int shift = prop[0];
+ int penc = prop[1];
+
+ prop += 2; size -= 2;
+ lpnum--;
+
+ idx = get_idx_from_shift(shift);
+ if (idx < 0)
+ continue;
+
+ if (penc == -1)
+ pr_err("Invalid penc for base_shift=%d "
+ "shift=%d\n", base_shift, shift);
+
+ def->penc[idx] = penc;
+ pr_info("base_shift=%d: shift=%d, sllp=0x%04lx,"
+ " avpnm=0x%08lx, tlbiel=%d, penc=%d\n",
+ base_shift, shift, def->sllp,
+ def->avpnm, def->tlbiel, def->penc[idx]);
+ }
}
return 1;
}
@@ -397,10 +423,21 @@ static int __init htab_dt_scan_hugepage_blocks(unsigned long node,
}
#endif /* CONFIG_HUGETLB_PAGE */
+static void mmu_psize_set_default_penc(void)
+{
+ int bpsize, apsize;
+ for (bpsize = 0; bpsize < MMU_PAGE_COUNT; bpsize++)
+ for (apsize = 0; apsize < MMU_PAGE_COUNT; apsize++)
+ mmu_psize_defs[bpsize].penc[apsize] = -1;
+}
+
static void __init htab_init_page_sizes(void)
{
int rc;
+ /* set the invalid penc to -1 */
+ mmu_psize_set_default_penc();
+
/* Default to 4K pages only */
memcpy(mmu_psize_defs, mmu_psize_defaults_old,
sizeof(mmu_psize_defaults_old));
@@ -899,14 +936,14 @@ static inline int subpage_protection(struct mm_struct *mm, unsigned long ea)
void hash_failure_debug(unsigned long ea, unsigned long access,
unsigned long vsid, unsigned long trap,
- int ssize, int psize, unsigned long pte)
+ int ssize, int psize, int lpsize, unsigned long pte)
{
if (!printk_ratelimit())
return;
pr_info("mm: Hashing failure ! EA=0x%lx access=0x%lx current=%s\n",
ea, access, current->comm);
- pr_info(" trap=0x%lx vsid=0x%lx ssize=%d psize=%d pte=0x%lx\n",
- trap, vsid, ssize, psize, pte);
+ pr_info(" trap=0x%lx vsid=0x%lx ssize=%d base psize=%d psize %d pte=0x%lx\n",
+ trap, vsid, ssize, psize, lpsize, pte);
}
/* Result code is:
@@ -1079,7 +1116,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
*/
if (rc == -1)
hash_failure_debug(ea, access, vsid, trap, ssize, psize,
- pte_val(*ptep));
+ psize, pte_val(*ptep));
#ifndef CONFIG_PPC_64K_PAGES
DBG_LOW(" o-pte: %016lx\n", pte_val(*ptep));
#else
@@ -1157,7 +1194,9 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
*/
if (rc == -1)
hash_failure_debug(ea, access, vsid, trap, ssize,
- mm->context.user_psize, pte_val(*ptep));
+ mm->context.user_psize,
+ mm->context.user_psize,
+ pte_val(*ptep));
local_irq_restore(flags);
}
@@ -1191,6 +1230,7 @@ void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, int ssize,
* unmapping it first, it may see the speculated version.
*/
if (local && cpu_has_feature(CPU_FTR_TM) &&
+ current->thread.regs &&
MSR_TM_ACTIVE(current->thread.regs->msr)) {
tm_enable();
tm_abort(TM_CAUSE_TLBI);
@@ -1230,24 +1270,60 @@ void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc)
bad_page_fault(regs, address, SIGBUS);
}
+long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
+ unsigned long pa, unsigned long rflags,
+ unsigned long vflags, int psize, int ssize)
+{
+ unsigned long hpte_group;
+ long slot;
+
+repeat:
+ hpte_group = ((hash & htab_hash_mask) *
+ HPTES_PER_GROUP) & ~0x7UL;
+
+ /* Insert into the hash table, primary slot */
+ slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, vflags,
+ psize, psize, ssize);
+
+ /* Primary is full, try the secondary */
+ if (unlikely(slot == -1)) {
+ hpte_group = ((~hash & htab_hash_mask) *
+ HPTES_PER_GROUP) & ~0x7UL;
+ slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags,
+ vflags | HPTE_V_SECONDARY,
+ psize, psize, ssize);
+ if (slot == -1) {
+ if (mftb() & 0x1)
+ hpte_group = ((hash & htab_hash_mask) *
+ HPTES_PER_GROUP)&~0x7UL;
+
+ ppc_md.hpte_remove(hpte_group);
+ goto repeat;
+ }
+ }
+
+ return slot;
+}
+
#ifdef CONFIG_DEBUG_PAGEALLOC
static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
{
- unsigned long hash, hpteg;
+ unsigned long hash;
unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize);
unsigned long mode = htab_convert_pte_flags(PAGE_KERNEL);
- int ret;
+ long ret;
hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);
- hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
/* Don't create HPTE entries for bad address */
if (!vsid)
return;
- ret = ppc_md.hpte_insert(hpteg, vpn, __pa(vaddr),
- mode, HPTE_V_BOLTED,
- mmu_linear_psize, mmu_kernel_ssize);
+
+ ret = hpte_insert_repeating(hash, vpn, __pa(vaddr), mode,
+ HPTE_V_BOLTED,
+ mmu_linear_psize, mmu_kernel_ssize);
+
BUG_ON (ret < 0);
spin_lock(&linear_map_hash_lock);
BUG_ON(linear_map_hash_slots[lmi] & 0x80);
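htab_dt_scan_page_sizes() above now parses each ibm,segment-page-sizes record as a base shift, an SLB encoding and a count, followed by (shift, penc) pairs for every actual page size supported on that base. A minimal sketch of that record layout, using made-up property words, might look like this:

#include <stdio.h>

int main(void)
{
	/* hypothetical property words: 64K base (shift 0x10), sllp, two encodings */
	unsigned int prop[] = { 0x10, 0x110, 2, 0x10, 0x1, 0x18, 0x38 };
	unsigned int base_shift = prop[0];
	unsigned int slbenc = prop[1];
	unsigned int lpnum = prop[2];
	unsigned int i;

	printf("base_shift=%u sllp=0x%x\n", base_shift, slbenc);
	/* each pair gives an actual page-size shift and its HPTE LP encoding */
	for (i = 0; i < lpnum; i++)
		printf("  actual shift=%u penc=0x%x\n",
		       prop[3 + 2 * i], prop[4 + 2 * i]);
	return 0;
}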
diff --git a/arch/powerpc/mm/hugetlbpage-hash64.c b/arch/powerpc/mm/hugetlbpage-hash64.c
index cecad348f604..0f1d94a1fb82 100644
--- a/arch/powerpc/mm/hugetlbpage-hash64.c
+++ b/arch/powerpc/mm/hugetlbpage-hash64.c
@@ -14,6 +14,10 @@
#include <asm/cacheflush.h>
#include <asm/machdep.h>
+extern long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
+ unsigned long pa, unsigned long rflags,
+ unsigned long vflags, int psize, int ssize);
+
int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
pte_t *ptep, unsigned long trap, int local, int ssize,
unsigned int shift, unsigned int mmu_psize)
@@ -83,14 +87,9 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
if (likely(!(old_pte & _PAGE_HASHPTE))) {
unsigned long hash = hpt_hash(vpn, shift, ssize);
- unsigned long hpte_group;
pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;
-repeat:
- hpte_group = ((hash & htab_hash_mask) *
- HPTES_PER_GROUP) & ~0x7UL;
-
/* clear HPTE slot information in new PTE */
#ifdef CONFIG_PPC_64K_PAGES
new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HPTE_SUB0;
@@ -101,26 +100,8 @@ repeat:
rflags |= (new_pte & (_PAGE_WRITETHRU | _PAGE_NO_CACHE |
_PAGE_COHERENT | _PAGE_GUARDED));
- /* Insert into the hash table, primary slot */
- slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0,
- mmu_psize, ssize);
-
- /* Primary is full, try the secondary */
- if (unlikely(slot == -1)) {
- hpte_group = ((~hash & htab_hash_mask) *
- HPTES_PER_GROUP) & ~0x7UL;
- slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags,
- HPTE_V_SECONDARY,
- mmu_psize, ssize);
- if (slot == -1) {
- if (mftb() & 0x1)
- hpte_group = ((hash & htab_hash_mask) *
- HPTES_PER_GROUP)&~0x7UL;
-
- ppc_md.hpte_remove(hpte_group);
- goto repeat;
- }
- }
+ slot = hpte_insert_repeating(hash, vpn, pa, rflags, 0,
+ mmu_psize, ssize);
/*
* Hypervisor failure. Restore old pte and return -1
@@ -129,7 +110,7 @@ repeat:
if (unlikely(slot == -2)) {
*ptep = __pte(old_pte);
hash_failure_debug(ea, access, vsid, trap, ssize,
- mmu_psize, old_pte);
+ mmu_psize, mmu_psize, old_pte);
return -1;
}
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 1a6de0a7d8eb..237c8e5f2640 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -48,30 +48,71 @@ static u64 gpage_freearray[MAX_NUMBER_GPAGES];
static unsigned nr_gpages;
#endif
-static inline int shift_to_mmu_psize(unsigned int shift)
+#define hugepd_none(hpd) ((hpd).pd == 0)
+
+#ifdef CONFIG_PPC_BOOK3S_64
+/*
+ * At this point we do the placement change only for BOOK3S 64. This would
+ * possibly work on other subarchs.
+ */
+
+/*
+ * We have PGD_INDEX_SIZE = 12 and PTE_INDEX_SIZE = 8, so that we can have
+ * a 16GB hugepage pte in the PGD and a 16MB hugepage pte at the PMD.
+ */
+int pmd_huge(pmd_t pmd)
{
- int psize;
+ /*
+ * leaf pte for huge page, bottom two bits != 00
+ */
+ return ((pmd_val(pmd) & 0x3) != 0x0);
+}
- for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
- if (mmu_psize_defs[psize].shift == shift)
- return psize;
- return -1;
+int pud_huge(pud_t pud)
+{
+ /*
+ * leaf pte for huge page, bottom two bits != 00
+ */
+ return ((pud_val(pud) & 0x3) != 0x0);
}
-static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
+int pgd_huge(pgd_t pgd)
{
- if (mmu_psize_defs[mmu_psize].shift)
- return mmu_psize_defs[mmu_psize].shift;
- BUG();
+ /*
+ * leaf pte for huge page, bottom two bits != 00
+ */
+ return ((pgd_val(pgd) & 0x3) != 0x0);
+}
+#else
+int pmd_huge(pmd_t pmd)
+{
+ return 0;
}
-#define hugepd_none(hpd) ((hpd).pd == 0)
+int pud_huge(pud_t pud)
+{
+ return 0;
+}
+
+int pgd_huge(pgd_t pgd)
+{
+ return 0;
+}
+#endif
+/*
+ * We have 4 cases for pgds and pmds:
+ * (1) invalid (all zeroes)
+ * (2) pointer to next table, as normal; bottom 6 bits == 0
+ * (3) leaf pte for huge page, bottom two bits != 00
+ * (4) hugepd pointer, bottom two bits == 00, next 4 bits indicate size of table
+ */
pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift)
{
pgd_t *pg;
pud_t *pu;
pmd_t *pm;
+ pte_t *ret_pte;
hugepd_t *hpdp = NULL;
unsigned pdshift = PGDIR_SHIFT;
@@ -79,30 +120,43 @@ pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift
*shift = 0;
pg = pgdir + pgd_index(ea);
- if (is_hugepd(pg)) {
+
+ if (pgd_huge(*pg)) {
+ ret_pte = (pte_t *) pg;
+ goto out;
+ } else if (is_hugepd(pg))
hpdp = (hugepd_t *)pg;
- } else if (!pgd_none(*pg)) {
+ else if (!pgd_none(*pg)) {
pdshift = PUD_SHIFT;
pu = pud_offset(pg, ea);
- if (is_hugepd(pu))
+
+ if (pud_huge(*pu)) {
+ ret_pte = (pte_t *) pu;
+ goto out;
+ } else if (is_hugepd(pu))
hpdp = (hugepd_t *)pu;
else if (!pud_none(*pu)) {
pdshift = PMD_SHIFT;
pm = pmd_offset(pu, ea);
- if (is_hugepd(pm))
+
+ if (pmd_huge(*pm)) {
+ ret_pte = (pte_t *) pm;
+ goto out;
+ } else if (is_hugepd(pm))
hpdp = (hugepd_t *)pm;
- else if (!pmd_none(*pm)) {
+ else if (!pmd_none(*pm))
return pte_offset_kernel(pm, ea);
- }
}
}
-
if (!hpdp)
return NULL;
+ ret_pte = hugepte_offset(hpdp, ea, pdshift);
+ pdshift = hugepd_shift(*hpdp);
+out:
if (shift)
- *shift = hugepd_shift(*hpdp);
- return hugepte_offset(hpdp, ea, pdshift);
+ *shift = pdshift;
+ return ret_pte;
}
EXPORT_SYMBOL_GPL(find_linux_pte_or_hugepte);
@@ -145,6 +199,7 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
if (unlikely(!hugepd_none(*hpdp)))
break;
else
+ /* We use the old format for PPC_FSL_BOOK3E */
hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
}
/* If we bailed from the for loop early, an error occurred, clean up */
@@ -156,9 +211,15 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
#else
if (!hugepd_none(*hpdp))
kmem_cache_free(cachep, new);
- else
+ else {
+#ifdef CONFIG_PPC_BOOK3S_64
+ hpdp->pd = (unsigned long)new |
+ (shift_to_mmu_psize(pshift) << 2);
+#else
hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
#endif
+ }
+#endif
spin_unlock(&mm->page_table_lock);
return 0;
}
@@ -175,6 +236,61 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
#define HUGEPD_PUD_SHIFT PMD_SHIFT
#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+/*
+ * At this point we do the placement change only for BOOK3S 64. This would
+ * possibly work on other subarchs.
+ */
+pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
+{
+ pgd_t *pg;
+ pud_t *pu;
+ pmd_t *pm;
+ hugepd_t *hpdp = NULL;
+ unsigned pshift = __ffs(sz);
+ unsigned pdshift = PGDIR_SHIFT;
+
+ addr &= ~(sz-1);
+ pg = pgd_offset(mm, addr);
+
+ if (pshift == PGDIR_SHIFT)
+ /* 16GB huge page */
+ return (pte_t *) pg;
+ else if (pshift > PUD_SHIFT)
+ /*
+ * We need to use a hugepd table
+ */
+ hpdp = (hugepd_t *)pg;
+ else {
+ pdshift = PUD_SHIFT;
+ pu = pud_alloc(mm, pg, addr);
+ if (pshift == PUD_SHIFT)
+ return (pte_t *)pu;
+ else if (pshift > PMD_SHIFT)
+ hpdp = (hugepd_t *)pu;
+ else {
+ pdshift = PMD_SHIFT;
+ pm = pmd_alloc(mm, pu, addr);
+ if (pshift == PMD_SHIFT)
+ /* 16MB hugepage */
+ return (pte_t *)pm;
+ else
+ hpdp = (hugepd_t *)pm;
+ }
+ }
+ if (!hpdp)
+ return NULL;
+
+ BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));
+
+ if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift))
+ return NULL;
+
+ return hugepte_offset(hpdp, addr, pdshift);
+}
+
+#else
+
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
pgd_t *pg;
@@ -212,6 +328,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
return hugepte_offset(hpdp, addr, pdshift);
}
+#endif
#ifdef CONFIG_PPC_FSL_BOOK3E
/* Build list of addresses of gigantic pages. This function is used in early
@@ -475,7 +592,7 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
do {
pmd = pmd_offset(pud, addr);
next = pmd_addr_end(addr, end);
- if (pmd_none(*pmd))
+ if (pmd_none_or_clear_bad(pmd))
continue;
#ifdef CONFIG_PPC_FSL_BOOK3E
/*
@@ -628,16 +745,6 @@ follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
return page;
}
-int pmd_huge(pmd_t pmd)
-{
- return 0;
-}
-
-int pud_huge(pud_t pud)
-{
- return 0;
-}
-
struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
pmd_t *pmd, int write)
@@ -646,8 +753,8 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
return NULL;
}
-static noinline int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
- unsigned long end, int write, struct page **pages, int *nr)
+int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
+ unsigned long end, int write, struct page **pages, int *nr)
{
unsigned long mask;
unsigned long pte_end;
@@ -742,7 +849,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
struct hstate *hstate = hstate_file(file);
int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));
- return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1, 0);
+ return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1);
}
#endif
@@ -883,11 +990,16 @@ static int __init hugetlbpage_init(void)
pdshift = PUD_SHIFT;
else
pdshift = PGDIR_SHIFT;
-
- pgtable_cache_add(pdshift - shift, NULL);
- if (!PGT_CACHE(pdshift - shift))
- panic("hugetlbpage_init(): could not create "
- "pgtable cache for %d bit pagesize\n", shift);
+ /*
+ * If pdshift and shift have the same value, we don't
+ * use the pgtable cache for hugepd.
+ */
+ if (pdshift != shift) {
+ pgtable_cache_add(pdshift - shift, NULL);
+ if (!PGT_CACHE(pdshift - shift))
+ panic("hugetlbpage_init(): could not create "
+ "pgtable cache for %d bit pagesize\n", shift);
+ }
}
/* Set default large page size. Currently, we pick 16M or 1M
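The four-case comment in find_linux_pte_or_hugepte() above can be illustrated with a toy classifier. This assumes the Book3S 64 layout introduced by this patch (a leaf huge PTE has the low two bits set; a hugepd pointer keeps them clear and carries a size index in the next four bits); the entry values below are invented:

#include <stdio.h>

static const char *classify(unsigned long entry)
{
	if (entry == 0)
		return "invalid";
	if (entry & 0x3)
		return "leaf pte for a huge page";
	if (entry & 0x3c)
		return "hugepd pointer (size index in the next 4 bits)";
	return "pointer to next table";
}

int main(void)
{
	printf("%s\n", classify(0x0));
	printf("%s\n", classify(0x10000001ul));	/* hypothetical huge leaf */
	printf("%s\n", classify(0x10000020ul));	/* hypothetical hugepd */
	printf("%s\n", classify(0x10000000ul));	/* next-level table */
	return 0;
}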
diff --git a/arch/powerpc/mm/icswx.c b/arch/powerpc/mm/icswx.c
index 8cdbd8634a58..915412e4d5ba 100644
--- a/arch/powerpc/mm/icswx.c
+++ b/arch/powerpc/mm/icswx.c
@@ -67,7 +67,7 @@
void switch_cop(struct mm_struct *next)
{
-#ifdef CONFIG_ICSWX_PID
+#ifdef CONFIG_PPC_ICSWX_PID
mtspr(SPRN_PID, next->context.cop_pid);
#endif
mtspr(SPRN_ACOP, next->context.acop);
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 7e2246fb2f31..c2787bf779ca 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -129,8 +129,7 @@ void pgtable_cache_add(unsigned shift, void (*ctor)(void *))
align = max_t(unsigned long, align, minalign);
name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
new = kmem_cache_create(name, table_size, align, 0, ctor);
- PGT_CACHE(shift) = new;
-
+ pgtable_cache[shift - 1] = new;
pr_debug("Allocated pgtable cache for order %d\n", shift);
}
@@ -263,19 +262,14 @@ static __meminit void vmemmap_list_populate(unsigned long phys,
vmemmap_list = vmem_back;
}
-int __meminit vmemmap_populate(struct page *start_page,
- unsigned long nr_pages, int node)
+int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
- unsigned long start = (unsigned long)start_page;
- unsigned long end = (unsigned long)(start_page + nr_pages);
unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
/* Align to the page size of the linear mapping. */
start = _ALIGN_DOWN(start, page_size);
- pr_debug("vmemmap_populate page %p, %ld pages, node %d\n",
- start_page, nr_pages, node);
- pr_debug(" -> map %lx..%lx\n", start, end);
+ pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);
for (; start < end; start += page_size) {
void *p;
@@ -298,7 +292,7 @@ int __meminit vmemmap_populate(struct page *start_page,
return 0;
}
-void vmemmap_free(struct page *memmap, unsigned long nr_pages)
+void vmemmap_free(unsigned long start, unsigned long end)
{
}
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index f1f7409a4183..0988a26e0413 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -66,10 +66,9 @@ unsigned long long memory_limit;
#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
+EXPORT_SYMBOL(kmap_pte);
pgprot_t kmap_prot;
-
EXPORT_SYMBOL(kmap_prot);
-EXPORT_SYMBOL(kmap_pte);
static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
@@ -352,13 +351,9 @@ void __init mem_init(void)
struct page *page = pfn_to_page(pfn);
if (memblock_is_reserved(paddr))
continue;
- ClearPageReserved(page);
- init_page_count(page);
- __free_page(page);
- totalhigh_pages++;
+ free_highmem_page(page);
reservedpages--;
}
- totalram_pages += totalhigh_pages;
printk(KERN_DEBUG "High memory: %luk\n",
totalhigh_pages << (PAGE_SHIFT-10));
}
@@ -405,39 +400,14 @@ void __init mem_init(void)
void free_initmem(void)
{
- unsigned long addr;
-
ppc_md.progress = ppc_printk_progress;
-
- addr = (unsigned long)__init_begin;
- for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
- memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
- ClearPageReserved(virt_to_page(addr));
- init_page_count(virt_to_page(addr));
- free_page(addr);
- totalram_pages++;
- }
- pr_info("Freeing unused kernel memory: %luk freed\n",
- ((unsigned long)__init_end -
- (unsigned long)__init_begin) >> 10);
+ free_initmem_default(POISON_FREE_INITMEM);
}
#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
- if (start >= end)
- return;
-
- start = _ALIGN_DOWN(start, PAGE_SIZE);
- end = _ALIGN_UP(end, PAGE_SIZE);
- pr_info("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
-
- for (; start < end; start += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(start));
- init_page_count(virt_to_page(start));
- free_page(start);
- totalram_pages++;
- }
+ free_reserved_area(start, end, 0, "initrd");
}
#endif
diff --git a/arch/powerpc/mm/mmu_context_hash64.c b/arch/powerpc/mm/mmu_context_hash64.c
index d1d1b92c5b99..178876aef40f 100644
--- a/arch/powerpc/mm/mmu_context_hash64.c
+++ b/arch/powerpc/mm/mmu_context_hash64.c
@@ -23,6 +23,7 @@
#include <linux/slab.h>
#include <asm/mmu_context.h>
+#include <asm/pgalloc.h>
#include "icswx.h"
@@ -85,6 +86,9 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
spin_lock_init(mm->context.cop_lockp);
#endif /* CONFIG_PPC_ICSWX */
+#ifdef CONFIG_PPC_64K_PAGES
+ mm->context.pte_frag = NULL;
+#endif
return 0;
}
@@ -96,13 +100,46 @@ void __destroy_context(int context_id)
}
EXPORT_SYMBOL_GPL(__destroy_context);
+#ifdef CONFIG_PPC_64K_PAGES
+static void destroy_pagetable_page(struct mm_struct *mm)
+{
+ int count;
+ void *pte_frag;
+ struct page *page;
+
+ pte_frag = mm->context.pte_frag;
+ if (!pte_frag)
+ return;
+
+ page = virt_to_page(pte_frag);
+ /* drop all the pending references */
+ count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT;
+ /* We allow PTE_FRAG_NR fragments from a PTE page */
+ count = atomic_sub_return(PTE_FRAG_NR - count, &page->_count);
+ if (!count) {
+ pgtable_page_dtor(page);
+ free_hot_cold_page(page, 0);
+ }
+}
+
+#else
+static inline void destroy_pagetable_page(struct mm_struct *mm)
+{
+ return;
+}
+#endif
+
+
void destroy_context(struct mm_struct *mm)
{
+
#ifdef CONFIG_PPC_ICSWX
drop_cop(mm->context.acop, mm);
kfree(mm->context.cop_lockp);
mm->context.cop_lockp = NULL;
#endif /* CONFIG_PPC_ICSWX */
+
+ destroy_pagetable_page(mm);
__destroy_context(mm->context.id);
subpage_prot_free(mm);
mm->context.id = MMU_NO_CONTEXT;
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index bba87ca2b4d7..88c0425dc0a8 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -22,6 +22,11 @@
#include <linux/pfn.h>
#include <linux/cpuset.h>
#include <linux/node.h>
+#include <linux/stop_machine.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
#include <asm/sparsemem.h>
#include <asm/prom.h>
#include <asm/smp.h>
@@ -29,6 +34,7 @@
#include <asm/paca.h>
#include <asm/hvcall.h>
#include <asm/setup.h>
+#include <asm/vdso.h>
static int numa_enabled = 1;
@@ -62,14 +68,11 @@ static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS];
*/
static void __init setup_node_to_cpumask_map(void)
{
- unsigned int node, num = 0;
+ unsigned int node;
/* setup nr_node_ids if not done yet */
- if (nr_node_ids == MAX_NUMNODES) {
- for_each_node_mask(node, node_possible_map)
- num = node;
- nr_node_ids = num + 1;
- }
+ if (nr_node_ids == MAX_NUMNODES)
+ setup_nr_node_ids();
/* allocate the map */
for (node = 0; node < nr_node_ids; node++)
@@ -79,7 +82,7 @@ static void __init setup_node_to_cpumask_map(void)
dbg("Node to cpumask map for %d nodes\n", nr_node_ids);
}
-static int __cpuinit fake_numa_create_new_node(unsigned long end_pfn,
+static int __init fake_numa_create_new_node(unsigned long end_pfn,
unsigned int *nid)
{
unsigned long long mem;
@@ -201,7 +204,7 @@ int __node_distance(int a, int b)
int distance = LOCAL_DISTANCE;
if (!form1_affinity)
- return distance;
+ return ((a == b) ? LOCAL_DISTANCE : REMOTE_DISTANCE);
for (i = 0; i < distance_ref_points_depth; i++) {
if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
@@ -291,9 +294,7 @@ EXPORT_SYMBOL_GPL(of_node_to_nid);
static int __init find_min_common_depth(void)
{
int depth;
- struct device_node *chosen;
struct device_node *root;
- const char *vec5;
if (firmware_has_feature(FW_FEATURE_OPAL))
root = of_find_node_by_path("/ibm,opal");
@@ -325,24 +326,10 @@ static int __init find_min_common_depth(void)
distance_ref_points_depth /= sizeof(int);
-#define VEC5_AFFINITY_BYTE 5
-#define VEC5_AFFINITY 0x80
-
- if (firmware_has_feature(FW_FEATURE_OPAL))
+ if (firmware_has_feature(FW_FEATURE_OPAL) ||
+ firmware_has_feature(FW_FEATURE_TYPE1_AFFINITY)) {
+ dbg("Using form 1 affinity\n");
form1_affinity = 1;
- else {
- chosen = of_find_node_by_path("/chosen");
- if (chosen) {
- vec5 = of_get_property(chosen,
- "ibm,architecture-vec-5", NULL);
- if (vec5 && (vec5[VEC5_AFFINITY_BYTE] &
- VEC5_AFFINITY)) {
- dbg("Using form 1 affinity\n");
- form1_affinity = 1;
- }
-
- of_node_put(chosen);
- }
}
if (form1_affinity) {
@@ -1270,10 +1257,18 @@ u64 memory_hotplug_max(void)
/* Virtual Processor Home Node (VPHN) support */
#ifdef CONFIG_PPC_SPLPAR
+struct topology_update_data {
+ struct topology_update_data *next;
+ unsigned int cpu;
+ int old_nid;
+ int new_nid;
+};
+
static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS];
static cpumask_t cpu_associativity_changes_mask;
static int vphn_enabled;
-static void set_topology_timer(void);
+static int prrn_enabled;
+static void reset_topology_timer(void);
/*
* Store the current values of the associativity change counters in the
@@ -1309,11 +1304,9 @@ static void setup_cpu_associativity_change_counters(void)
*/
static int update_cpu_associativity_changes_mask(void)
{
- int cpu, nr_cpus = 0;
+ int cpu;
cpumask_t *changes = &cpu_associativity_changes_mask;
- cpumask_clear(changes);
-
for_each_possible_cpu(cpu) {
int i, changed = 0;
u8 *counts = vphn_cpu_change_counts[cpu];
@@ -1327,11 +1320,10 @@ static int update_cpu_associativity_changes_mask(void)
}
if (changed) {
cpumask_set_cpu(cpu, changes);
- nr_cpus++;
}
}
- return nr_cpus;
+ return cpumask_weight(changes);
}
/*
@@ -1423,40 +1415,84 @@ static long vphn_get_associativity(unsigned long cpu,
}
/*
+ * Update the CPU maps and sysfs entries for a single CPU when its NUMA
+ * characteristics change. This function doesn't perform any locking and is
+ * only safe to call from stop_machine().
+ */
+static int update_cpu_topology(void *data)
+{
+ struct topology_update_data *update;
+ unsigned long cpu;
+
+ if (!data)
+ return -EINVAL;
+
+ cpu = get_cpu();
+
+ for (update = data; update; update = update->next) {
+ if (cpu != update->cpu)
+ continue;
+
+ unregister_cpu_under_node(update->cpu, update->old_nid);
+ unmap_cpu_from_node(update->cpu);
+ map_cpu_to_node(update->cpu, update->new_nid);
+ vdso_getcpu_init();
+ register_cpu_under_node(update->cpu, update->new_nid);
+ }
+
+ return 0;
+}
+
+/*
* Update the node maps and sysfs entries for each cpu whose home node
* has changed. Returns 1 when the topology has changed, and 0 otherwise.
*/
int arch_update_cpu_topology(void)
{
- int cpu, nid, old_nid, changed = 0;
+ unsigned int cpu, changed = 0;
+ struct topology_update_data *updates, *ud;
unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
+ cpumask_t updated_cpus;
struct device *dev;
+ int weight, i = 0;
+
+ weight = cpumask_weight(&cpu_associativity_changes_mask);
+ if (!weight)
+ return 0;
+
+ updates = kzalloc(weight * (sizeof(*updates)), GFP_KERNEL);
+ if (!updates)
+ return 0;
- for_each_cpu(cpu,&cpu_associativity_changes_mask) {
+ cpumask_clear(&updated_cpus);
+
+ for_each_cpu(cpu, &cpu_associativity_changes_mask) {
+ ud = &updates[i++];
+ ud->cpu = cpu;
vphn_get_associativity(cpu, associativity);
- nid = associativity_to_nid(associativity);
+ ud->new_nid = associativity_to_nid(associativity);
- if (nid < 0 || !node_online(nid))
- nid = first_online_node;
+ if (ud->new_nid < 0 || !node_online(ud->new_nid))
+ ud->new_nid = first_online_node;
- old_nid = numa_cpu_lookup_table[cpu];
+ ud->old_nid = numa_cpu_lookup_table[cpu];
+ cpumask_set_cpu(cpu, &updated_cpus);
- /* Disable hotplug while we update the cpu
- * masks and sysfs.
- */
- get_online_cpus();
- unregister_cpu_under_node(cpu, old_nid);
- unmap_cpu_from_node(cpu);
- map_cpu_to_node(cpu, nid);
- register_cpu_under_node(cpu, nid);
- put_online_cpus();
-
- dev = get_cpu_device(cpu);
+ if (i < weight)
+ ud->next = &updates[i];
+ }
+
+ stop_machine(update_cpu_topology, &updates[0], &updated_cpus);
+
+ for (ud = &updates[0]; ud; ud = ud->next) {
+ dev = get_cpu_device(ud->cpu);
if (dev)
kobject_uevent(&dev->kobj, KOBJ_CHANGE);
+ cpumask_clear_cpu(ud->cpu, &cpu_associativity_changes_mask);
changed = 1;
}
+ kfree(updates);
return changed;
}
@@ -1473,49 +1509,165 @@ void topology_schedule_update(void)
static void topology_timer_fn(unsigned long ignored)
{
- if (!vphn_enabled)
- return;
- if (update_cpu_associativity_changes_mask() > 0)
+ if (prrn_enabled && cpumask_weight(&cpu_associativity_changes_mask))
topology_schedule_update();
- set_topology_timer();
+ else if (vphn_enabled) {
+ if (update_cpu_associativity_changes_mask() > 0)
+ topology_schedule_update();
+ reset_topology_timer();
+ }
}
static struct timer_list topology_timer =
TIMER_INITIALIZER(topology_timer_fn, 0, 0);
-static void set_topology_timer(void)
+static void reset_topology_timer(void)
{
topology_timer.data = 0;
topology_timer.expires = jiffies + 60 * HZ;
- add_timer(&topology_timer);
+ mod_timer(&topology_timer, topology_timer.expires);
+}
+
+#ifdef CONFIG_SMP
+
+static void stage_topology_update(int core_id)
+{
+ cpumask_or(&cpu_associativity_changes_mask,
+ &cpu_associativity_changes_mask, cpu_sibling_mask(core_id));
+ reset_topology_timer();
}
+static int dt_update_callback(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct of_prop_reconfig *update;
+ int rc = NOTIFY_DONE;
+
+ switch (action) {
+ case OF_RECONFIG_UPDATE_PROPERTY:
+ update = (struct of_prop_reconfig *)data;
+ if (!of_prop_cmp(update->dn->type, "cpu") &&
+ !of_prop_cmp(update->prop->name, "ibm,associativity")) {
+ u32 core_id;
+ of_property_read_u32(update->dn, "reg", &core_id);
+ stage_topology_update(core_id);
+ rc = NOTIFY_OK;
+ }
+ break;
+ }
+
+ return rc;
+}
+
+static struct notifier_block dt_update_nb = {
+ .notifier_call = dt_update_callback,
+};
+
+#endif
+
/*
- * Start polling for VPHN associativity changes.
+ * Start polling for associativity changes.
*/
int start_topology_update(void)
{
int rc = 0;
- /* Disabled until races with load balancing are fixed */
- if (0 && firmware_has_feature(FW_FEATURE_VPHN) &&
- get_lppaca()->shared_proc) {
- vphn_enabled = 1;
- setup_cpu_associativity_change_counters();
- init_timer_deferrable(&topology_timer);
- set_topology_timer();
- rc = 1;
+ if (firmware_has_feature(FW_FEATURE_PRRN)) {
+ if (!prrn_enabled) {
+ prrn_enabled = 1;
+ vphn_enabled = 0;
+#ifdef CONFIG_SMP
+ rc = of_reconfig_notifier_register(&dt_update_nb);
+#endif
+ }
+ } else if (firmware_has_feature(FW_FEATURE_VPHN) &&
+ get_lppaca()->shared_proc) {
+ if (!vphn_enabled) {
+ prrn_enabled = 0;
+ vphn_enabled = 1;
+ setup_cpu_associativity_change_counters();
+ init_timer_deferrable(&topology_timer);
+ reset_topology_timer();
+ }
}
return rc;
}
-__initcall(start_topology_update);
/*
* Disable polling for VPHN associativity changes.
*/
int stop_topology_update(void)
{
- vphn_enabled = 0;
- return del_timer_sync(&topology_timer);
+ int rc = 0;
+
+ if (prrn_enabled) {
+ prrn_enabled = 0;
+#ifdef CONFIG_SMP
+ rc = of_reconfig_notifier_unregister(&dt_update_nb);
+#endif
+ } else if (vphn_enabled) {
+ vphn_enabled = 0;
+ rc = del_timer_sync(&topology_timer);
+ }
+
+ return rc;
+}
+
+int prrn_is_enabled(void)
+{
+ return prrn_enabled;
+}
+
+static int topology_read(struct seq_file *file, void *v)
+{
+ if (vphn_enabled || prrn_enabled)
+ seq_puts(file, "on\n");
+ else
+ seq_puts(file, "off\n");
+
+ return 0;
+}
+
+static int topology_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, topology_read, NULL);
+}
+
+static ssize_t topology_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *off)
+{
+ char kbuf[4]; /* "on" or "off" plus null. */
+ int read_len;
+
+ read_len = count < 3 ? count : 3;
+ if (copy_from_user(kbuf, buf, read_len))
+ return -EINVAL;
+
+ kbuf[read_len] = '\0';
+
+ if (!strncmp(kbuf, "on", 2))
+ start_topology_update();
+ else if (!strncmp(kbuf, "off", 3))
+ stop_topology_update();
+ else
+ return -EINVAL;
+
+ return count;
+}
+
+static const struct file_operations topology_ops = {
+ .read = seq_read,
+ .write = topology_write,
+ .open = topology_open,
+ .release = single_release
+};
+
+static int topology_update_init(void)
+{
+ start_topology_update();
+ proc_create("powerpc/topology_updates", 644, NULL, &topology_ops);
+
+ return 0;
}
+device_initcall(topology_update_init);
#endif /* CONFIG_PPC_SPLPAR */
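arch_update_cpu_topology() above now batches changed CPUs into an array of topology_update_data entries chained through ->next, which the stop_machine() callback then walks. A minimal userspace model of that batching, with made-up CPU and node ids, could be:

#include <stdio.h>
#include <stdlib.h>

struct topology_update_data {
	struct topology_update_data *next;
	unsigned int cpu;
	int old_nid;
	int new_nid;
};

int main(void)
{
	int changed_cpus[] = { 1, 4, 6 };	/* hypothetical changed CPUs */
	int weight = 3, i;
	struct topology_update_data *updates, *ud;

	updates = calloc(weight, sizeof(*updates));
	for (i = 0; i < weight; i++) {
		updates[i].cpu = changed_cpus[i];
		updates[i].old_nid = 0;
		updates[i].new_nid = 1;
		if (i < weight - 1)
			updates[i].next = &updates[i + 1];
	}
	/* the stop_machine() callback walks the chain looking for its own CPU */
	for (ud = &updates[0]; ud; ud = ud->next)
		printf("cpu %u: node %d -> %d\n", ud->cpu, ud->old_nid, ud->new_nid);

	free(updates);
	return 0;
}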
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 654258f165ae..a854096e1023 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -337,3 +337,121 @@ EXPORT_SYMBOL(__ioremap_at);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(__iounmap);
EXPORT_SYMBOL(__iounmap_at);
+
+#ifdef CONFIG_PPC_64K_PAGES
+static pte_t *get_from_cache(struct mm_struct *mm)
+{
+ void *pte_frag, *ret;
+
+ spin_lock(&mm->page_table_lock);
+ ret = mm->context.pte_frag;
+ if (ret) {
+ pte_frag = ret + PTE_FRAG_SIZE;
+ /*
+ * If we have taken up all the fragments, mark the PTE page NULL
+ */
+ if (((unsigned long)pte_frag & ~PAGE_MASK) == 0)
+ pte_frag = NULL;
+ mm->context.pte_frag = pte_frag;
+ }
+ spin_unlock(&mm->page_table_lock);
+ return (pte_t *)ret;
+}
+
+static pte_t *__alloc_for_cache(struct mm_struct *mm, int kernel)
+{
+ void *ret = NULL;
+ struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
+ __GFP_REPEAT | __GFP_ZERO);
+ if (!page)
+ return NULL;
+
+ ret = page_address(page);
+ spin_lock(&mm->page_table_lock);
+ /*
+ * If we find pgtable_page set, we return
+ * the allocated page with a single fragment
+ * count.
+ */
+ if (likely(!mm->context.pte_frag)) {
+ atomic_set(&page->_count, PTE_FRAG_NR);
+ mm->context.pte_frag = ret + PTE_FRAG_SIZE;
+ }
+ spin_unlock(&mm->page_table_lock);
+
+ if (!kernel)
+ pgtable_page_ctor(page);
+
+ return (pte_t *)ret;
+}
+
+pte_t *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel)
+{
+ pte_t *pte;
+
+ pte = get_from_cache(mm);
+ if (pte)
+ return pte;
+
+ return __alloc_for_cache(mm, kernel);
+}
+
+void page_table_free(struct mm_struct *mm, unsigned long *table, int kernel)
+{
+ struct page *page = virt_to_page(table);
+ if (put_page_testzero(page)) {
+ if (!kernel)
+ pgtable_page_dtor(page);
+ free_hot_cold_page(page, 0);
+ }
+}
+
+#ifdef CONFIG_SMP
+static void page_table_free_rcu(void *table)
+{
+ struct page *page = virt_to_page(table);
+ if (put_page_testzero(page)) {
+ pgtable_page_dtor(page);
+ free_hot_cold_page(page, 0);
+ }
+}
+
+void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
+{
+ unsigned long pgf = (unsigned long)table;
+
+ BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
+ pgf |= shift;
+ tlb_remove_table(tlb, (void *)pgf);
+}
+
+void __tlb_remove_table(void *_table)
+{
+ void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
+ unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;
+
+ if (!shift)
+ /* PTE page needs special handling */
+ page_table_free_rcu(table);
+ else {
+ BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
+ kmem_cache_free(PGT_CACHE(shift), table);
+ }
+}
+#else
+void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
+{
+ if (!shift) {
+ /* PTE page needs special handling */
+ struct page *page = virt_to_page(table);
+ if (put_page_testzero(page)) {
+ pgtable_page_dtor(page);
+ free_hot_cold_page(page, 0);
+ }
+ } else {
+ BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
+ kmem_cache_free(PGT_CACHE(shift), table);
+ }
+}
+#endif
+#endif /* CONFIG_PPC_64K_PAGES */
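The page_table_alloc()/page_table_free() code above carves a 64K page into PTE_FRAG_NR fragments and releases the backing page once every fragment reference is dropped. A loose userspace model of that scheme (fragment sizes and names here are illustrative, not the kernel's PTE_FRAG_SIZE) might be:

#include <stdio.h>
#include <stdlib.h>

#define FRAG_PAGE_SIZE 65536
#define PTE_FRAG_SIZE  32768
#define PTE_FRAG_NR    (FRAG_PAGE_SIZE / PTE_FRAG_SIZE)

static char *page;
static char *frag;	/* next free fragment, NULL once the page is used up */
static int refcount;

static void *frag_alloc(void)
{
	void *ret;

	if (!frag) {
		/* no partially used page left: start a fresh one */
		page = malloc(FRAG_PAGE_SIZE);
		frag = page;
		refcount = PTE_FRAG_NR;
	}
	ret = frag;
	frag += PTE_FRAG_SIZE;
	if (frag == page + FRAG_PAGE_SIZE)
		frag = NULL;	/* all fragments of this page handed out */
	return ret;
}

static void frag_free(void *p)
{
	(void)p;
	if (--refcount == 0) {
		free(page);
		printf("backing page released\n");
	}
}

int main(void)
{
	void *a = frag_alloc();
	void *b = frag_alloc();

	frag_free(a);
	frag_free(b);	/* last reference drops the whole page */
	return 0;
}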
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index cf9dada734b6..3e99c149271a 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -237,134 +237,112 @@ static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psiz
#endif
}
+/*
+ * Compute which slice 'addr' is part of;
+ * set *boundary_addr to the start or end boundary of that slice
+ * (depending on the 'end' parameter);
+ * return a boolean indicating whether the slice is marked as available in
+ * the 'available' slice_mask.
+ */
+static bool slice_scan_available(unsigned long addr,
+ struct slice_mask available,
+ int end,
+ unsigned long *boundary_addr)
+{
+ unsigned long slice;
+ if (addr < SLICE_LOW_TOP) {
+ slice = GET_LOW_SLICE_INDEX(addr);
+ *boundary_addr = (slice + end) << SLICE_LOW_SHIFT;
+ return !!(available.low_slices & (1u << slice));
+ } else {
+ slice = GET_HIGH_SLICE_INDEX(addr);
+ *boundary_addr = (slice + end) ?
+ ((slice + end) << SLICE_HIGH_SHIFT) : SLICE_LOW_TOP;
+ return !!(available.high_slices & (1u << slice));
+ }
+}
+
static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
unsigned long len,
struct slice_mask available,
- int psize, int use_cache)
+ int psize)
{
- struct vm_area_struct *vma;
- unsigned long start_addr, addr;
- struct slice_mask mask;
int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
-
- if (use_cache) {
- if (len <= mm->cached_hole_size) {
- start_addr = addr = TASK_UNMAPPED_BASE;
- mm->cached_hole_size = 0;
- } else
- start_addr = addr = mm->free_area_cache;
- } else
- start_addr = addr = TASK_UNMAPPED_BASE;
-
-full_search:
- for (;;) {
- addr = _ALIGN_UP(addr, 1ul << pshift);
- if ((TASK_SIZE - len) < addr)
- break;
- vma = find_vma(mm, addr);
- BUG_ON(vma && (addr >= vma->vm_end));
-
- mask = slice_range_to_mask(addr, len);
- if (!slice_check_fit(mask, available)) {
- if (addr < SLICE_LOW_TOP)
- addr = _ALIGN_UP(addr + 1, 1ul << SLICE_LOW_SHIFT);
- else
- addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
+ unsigned long addr, found, next_end;
+ struct vm_unmapped_area_info info;
+
+ info.flags = 0;
+ info.length = len;
+ info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
+ info.align_offset = 0;
+
+ addr = TASK_UNMAPPED_BASE;
+ while (addr < TASK_SIZE) {
+ info.low_limit = addr;
+ if (!slice_scan_available(addr, available, 1, &addr))
continue;
+
+ next_slice:
+ /*
+ * At this point [info.low_limit; addr) covers
+ * available slices only and ends at a slice boundary.
+ * Check if we need to reduce the range, or if we can
+ * extend it to cover the next available slice.
+ */
+ if (addr >= TASK_SIZE)
+ addr = TASK_SIZE;
+ else if (slice_scan_available(addr, available, 1, &next_end)) {
+ addr = next_end;
+ goto next_slice;
}
- if (!vma || addr + len <= vma->vm_start) {
- /*
- * Remember the place where we stopped the search:
- */
- if (use_cache)
- mm->free_area_cache = addr + len;
- return addr;
- }
- if (use_cache && (addr + mm->cached_hole_size) < vma->vm_start)
- mm->cached_hole_size = vma->vm_start - addr;
- addr = vma->vm_end;
- }
+ info.high_limit = addr;
- /* Make sure we didn't miss any holes */
- if (use_cache && start_addr != TASK_UNMAPPED_BASE) {
- start_addr = addr = TASK_UNMAPPED_BASE;
- mm->cached_hole_size = 0;
- goto full_search;
+ found = vm_unmapped_area(&info);
+ if (!(found & ~PAGE_MASK))
+ return found;
}
+
return -ENOMEM;
}
static unsigned long slice_find_area_topdown(struct mm_struct *mm,
unsigned long len,
struct slice_mask available,
- int psize, int use_cache)
+ int psize)
{
- struct vm_area_struct *vma;
- unsigned long addr;
- struct slice_mask mask;
int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
+ unsigned long addr, found, prev;
+ struct vm_unmapped_area_info info;
- /* check if free_area_cache is useful for us */
- if (use_cache) {
- if (len <= mm->cached_hole_size) {
- mm->cached_hole_size = 0;
- mm->free_area_cache = mm->mmap_base;
- }
-
- /* either no address requested or can't fit in requested
- * address hole
- */
- addr = mm->free_area_cache;
-
- /* make sure it can fit in the remaining address space */
- if (addr > len) {
- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
- mask = slice_range_to_mask(addr, len);
- if (slice_check_fit(mask, available) &&
- slice_area_is_free(mm, addr, len))
- /* remember the address as a hint for
- * next time
- */
- return (mm->free_area_cache = addr);
- }
- }
+ info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+ info.length = len;
+ info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
+ info.align_offset = 0;
addr = mm->mmap_base;
- while (addr > len) {
- /* Go down by chunk size */
- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
-
- /* Check for hit with different page size */
- mask = slice_range_to_mask(addr, len);
- if (!slice_check_fit(mask, available)) {
- if (addr < SLICE_LOW_TOP)
- addr = _ALIGN_DOWN(addr, 1ul << SLICE_LOW_SHIFT);
- else if (addr < (1ul << SLICE_HIGH_SHIFT))
- addr = SLICE_LOW_TOP;
- else
- addr = _ALIGN_DOWN(addr, 1ul << SLICE_HIGH_SHIFT);
+ while (addr > PAGE_SIZE) {
+ info.high_limit = addr;
+ if (!slice_scan_available(addr - 1, available, 0, &addr))
continue;
- }
+ prev_slice:
/*
- * Lookup failure means no vma is above this address,
- * else if new region fits below vma->vm_start,
- * return with success:
+ * At this point [addr; info.high_limit) covers
+ * available slices only and starts at a slice boundary.
+ * Check if we need to reduce the range, or if we can
+ * extend it to cover the previous available slice.
*/
- vma = find_vma(mm, addr);
- if (!vma || (addr + len) <= vma->vm_start) {
- /* remember the address as a hint for next time */
- if (use_cache)
- mm->free_area_cache = addr;
- return addr;
+ if (addr < PAGE_SIZE)
+ addr = PAGE_SIZE;
+ else if (slice_scan_available(addr - 1, available, 0, &prev)) {
+ addr = prev;
+ goto prev_slice;
}
+ info.low_limit = addr;
- /* remember the largest hole we saw so far */
- if (use_cache && (addr + mm->cached_hole_size) < vma->vm_start)
- mm->cached_hole_size = vma->vm_start - addr;
-
- /* try just below the current vma->vm_start */
- addr = vma->vm_start;
+ found = vm_unmapped_area(&info);
+ if (!(found & ~PAGE_MASK))
+ return found;
}
/*
@@ -373,28 +351,18 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
* can happen with large stack limits and large mmap()
* allocations.
*/
- addr = slice_find_area_bottomup(mm, len, available, psize, 0);
-
- /*
- * Restore the topdown base:
- */
- if (use_cache) {
- mm->free_area_cache = mm->mmap_base;
- mm->cached_hole_size = ~0UL;
- }
-
- return addr;
+ return slice_find_area_bottomup(mm, len, available, psize);
}
static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len,
struct slice_mask mask, int psize,
- int topdown, int use_cache)
+ int topdown)
{
if (topdown)
- return slice_find_area_topdown(mm, len, mask, psize, use_cache);
+ return slice_find_area_topdown(mm, len, mask, psize);
else
- return slice_find_area_bottomup(mm, len, mask, psize, use_cache);
+ return slice_find_area_bottomup(mm, len, mask, psize);
}
#define or_mask(dst, src) do { \
@@ -415,7 +383,7 @@ static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len,
unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
unsigned long flags, unsigned int psize,
- int topdown, int use_cache)
+ int topdown)
{
struct slice_mask mask = {0, 0};
struct slice_mask good_mask;
@@ -430,8 +398,8 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
BUG_ON(mm->task_size == 0);
slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);
- slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d, use_cache=%d\n",
- addr, len, flags, topdown, use_cache);
+ slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d\n",
+ addr, len, flags, topdown);
if (len > mm->task_size)
return -ENOMEM;
@@ -503,8 +471,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
/* Now let's see if we can find something in the existing
* slices for that size
*/
- newaddr = slice_find_area(mm, len, good_mask, psize, topdown,
- use_cache);
+ newaddr = slice_find_area(mm, len, good_mask, psize, topdown);
if (newaddr != -ENOMEM) {
/* Found within the good mask, we don't have to setup,
* we thus return directly
@@ -536,8 +503,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
* anywhere in the good area.
*/
if (addr) {
- addr = slice_find_area(mm, len, good_mask, psize, topdown,
- use_cache);
+ addr = slice_find_area(mm, len, good_mask, psize, topdown);
if (addr != -ENOMEM) {
slice_dbg(" found area at 0x%lx\n", addr);
return addr;
@@ -547,15 +513,14 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
/* Now let's see if we can find something in the existing slices
* for that size plus free slices
*/
- addr = slice_find_area(mm, len, potential_mask, psize, topdown,
- use_cache);
+ addr = slice_find_area(mm, len, potential_mask, psize, topdown);
#ifdef CONFIG_PPC_64K_PAGES
if (addr == -ENOMEM && psize == MMU_PAGE_64K) {
/* retry the search with 4k-page slices included */
or_mask(potential_mask, compat_mask);
addr = slice_find_area(mm, len, potential_mask, psize,
- topdown, use_cache);
+ topdown);
}
#endif
@@ -586,8 +551,7 @@ unsigned long arch_get_unmapped_area(struct file *filp,
unsigned long flags)
{
return slice_get_unmapped_area(addr, len, flags,
- current->mm->context.user_psize,
- 0, 1);
+ current->mm->context.user_psize, 0);
}
unsigned long arch_get_unmapped_area_topdown(struct file *filp,
@@ -597,8 +561,7 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp,
const unsigned long flags)
{
return slice_get_unmapped_area(addr0, len, flags,
- current->mm->context.user_psize,
- 1, 1);
+ current->mm->context.user_psize, 1);
}
unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
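slice_find_area_bottomup() above repeatedly extends a candidate range across consecutive available slices before handing it to vm_unmapped_area(). The following toy walk over a hypothetical low_slices bitmap shows the same coalescing idea; the slice geometry and mask value are made up:

#include <stdio.h>

#define SLICE_SHIFT 28	/* 256MB "low" slices, for illustration */
#define NUM_SLICES  16

int main(void)
{
	unsigned int available = 0x0f3c;	/* hypothetical low_slices mask */
	int slice = 0;

	while (slice < NUM_SLICES) {
		int start;

		/* skip unavailable slices */
		while (slice < NUM_SLICES && !(available & (1u << slice)))
			slice++;
		if (slice == NUM_SLICES)
			break;
		start = slice;
		/* extend the range across consecutive available slices */
		while (slice < NUM_SLICES && (available & (1u << slice)))
			slice++;
		printf("search range: 0x%lx - 0x%lx\n",
		       (unsigned long)start << SLICE_SHIFT,
		       (unsigned long)slice << SLICE_SHIFT);
	}
	return 0;
}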
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index df32a838dcfa..6888cad5103d 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -414,9 +414,9 @@ static void setup_page_sizes(void)
#ifdef CONFIG_PPC_FSL_BOOK3E
unsigned int mmucfg = mfspr(SPRN_MMUCFG);
+ int fsl_mmu = mmu_has_feature(MMU_FTR_TYPE_FSL_E);
- if (((mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V1) &&
- (mmu_has_feature(MMU_FTR_TYPE_FSL_E))) {
+ if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V1) {
unsigned int tlb1cfg = mfspr(SPRN_TLB1CFG);
unsigned int min_pg, max_pg;
@@ -442,6 +442,20 @@ static void setup_page_sizes(void)
goto no_indirect;
}
+
+ if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V2) {
+ u32 tlb1ps = mfspr(SPRN_TLB1PS);
+
+ for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
+ struct mmu_psize_def *def = &mmu_psize_defs[psize];
+
+ if (tlb1ps & (1U << (def->shift - 10))) {
+ def->flags |= MMU_PAGE_SIZE_DIRECT;
+ }
+ }
+
+ goto no_indirect;
+ }
#endif
tlb0cfg = mfspr(SPRN_TLB0CFG);
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index e834f1ec23c8..c427ae36374a 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -671,16 +671,12 @@ void bpf_jit_compile(struct sk_filter *fp)
}
if (bpf_jit_enable > 1)
- pr_info("flen=%d proglen=%u pass=%d image=%p\n",
- flen, proglen, pass, image);
+ /* Note that we output the address of code_base
+ * rather than image, since the opcodes live in code_base.
+ */
+ bpf_jit_dump(flen, proglen, pass, code_base);
if (image) {
- if (bpf_jit_enable > 1)
- print_hex_dump(KERN_ERR, "JIT code: ",
- DUMP_PREFIX_ADDRESS,
- 16, 1, code_base,
- proglen, false);
-
bpf_flush_icache(code_base, code_base + (proglen/4));
/* Function descriptor nastiness: Address + TOC */
((u64 *)image)[0] = (u64)code_base;
diff --git a/arch/powerpc/perf/Makefile b/arch/powerpc/perf/Makefile
index af3fac23768c..510fae10513d 100644
--- a/arch/powerpc/perf/Makefile
+++ b/arch/powerpc/perf/Makefile
@@ -2,9 +2,10 @@ subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
obj-$(CONFIG_PERF_EVENTS) += callchain.o
-obj-$(CONFIG_PPC_PERF_CTRS) += core-book3s.o
+obj-$(CONFIG_PPC_PERF_CTRS) += core-book3s.o bhrb.o
obj64-$(CONFIG_PPC_PERF_CTRS) += power4-pmu.o ppc970-pmu.o power5-pmu.o \
- power5+-pmu.o power6-pmu.o power7-pmu.o
+ power5+-pmu.o power6-pmu.o power7-pmu.o \
+ power8-pmu.o
obj32-$(CONFIG_PPC_PERF_CTRS) += mpc7450-pmu.o
obj-$(CONFIG_FSL_EMB_PERF_EVENT) += core-fsl-emb.o
diff --git a/arch/powerpc/perf/bhrb.S b/arch/powerpc/perf/bhrb.S
new file mode 100644
index 000000000000..d85f9a58ddbc
--- /dev/null
+++ b/arch/powerpc/perf/bhrb.S
@@ -0,0 +1,44 @@
+/*
+ * Basic assembly code to read BHRB entries
+ *
+ * Copyright 2013 Anshuman Khandual, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <asm/ppc_asm.h>
+#include <asm/ppc-opcode.h>
+
+ .text
+
+.balign 8
+
+/* r3 = n (where n = [0-31])
+ * The maximum number of BHRB entries supported by the PPC_MFBHRBE instruction
+ * is 1024. We limit the number of table entries here because POWER8 implements
+ * only 32 BHRB entries.
+ */
+
+/* .global read_bhrb */
+_GLOBAL(read_bhrb)
+ cmpldi r3,31
+ bgt 1f
+ ld r4,bhrb_table@got(r2)
+ sldi r3,r3,3
+ add r3,r4,r3
+ mtctr r3
+ bctr
+1: li r3,0
+ blr
+
+#define MFBHRB_TABLE1(n) PPC_MFBHRBE(R3,n); blr
+#define MFBHRB_TABLE2(n) MFBHRB_TABLE1(n); MFBHRB_TABLE1(n+1)
+#define MFBHRB_TABLE4(n) MFBHRB_TABLE2(n); MFBHRB_TABLE2(n+2)
+#define MFBHRB_TABLE8(n) MFBHRB_TABLE4(n); MFBHRB_TABLE4(n+4)
+#define MFBHRB_TABLE16(n) MFBHRB_TABLE8(n); MFBHRB_TABLE8(n+8)
+#define MFBHRB_TABLE32(n) MFBHRB_TABLE16(n); MFBHRB_TABLE16(n+16)
+
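+/* Each MFBHRB_TABLE1() expansion is an mfbhrbe followed by a blr, i.e. two
+ * 4-byte instructions, so entry n lives at bhrb_table + 8*n.  read_bhrb()
+ * depends on this when it scales the index with "sldi r3,r3,3" above and
+ * branches into the table through CTR.
+ */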
+bhrb_table:
+ MFBHRB_TABLE32(0)
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 65362e98eb26..c627843c5b2e 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -19,6 +19,11 @@
#include <asm/firmware.h>
#include <asm/ptrace.h>
+#define BHRB_MAX_ENTRIES 32
+#define BHRB_TARGET 0x0000000000000002
+#define BHRB_PREDICTION 0x0000000000000001
+#define BHRB_EA 0xFFFFFFFFFFFFFFFC
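+/* A raw BHRB entry packs the branch effective address in bits 63:2, with
+ * bit 1 marking a target-address entry and bit 0 carrying the prediction
+ * bit; power_pmu_bhrb_read() below splits each entry with these masks.
+ */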
+
struct cpu_hw_events {
int n_events;
int n_percpu;
@@ -38,7 +43,15 @@ struct cpu_hw_events {
unsigned int group_flag;
int n_txn_start;
+
+ /* BHRB bits */
+ u64 bhrb_filter; /* BHRB HW branch filter */
+ int bhrb_users;
+ void *bhrb_context;
+ struct perf_branch_stack bhrb_stack;
+ struct perf_branch_entry bhrb_entries[BHRB_MAX_ENTRIES];
};
+
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
struct power_pmu *ppmu;
@@ -89,6 +102,11 @@ static inline int siar_valid(struct pt_regs *regs)
#endif /* CONFIG_PPC32 */
+static bool regs_use_siar(struct pt_regs *regs)
+{
+ return !!(regs->result & 1);
+}
+
/*
* Things that are specific to 64-bit implementations.
*/
@@ -98,11 +116,12 @@ static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
{
unsigned long mmcra = regs->dsisr;
- if ((mmcra & MMCRA_SAMPLE_ENABLE) && !(ppmu->flags & PPMU_ALT_SIPR)) {
+ if ((ppmu->flags & PPMU_HAS_SSLOT) && (mmcra & MMCRA_SAMPLE_ENABLE)) {
unsigned long slot = (mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT;
if (slot > 1)
return 4 * (slot - 1);
}
+
return 0;
}
@@ -130,24 +149,35 @@ static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
*addrp = mfspr(SPRN_SDAR);
}
-static bool mmcra_sihv(unsigned long mmcra)
+static bool regs_sihv(struct pt_regs *regs)
{
unsigned long sihv = MMCRA_SIHV;
+ if (ppmu->flags & PPMU_HAS_SIER)
+ return !!(regs->dar & SIER_SIHV);
+
if (ppmu->flags & PPMU_ALT_SIPR)
sihv = POWER6_MMCRA_SIHV;
- return !!(mmcra & sihv);
+ return !!(regs->dsisr & sihv);
}
-static bool mmcra_sipr(unsigned long mmcra)
+static bool regs_sipr(struct pt_regs *regs)
{
unsigned long sipr = MMCRA_SIPR;
+ if (ppmu->flags & PPMU_HAS_SIER)
+ return !!(regs->dar & SIER_SIPR);
+
if (ppmu->flags & PPMU_ALT_SIPR)
sipr = POWER6_MMCRA_SIPR;
- return !!(mmcra & sipr);
+ return !!(regs->dsisr & sipr);
+}
+
+static bool regs_no_sipr(struct pt_regs *regs)
+{
+ return !!(regs->result & 2);
}
static inline u32 perf_flags_from_msr(struct pt_regs *regs)
@@ -161,8 +191,7 @@ static inline u32 perf_flags_from_msr(struct pt_regs *regs)
static inline u32 perf_get_misc_flags(struct pt_regs *regs)
{
- unsigned long mmcra = regs->dsisr;
- unsigned long use_siar = regs->result;
+ bool use_siar = regs_use_siar(regs);
if (!use_siar)
return perf_flags_from_msr(regs);
@@ -173,7 +202,7 @@ static inline u32 perf_get_misc_flags(struct pt_regs *regs)
* SIAR which should give slightly more reliable
* results
*/
- if (ppmu->flags & PPMU_NO_SIPR) {
+ if (regs_no_sipr(regs)) {
unsigned long siar = mfspr(SPRN_SIAR);
if (siar >= PAGE_OFFSET)
return PERF_RECORD_MISC_KERNEL;
@@ -181,16 +210,19 @@ static inline u32 perf_get_misc_flags(struct pt_regs *regs)
}
/* PR has priority over HV, so order below is important */
- if (mmcra_sipr(mmcra))
+ if (regs_sipr(regs))
return PERF_RECORD_MISC_USER;
- if (mmcra_sihv(mmcra) && (freeze_events_kernel != MMCR0_FCHV))
+
+ if (regs_sihv(regs) && (freeze_events_kernel != MMCR0_FCHV))
return PERF_RECORD_MISC_HYPERVISOR;
+
return PERF_RECORD_MISC_KERNEL;
}
/*
* Overload regs->dsisr to store MMCRA so we only need to read it once
* on each interrupt.
+ * Overload regs->dar to store SIER if we have it.
* Overload regs->result to specify whether we should use the MSR (result
* is zero) or the SIAR (result is non zero).
*/
@@ -200,6 +232,24 @@ static inline void perf_read_regs(struct pt_regs *regs)
int marked = mmcra & MMCRA_SAMPLE_ENABLE;
int use_siar;
+ regs->dsisr = mmcra;
+ regs->result = 0;
+
+ if (ppmu->flags & PPMU_NO_SIPR)
+ regs->result |= 2;
+
+ /*
+ * On power8 if we're in random sampling mode, the SIER is updated.
+ * If we're in continuous sampling mode, we don't have SIPR.
+ */
+ if (ppmu->flags & PPMU_HAS_SIER) {
+ if (marked)
+ regs->dar = mfspr(SPRN_SIER);
+ else
+ regs->result |= 2;
+ }
+
/*
* If this isn't a PMU exception (eg a software event) the SIAR is
* not valid. Use pt_regs.
@@ -223,13 +273,12 @@ static inline void perf_read_regs(struct pt_regs *regs)
use_siar = 1;
else if ((ppmu->flags & PPMU_NO_CONT_SAMPLING))
use_siar = 0;
- else if (!(ppmu->flags & PPMU_NO_SIPR) && mmcra_sipr(mmcra))
+ else if (!regs_no_sipr(regs) && regs_sipr(regs))
use_siar = 0;
else
use_siar = 1;
- regs->dsisr = mmcra;
- regs->result = use_siar;
+ regs->result |= use_siar;
}
/*
@@ -822,6 +871,9 @@ static void power_pmu_enable(struct pmu *pmu)
}
out:
+ if (cpuhw->bhrb_users)
+ ppmu->config_bhrb(cpuhw->bhrb_filter);
+
local_irq_restore(flags);
}
@@ -852,6 +904,47 @@ static int collect_events(struct perf_event *group, int max_count,
return n;
}
+/* Reset all possible BHRB entries */
+static void power_pmu_bhrb_reset(void)
+{
+ asm volatile(PPC_CLRBHRB);
+}
+
+void power_pmu_bhrb_enable(struct perf_event *event)
+{
+ struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+
+ if (!ppmu->bhrb_nr)
+ return;
+
+ /* Clear BHRB if we changed task context to avoid data leaks */
+ if (event->ctx->task && cpuhw->bhrb_context != event->ctx) {
+ power_pmu_bhrb_reset();
+ cpuhw->bhrb_context = event->ctx;
+ }
+ cpuhw->bhrb_users++;
+}
+
+void power_pmu_bhrb_disable(struct perf_event *event)
+{
+ struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+
+ if (!ppmu->bhrb_nr)
+ return;
+
+ cpuhw->bhrb_users--;
+ WARN_ON_ONCE(cpuhw->bhrb_users < 0);
+
+ if (!cpuhw->disabled && !cpuhw->bhrb_users) {
+ /* BHRB cannot be turned off when other
+ * events are active on the PMU.
+ */
+
+ /* avoid stale pointer */
+ cpuhw->bhrb_context = NULL;
+ }
+}
+
/*
* Add a event to the PMU.
* If all events are not already frozen, then we disable and
@@ -911,6 +1004,9 @@ nocheck:
ret = 0;
out:
+ if (has_branch_stack(event))
+ power_pmu_bhrb_enable(event);
+
perf_pmu_enable(event->pmu);
local_irq_restore(flags);
return ret;
@@ -963,6 +1059,9 @@ static void power_pmu_del(struct perf_event *event, int ef_flags)
cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE);
}
+ if (has_branch_stack(event))
+ power_pmu_bhrb_disable(event);
+
perf_pmu_enable(event->pmu);
local_irq_restore(flags);
}
@@ -1081,6 +1180,15 @@ int power_pmu_commit_txn(struct pmu *pmu)
return 0;
}
+/* Called from ctxsw to prevent one process's branch entries from
+ * mingling with another process's entries during a context switch.
+ */
+void power_pmu_flush_branch_stack(void)
+{
+ if (ppmu->bhrb_nr)
+ power_pmu_bhrb_reset();
+}
+
/*
* Return 1 if we might be able to put event on a limited PMC,
* or 0 if not.
@@ -1195,9 +1303,11 @@ static int power_pmu_event_init(struct perf_event *event)
if (!ppmu)
return -ENOENT;
- /* does not support taken branch sampling */
- if (has_branch_stack(event))
- return -EOPNOTSUPP;
+ if (has_branch_stack(event)) {
+ /* PMU has BHRB enabled */
+ if (!(ppmu->flags & PPMU_BHRB))
+ return -EOPNOTSUPP;
+ }
switch (event->attr.type) {
case PERF_TYPE_HARDWARE:
@@ -1278,6 +1388,15 @@ static int power_pmu_event_init(struct perf_event *event)
cpuhw = &get_cpu_var(cpu_hw_events);
err = power_check_constraints(cpuhw, events, cflags, n + 1);
+
+ if (has_branch_stack(event)) {
+ cpuhw->bhrb_filter = ppmu->bhrb_filter_map(
+ event->attr.branch_sample_type);
+
+ if(cpuhw->bhrb_filter == -1)
+ return -EOPNOTSUPP;
+ }
+
put_cpu_var(cpu_hw_events);
if (err)
return -EINVAL;
@@ -1336,8 +1455,79 @@ struct pmu power_pmu = {
.cancel_txn = power_pmu_cancel_txn,
.commit_txn = power_pmu_commit_txn,
.event_idx = power_pmu_event_idx,
+ .flush_branch_stack = power_pmu_flush_branch_stack,
};
+/* Processing BHRB entries */
+void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw)
+{
+ u64 val;
+ u64 addr;
+ int r_index, u_index, target, pred;
+
+ r_index = 0;
+ u_index = 0;
+ while (r_index < ppmu->bhrb_nr) {
+ /* Assembly read function */
+ val = read_bhrb(r_index);
+
+ /* Terminal marker: End of valid BHRB entries */
+ if (val == 0) {
+ break;
+ } else {
+ /* BHRB field break up */
+ addr = val & BHRB_EA;
+ pred = val & BHRB_PREDICTION;
+ target = val & BHRB_TARGET;
+
+ /* Probable Missed entry: Not applicable for POWER8 */
+ if ((addr == 0) && (target == 0) && (pred == 1)) {
+ r_index++;
+ continue;
+ }
+
+ /* Real Missed entry: POWER8-specific missed entry */
+ if ((addr == 0) && (target != 0) && (pred == 1)) {
+ r_index++;
+ continue;
+ }
+
+ /* Reserved condition: Not a valid entry */
+ if ((addr == 0) && (target != 0) && (pred == 0)) {
+ r_index++;
+ continue;
+ }
+
+ /* Is a target address */
+ if (val & BHRB_TARGET) {
+ /* First address cannot be a target address */
+ if (r_index == 0) {
+ r_index++;
+ continue;
+ }
+
+ /* Update target address for the previous entry */
+ cpuhw->bhrb_entries[u_index - 1].to = addr;
+ cpuhw->bhrb_entries[u_index - 1].mispred = pred;
+ cpuhw->bhrb_entries[u_index - 1].predicted = ~pred;
+
+ /* Don't increment u_index */
+ r_index++;
+ } else {
+ /* Update address, flags for current entry */
+ cpuhw->bhrb_entries[u_index].from = addr;
+ cpuhw->bhrb_entries[u_index].mispred = pred;
+ cpuhw->bhrb_entries[u_index].predicted = ~pred;
+
+ /* Successfully populated one entry */
+ u_index++;
+ r_index++;
+ }
+ }
+ }
+ cpuhw->bhrb_stack.nr = u_index;
+ return;
+}
/*
* A counter has overflowed; update its count and record
@@ -1397,6 +1587,13 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
if (event->attr.sample_type & PERF_SAMPLE_ADDR)
perf_get_data_addr(regs, &data.addr);
+ if (event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK) {
+ struct cpu_hw_events *cpuhw;
+ cpuhw = &__get_cpu_var(cpu_hw_events);
+ power_pmu_bhrb_read(cpuhw);
+ data.br_stack = &cpuhw->bhrb_stack;
+ }
+
if (perf_event_overflow(event, &data, regs))
power_pmu_stop(event, 0);
}
@@ -1422,7 +1619,7 @@ unsigned long perf_misc_flags(struct pt_regs *regs)
*/
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
- unsigned long use_siar = regs->result;
+ bool use_siar = regs_use_siar(regs);
if (use_siar && siar_valid(regs))
return mfspr(SPRN_SIAR) + perf_ip_adjust(regs);
diff --git a/arch/powerpc/perf/power5+-pmu.c b/arch/powerpc/perf/power5+-pmu.c
index a8757baa28f3..b03b6dc0172d 100644
--- a/arch/powerpc/perf/power5+-pmu.c
+++ b/arch/powerpc/perf/power5+-pmu.c
@@ -671,7 +671,7 @@ static struct power_pmu power5p_pmu = {
.get_alternatives = power5p_get_alternatives,
.disable_pmc = power5p_disable_pmc,
.limited_pmc_event = power5p_limited_pmc_event,
- .flags = PPMU_LIMITED_PMC5_6,
+ .flags = PPMU_LIMITED_PMC5_6 | PPMU_HAS_SSLOT,
.n_generic = ARRAY_SIZE(power5p_generic_events),
.generic_events = power5p_generic_events,
.cache_events = &power5p_cache_events,
diff --git a/arch/powerpc/perf/power5-pmu.c b/arch/powerpc/perf/power5-pmu.c
index e7f06eb7a861..1e8ce423c3af 100644
--- a/arch/powerpc/perf/power5-pmu.c
+++ b/arch/powerpc/perf/power5-pmu.c
@@ -615,6 +615,7 @@ static struct power_pmu power5_pmu = {
.n_generic = ARRAY_SIZE(power5_generic_events),
.generic_events = power5_generic_events,
.cache_events = &power5_cache_events,
+ .flags = PPMU_HAS_SSLOT,
};
static int __init init_power5_pmu(void)
diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c
new file mode 100644
index 000000000000..f7d1c4fff303
--- /dev/null
+++ b/arch/powerpc/perf/power8-pmu.c
@@ -0,0 +1,592 @@
+/*
+ * Performance counter support for POWER8 processors.
+ *
+ * Copyright 2009 Paul Mackerras, IBM Corporation.
+ * Copyright 2013 Michael Ellerman, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/perf_event.h>
+#include <asm/firmware.h>
+
+
+/*
+ * Some power8 event codes.
+ */
+#define PM_CYC 0x0001e
+#define PM_GCT_NOSLOT_CYC 0x100f8
+#define PM_CMPLU_STALL 0x4000a
+#define PM_INST_CMPL 0x00002
+#define PM_BRU_FIN 0x10068
+#define PM_BR_MPRED_CMPL 0x400f6
+
+
+/*
+ * Raw event encoding for POWER8:
+ *
+ * 60 56 52 48 44 40 36 32
+ * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
+ * [ thresh_cmp ] [ thresh_ctl ]
+ * |
+ * thresh start/stop OR FAB match -*
+ *
+ * 28 24 20 16 12 8 4 0
+ * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
+ * [ ] [ sample ] [cache] [ pmc ] [unit ] c m [ pmcxsel ]
+ * | | | | |
+ * | | | | *- mark
+ * | | *- L1/L2/L3 cache_sel |
+ * | | |
+ * | *- sampling mode for marked events *- combine
+ * |
+ * *- thresh_sel
+ *
+ * Below uses IBM bit numbering.
+ *
+ * MMCR1[x:y] = unit (PMCxUNIT)
+ * MMCR1[x] = combine (PMCxCOMB)
+ *
+ * if pmc == 3 and unit == 0 and pmcxsel[0:6] == 0b0101011
+ * # PM_MRK_FAB_RSP_MATCH
+ * MMCR1[20:27] = thresh_ctl (FAB_CRESP_MATCH / FAB_TYPE_MATCH)
+ * else if pmc == 4 and unit == 0xf and pmcxsel[0:6] == 0b0101001
+ * # PM_MRK_FAB_RSP_MATCH_CYC
+ * MMCR1[20:27] = thresh_ctl (FAB_CRESP_MATCH / FAB_TYPE_MATCH)
+ * else
+ * MMCRA[48:55] = thresh_ctl (THRESH START/END)
+ *
+ * if thresh_sel:
+ * MMCRA[45:47] = thresh_sel
+ *
+ * if thresh_cmp:
+ * MMCRA[22:24] = thresh_cmp[0:2]
+ * MMCRA[25:31] = thresh_cmp[3:9]
+ *
+ * if unit == 6 or unit == 7
+ * MMCRC[53:55] = cache_sel[1:3] (L2EVENT_SEL)
+ * else if unit == 8 or unit == 9:
+ * if cache_sel[0] == 0: # L3 bank
+ * MMCRC[47:49] = cache_sel[1:3] (L3EVENT_SEL0)
+ * else if cache_sel[0] == 1:
+ * MMCRC[50:51] = cache_sel[2:3] (L3EVENT_SEL1)
+ * else if cache_sel[1]: # L1 event
+ * MMCR1[16] = cache_sel[2]
+ * MMCR1[17] = cache_sel[3]
+ *
+ * if mark:
+ * MMCRA[63] = 1 (SAMPLE_ENABLE)
+ * MMCRA[57:59] = sample[0:2] (RAND_SAMP_ELIG)
+ * MMCRA[61:62] = sample[3:4] (RAND_SAMP_MODE)
+ *
+ */
+
+#define EVENT_THR_CMP_SHIFT 40 /* Threshold CMP value */
+#define EVENT_THR_CMP_MASK 0x3ff
+#define EVENT_THR_CTL_SHIFT 32 /* Threshold control value (start/stop) */
+#define EVENT_THR_CTL_MASK 0xffull
+#define EVENT_THR_SEL_SHIFT 29 /* Threshold select value */
+#define EVENT_THR_SEL_MASK 0x7
+#define EVENT_THRESH_SHIFT 29 /* All threshold bits */
+#define EVENT_THRESH_MASK 0x1fffffull
+#define EVENT_SAMPLE_SHIFT 24 /* Sampling mode & eligibility */
+#define EVENT_SAMPLE_MASK 0x1f
+#define EVENT_CACHE_SEL_SHIFT 20 /* L2/L3 cache select */
+#define EVENT_CACHE_SEL_MASK 0xf
+#define EVENT_IS_L1 (4 << EVENT_CACHE_SEL_SHIFT)
+#define EVENT_PMC_SHIFT 16 /* PMC number (1-based) */
+#define EVENT_PMC_MASK 0xf
+#define EVENT_UNIT_SHIFT 12 /* Unit */
+#define EVENT_UNIT_MASK 0xf
+#define EVENT_COMBINE_SHIFT 11 /* Combine bit */
+#define EVENT_COMBINE_MASK 0x1
+#define EVENT_MARKED_SHIFT 8 /* Marked bit */
+#define EVENT_MARKED_MASK 0x1
+#define EVENT_IS_MARKED (EVENT_MARKED_MASK << EVENT_MARKED_SHIFT)
+#define EVENT_PSEL_MASK 0xff /* PMCxSEL value */
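+
+/*
+ * As a worked example of the layout above, PM_GCT_NOSLOT_CYC (0x100f8)
+ * decodes to pmc = (0x100f8 >> 16) & 0xf = 1, unit = (0x100f8 >> 12) & 0xf
+ * = 0 and pmcxsel = 0xf8, i.e. counter PMC1 with no unit qualifier.
+ */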
+
+/* MMCRA IFM bits - POWER8 */
+#define POWER8_MMCRA_IFM1 0x0000000040000000UL
+#define POWER8_MMCRA_IFM2 0x0000000080000000UL
+#define POWER8_MMCRA_IFM3 0x00000000C0000000UL
+
+#define ONLY_PLM \
+ (PERF_SAMPLE_BRANCH_USER |\
+ PERF_SAMPLE_BRANCH_KERNEL |\
+ PERF_SAMPLE_BRANCH_HV)
+
+/*
+ * Layout of constraint bits:
+ *
+ * 60 56 52 48 44 40 36 32
+ * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
+ * [ fab_match ] [ thresh_cmp ] [ thresh_ctl ] [ ]
+ * |
+ * thresh_sel -*
+ *
+ * 28 24 20 16 12 8 4 0
+ * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
+ * [ ] [ sample ] [ ] [6] [5] [4] [3] [2] [1]
+ * | |
+ * L1 I/D qualifier -* | Count of events for each PMC.
+ * | p1, p2, p3, p4, p5, p6.
+ * nc - number of counters -*
+ *
+ * The PMC fields P1..P6, and NC, are adder fields. As we accumulate constraints
+ * we want the low bit of each field to be added to any existing value.
+ *
+ * Everything else is a value field.
+ */
+
+#define CNST_FAB_MATCH_VAL(v) (((v) & EVENT_THR_CTL_MASK) << 56)
+#define CNST_FAB_MATCH_MASK CNST_FAB_MATCH_VAL(EVENT_THR_CTL_MASK)
+
+/* We just throw all the threshold bits into the constraint */
+#define CNST_THRESH_VAL(v) (((v) & EVENT_THRESH_MASK) << 32)
+#define CNST_THRESH_MASK CNST_THRESH_VAL(EVENT_THRESH_MASK)
+
+#define CNST_L1_QUAL_VAL(v) (((v) & 3) << 22)
+#define CNST_L1_QUAL_MASK CNST_L1_QUAL_VAL(3)
+
+#define CNST_SAMPLE_VAL(v) (((v) & EVENT_SAMPLE_MASK) << 16)
+#define CNST_SAMPLE_MASK CNST_SAMPLE_VAL(EVENT_SAMPLE_MASK)
+
+/*
+ * For NC we are counting up to 4 events. This requires three bits, and we need
+ * the fifth event to overflow and set the 4th bit. To achieve that we bias the
+ * fields by 3 in test_adder.
+ */
+#define CNST_NC_SHIFT 12
+#define CNST_NC_VAL (1 << CNST_NC_SHIFT)
+#define CNST_NC_MASK (8 << CNST_NC_SHIFT)
+#define POWER8_TEST_ADDER (3 << CNST_NC_SHIFT)
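+
+/*
+ * Worked example: with the bias of 3 pre-loaded by test_adder, four events
+ * each adding CNST_NC_VAL give 3 + 4 = 7, which stays below bit 15; a fifth
+ * event pushes the field to 8 << CNST_NC_SHIFT, which collides with
+ * CNST_NC_MASK and fails the constraint check.
+ */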
+
+/*
+ * For the per-PMC fields we have two bits. The low bit is added, so if two
+ * events ask for the same PMC the sum will overflow, setting the high bit,
+ * indicating an error. So our mask sets the high bit.
+ */
+#define CNST_PMC_SHIFT(pmc) ((pmc - 1) * 2)
+#define CNST_PMC_VAL(pmc) (1 << CNST_PMC_SHIFT(pmc))
+#define CNST_PMC_MASK(pmc) (2 << CNST_PMC_SHIFT(pmc))
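+
+/*
+ * E.g. two events that both request PMC 3 each contribute
+ * CNST_PMC_VAL(3) = 1 << 4; their sum 2 << 4 equals CNST_PMC_MASK(3), so
+ * the overflow into the high bit is reported as a conflict.
+ */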
+
+/* Our add_fields is defined as: */
+#define POWER8_ADD_FIELDS \
+ CNST_PMC_VAL(1) | CNST_PMC_VAL(2) | CNST_PMC_VAL(3) | \
+ CNST_PMC_VAL(4) | CNST_PMC_VAL(5) | CNST_PMC_VAL(6) | CNST_NC_VAL
+
+
+/* Bits in MMCR1 for POWER8 */
+#define MMCR1_UNIT_SHIFT(pmc) (60 - (4 * ((pmc) - 1)))
+#define MMCR1_COMBINE_SHIFT(pmc) (35 - ((pmc) - 1))
+#define MMCR1_PMCSEL_SHIFT(pmc) (24 - (((pmc) - 1)) * 8)
+#define MMCR1_DC_QUAL_SHIFT 47
+#define MMCR1_IC_QUAL_SHIFT 46
+
+/* Bits in MMCRA for POWER8 */
+#define MMCRA_SAMP_MODE_SHIFT 1
+#define MMCRA_SAMP_ELIG_SHIFT 4
+#define MMCRA_THR_CTL_SHIFT 8
+#define MMCRA_THR_SEL_SHIFT 16
+#define MMCRA_THR_CMP_SHIFT 32
+#define MMCRA_SDAR_MODE_TLB (1ull << 42)
+
+
+static inline bool event_is_fab_match(u64 event)
+{
+ /* Only check pmc, unit and pmcxsel, ignore the edge bit (0) */
+ event &= 0xff0fe;
+
+ /* PM_MRK_FAB_RSP_MATCH & PM_MRK_FAB_RSP_MATCH_CYC */
+ return (event == 0x30056 || event == 0x4f052);
+}
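+
+/*
+ * Under the 0xff0fe mask, 0x30056 keeps pmc = 3, unit = 0, pmcxsel = 0x56
+ * and 0x4f052 keeps pmc = 4, unit = 0xf, pmcxsel = 0x52, matching the two
+ * special cases described in the raw event encoding comment above.
+ */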
+
+static int power8_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
+{
+ unsigned int unit, pmc, cache;
+ unsigned long mask, value;
+
+ mask = value = 0;
+
+ pmc = (event >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
+ unit = (event >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK;
+ cache = (event >> EVENT_CACHE_SEL_SHIFT) & EVENT_CACHE_SEL_MASK;
+
+ if (pmc) {
+ if (pmc > 6)
+ return -1;
+
+ mask |= CNST_PMC_MASK(pmc);
+ value |= CNST_PMC_VAL(pmc);
+
+ if (pmc >= 5 && event != 0x500fa && event != 0x600f4)
+ return -1;
+ }
+
+ if (pmc <= 4) {
+ /*
+ * Add to number of counters in use. Note this includes events with
+ * a PMC of 0 - they still need a PMC, it's just assigned later.
+ * Don't count events on PMC 5 & 6, there is only one valid event
+ * on each of those counters, and they are handled above.
+ */
+ mask |= CNST_NC_MASK;
+ value |= CNST_NC_VAL;
+ }
+
+ if (unit >= 6 && unit <= 9) {
+ /*
+ * L2/L3 events contain a cache selector field, which is
+ * supposed to be programmed into MMCRC. However MMCRC is only
+ * HV writable, and there is no API for guest kernels to modify
+ * it. The solution is for the hypervisor to initialise the
+ * field to zeroes, and for us to only ever allow events that
+ * have a cache selector of zero.
+ */
+ if (cache)
+ return -1;
+
+ } else if (event & EVENT_IS_L1) {
+ mask |= CNST_L1_QUAL_MASK;
+ value |= CNST_L1_QUAL_VAL(cache);
+ }
+
+ if (event & EVENT_IS_MARKED) {
+ mask |= CNST_SAMPLE_MASK;
+ value |= CNST_SAMPLE_VAL(event >> EVENT_SAMPLE_SHIFT);
+ }
+
+ /*
+ * Special case for PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC,
+ * the threshold control bits are used for the match value.
+ */
+ if (event_is_fab_match(event)) {
+ mask |= CNST_FAB_MATCH_MASK;
+ value |= CNST_FAB_MATCH_VAL(event >> EVENT_THR_CTL_SHIFT);
+ } else {
+ /*
+ * Check the mantissa upper two bits are not zero, unless the
+ * exponent is also zero. See the THRESH_CMP_MANTISSA doc.
+ */
+ unsigned int cmp, exp;
+
+ cmp = (event >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK;
+ exp = cmp >> 7;
+
+ if (exp && (cmp & 0x60) == 0)
+ return -1;
+
+ mask |= CNST_THRESH_MASK;
+ value |= CNST_THRESH_VAL(event >> EVENT_THRESH_SHIFT);
+ }
+
+ *maskp = mask;
+ *valp = value;
+
+ return 0;
+}
+
+static int power8_compute_mmcr(u64 event[], int n_ev,
+ unsigned int hwc[], unsigned long mmcr[])
+{
+ unsigned long mmcra, mmcr1, unit, combine, psel, cache, val;
+ unsigned int pmc, pmc_inuse;
+ int i;
+
+ pmc_inuse = 0;
+
+ /* First pass to count resource use */
+ for (i = 0; i < n_ev; ++i) {
+ pmc = (event[i] >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
+ if (pmc)
+ pmc_inuse |= 1 << pmc;
+ }
+
+ /* In continuous sampling mode, update SDAR on TLB miss */
+ mmcra = MMCRA_SDAR_MODE_TLB;
+ mmcr1 = 0;
+
+ /* Second pass: assign PMCs, set all MMCR1 fields */
+ for (i = 0; i < n_ev; ++i) {
+ pmc = (event[i] >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
+ unit = (event[i] >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK;
+ combine = (event[i] >> EVENT_COMBINE_SHIFT) & EVENT_COMBINE_MASK;
+ psel = event[i] & EVENT_PSEL_MASK;
+
+ if (!pmc) {
+ for (pmc = 1; pmc <= 4; ++pmc) {
+ if (!(pmc_inuse & (1 << pmc)))
+ break;
+ }
+
+ pmc_inuse |= 1 << pmc;
+ }
+
+ if (pmc <= 4) {
+ mmcr1 |= unit << MMCR1_UNIT_SHIFT(pmc);
+ mmcr1 |= combine << MMCR1_COMBINE_SHIFT(pmc);
+ mmcr1 |= psel << MMCR1_PMCSEL_SHIFT(pmc);
+ }
+
+ if (event[i] & EVENT_IS_L1) {
+ cache = event[i] >> EVENT_CACHE_SEL_SHIFT;
+ mmcr1 |= (cache & 1) << MMCR1_IC_QUAL_SHIFT;
+ cache >>= 1;
+ mmcr1 |= (cache & 1) << MMCR1_DC_QUAL_SHIFT;
+ }
+
+ if (event[i] & EVENT_IS_MARKED) {
+ mmcra |= MMCRA_SAMPLE_ENABLE;
+
+ val = (event[i] >> EVENT_SAMPLE_SHIFT) & EVENT_SAMPLE_MASK;
+ if (val) {
+ mmcra |= (val & 3) << MMCRA_SAMP_MODE_SHIFT;
+ mmcra |= (val >> 2) << MMCRA_SAMP_ELIG_SHIFT;
+ }
+ }
+
+ /*
+ * PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC,
+ * the threshold bits are used for the match value.
+ */
+ if (event_is_fab_match(event[i])) {
+ mmcr1 |= (event[i] >> EVENT_THR_CTL_SHIFT) &
+ EVENT_THR_CTL_MASK;
+ } else {
+ val = (event[i] >> EVENT_THR_CTL_SHIFT) & EVENT_THR_CTL_MASK;
+ mmcra |= val << MMCRA_THR_CTL_SHIFT;
+ val = (event[i] >> EVENT_THR_SEL_SHIFT) & EVENT_THR_SEL_MASK;
+ mmcra |= val << MMCRA_THR_SEL_SHIFT;
+ val = (event[i] >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK;
+ mmcra |= val << MMCRA_THR_CMP_SHIFT;
+ }
+
+ hwc[i] = pmc - 1;
+ }
+
+ /* Return MMCRx values */
+ mmcr[0] = 0;
+
+ /* pmc_inuse is 1-based */
+ if (pmc_inuse & 2)
+ mmcr[0] = MMCR0_PMC1CE;
+
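+ /* 0x7c covers bits 2-6 of the 1-based pmc_inuse mask, i.e. PMCs 2-6 */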
+ if (pmc_inuse & 0x7c)
+ mmcr[0] |= MMCR0_PMCjCE;
+
+ mmcr[1] = mmcr1;
+ mmcr[2] = mmcra;
+
+ return 0;
+}
+
+#define MAX_ALT 2
+
+/* Table of alternatives, sorted by column 0 */
+static const unsigned int event_alternatives[][MAX_ALT] = {
+ { 0x10134, 0x301e2 }, /* PM_MRK_ST_CMPL */
+ { 0x10138, 0x40138 }, /* PM_BR_MRK_2PATH */
+ { 0x18082, 0x3e05e }, /* PM_L3_CO_MEPF */
+ { 0x1d14e, 0x401e8 }, /* PM_MRK_DATA_FROM_L2MISS */
+ { 0x1e054, 0x4000a }, /* PM_CMPLU_STALL */
+ { 0x20036, 0x40036 }, /* PM_BR_2PATH */
+ { 0x200f2, 0x300f2 }, /* PM_INST_DISP */
+ { 0x200f4, 0x600f4 }, /* PM_RUN_CYC */
+ { 0x2013c, 0x3012e }, /* PM_MRK_FILT_MATCH */
+ { 0x3e054, 0x400f0 }, /* PM_LD_MISS_L1 */
+ { 0x400fa, 0x500fa }, /* PM_RUN_INST_CMPL */
+};
+
+/*
+ * Scan the alternatives table for a match and return the
+ * index into the alternatives table if found, else -1.
+ */
+static int find_alternative(u64 event)
+{
+ int i, j;
+
+ for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) {
+ if (event < event_alternatives[i][0])
+ break;
+
+ for (j = 0; j < MAX_ALT && event_alternatives[i][j]; ++j)
+ if (event == event_alternatives[i][j])
+ return i;
+ }
+
+ return -1;
+}
+
+static int power8_get_alternatives(u64 event, unsigned int flags, u64 alt[])
+{
+ int i, j, num_alt = 0;
+ u64 alt_event;
+
+ alt[num_alt++] = event;
+
+ i = find_alternative(event);
+ if (i >= 0) {
+ /* Filter out the original event, it's already in alt[0] */
+ for (j = 0; j < MAX_ALT; ++j) {
+ alt_event = event_alternatives[i][j];
+ if (alt_event && alt_event != event)
+ alt[num_alt++] = alt_event;
+ }
+ }
+
+ if (flags & PPMU_ONLY_COUNT_RUN) {
+ /*
+ * We're only counting in RUN state, so PM_CYC is equivalent to
+ * PM_RUN_CYC and PM_INST_CMPL to PM_RUN_INST_CMPL.
+ */
+ j = num_alt;
+ for (i = 0; i < num_alt; ++i) {
+ switch (alt[i]) {
+ case 0x1e: /* PM_CYC */
+ alt[j++] = 0x600f4; /* PM_RUN_CYC */
+ break;
+ case 0x600f4: /* PM_RUN_CYC */
+ alt[j++] = 0x1e;
+ break;
+ case 0x2: /* PM_PPC_CMPL */
+ alt[j++] = 0x500fa; /* PM_RUN_INST_CMPL */
+ break;
+ case 0x500fa: /* PM_RUN_INST_CMPL */
+ alt[j++] = 0x2; /* PM_PPC_CMPL */
+ break;
+ }
+ }
+ num_alt = j;
+ }
+
+ return num_alt;
+}
+
+static void power8_disable_pmc(unsigned int pmc, unsigned long mmcr[])
+{
+ if (pmc <= 3)
+ mmcr[1] &= ~(0xffUL << MMCR1_PMCSEL_SHIFT(pmc + 1));
+}
+
+PMU_FORMAT_ATTR(event, "config:0-49");
+PMU_FORMAT_ATTR(pmcxsel, "config:0-7");
+PMU_FORMAT_ATTR(mark, "config:8");
+PMU_FORMAT_ATTR(combine, "config:11");
+PMU_FORMAT_ATTR(unit, "config:12-15");
+PMU_FORMAT_ATTR(pmc, "config:16-19");
+PMU_FORMAT_ATTR(cache_sel, "config:20-23");
+PMU_FORMAT_ATTR(sample_mode, "config:24-28");
+PMU_FORMAT_ATTR(thresh_sel, "config:29-31");
+PMU_FORMAT_ATTR(thresh_stop, "config:32-35");
+PMU_FORMAT_ATTR(thresh_start, "config:36-39");
+PMU_FORMAT_ATTR(thresh_cmp, "config:40-49");
+
+static struct attribute *power8_pmu_format_attr[] = {
+ &format_attr_event.attr,
+ &format_attr_pmcxsel.attr,
+ &format_attr_mark.attr,
+ &format_attr_combine.attr,
+ &format_attr_unit.attr,
+ &format_attr_pmc.attr,
+ &format_attr_cache_sel.attr,
+ &format_attr_sample_mode.attr,
+ &format_attr_thresh_sel.attr,
+ &format_attr_thresh_stop.attr,
+ &format_attr_thresh_start.attr,
+ &format_attr_thresh_cmp.attr,
+ NULL,
+};
+
+struct attribute_group power8_pmu_format_group = {
+ .name = "format",
+ .attrs = power8_pmu_format_attr,
+};
+
+static const struct attribute_group *power8_pmu_attr_groups[] = {
+ &power8_pmu_format_group,
+ NULL,
+};
+
+static int power8_generic_events[] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = PM_CYC,
+ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = PM_GCT_NOSLOT_CYC,
+ [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = PM_CMPLU_STALL,
+ [PERF_COUNT_HW_INSTRUCTIONS] = PM_INST_CMPL,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PM_BRU_FIN,
+ [PERF_COUNT_HW_BRANCH_MISSES] = PM_BR_MPRED_CMPL,
+};
+
+static u64 power8_bhrb_filter_map(u64 branch_sample_type)
+{
+ u64 pmu_bhrb_filter = 0;
+ u64 br_privilege = branch_sample_type & ONLY_PLM;
+
+ /* BHRB and regular PMU events share the same privilege state
+ * filter configuration. BHRB is always recorded along with a
+ * regular PMU event, so the privilege state filter criteria for BHRB
+ * and the companion PMU event have to be the same. By default the
+ * "perf record" tool sets all privilege bits ON when no filter
+ * criteria are provided on the command line. So as long as all
+ * privilege bits are ON, or they are all OFF, we are good to go.
+ */
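+ /* For instance, a request for user-only branch sampling leaves just
+ * PERF_SAMPLE_BRANCH_USER set (br_privilege == 1) and is rejected
+ * below, while the tool's default of user + kernel + hv (== 7) or a
+ * fully clear privilege mask (== 0) is accepted.
+ */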
+ if ((br_privilege != 7) && (br_privilege != 0))
+ return -1;
+
+ /* No branch filter requested */
+ if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY)
+ return pmu_bhrb_filter;
+
+ /* Invalid branch filter options - HW does not support */
+ if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_RETURN)
+ return -1;
+
+ if (branch_sample_type & PERF_SAMPLE_BRANCH_IND_CALL)
+ return -1;
+
+ if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_CALL) {
+ pmu_bhrb_filter |= POWER8_MMCRA_IFM1;
+ return pmu_bhrb_filter;
+ }
+
+ /* Everything else is unsupported */
+ return -1;
+}
+
+static void power8_config_bhrb(u64 pmu_bhrb_filter)
+{
+ /* Enable BHRB filter in PMU */
+ mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter));
+}
+
+static struct power_pmu power8_pmu = {
+ .name = "POWER8",
+ .n_counter = 6,
+ .max_alternatives = MAX_ALT + 1,
+ .add_fields = POWER8_ADD_FIELDS,
+ .test_adder = POWER8_TEST_ADDER,
+ .compute_mmcr = power8_compute_mmcr,
+ .config_bhrb = power8_config_bhrb,
+ .bhrb_filter_map = power8_bhrb_filter_map,
+ .get_constraint = power8_get_constraint,
+ .get_alternatives = power8_get_alternatives,
+ .disable_pmc = power8_disable_pmc,
+ .flags = PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_BHRB,
+ .n_generic = ARRAY_SIZE(power8_generic_events),
+ .generic_events = power8_generic_events,
+ .attr_groups = power8_pmu_attr_groups,
+ .bhrb_nr = 32,
+};
+
+static int __init init_power8_pmu(void)
+{
+ if (!cur_cpu_spec->oprofile_cpu_type ||
+ strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power8"))
+ return -ENODEV;
+
+ return register_power_pmu(&power8_pmu);
+}
+early_initcall(init_power8_pmu);
diff --git a/arch/powerpc/platforms/40x/Kconfig b/arch/powerpc/platforms/40x/Kconfig
index a392d12dd21f..bd40bbb15e14 100644
--- a/arch/powerpc/platforms/40x/Kconfig
+++ b/arch/powerpc/platforms/40x/Kconfig
@@ -20,7 +20,6 @@ config HOTFOOT
bool "Hotfoot"
depends on 40x
default n
- select 405EP
select PPC40x_SIMPLE
select PCI
help
@@ -105,9 +104,6 @@ config 405GP
select IBM405_ERR51
select IBM_EMAC_ZMII
-config 405EP
- bool
-
config 405EX
bool
select IBM_EMAC_EMAC4
@@ -119,9 +115,6 @@ config 405EZ
select IBM_EMAC_MAL_CLR_ICINTSTAT
select IBM_EMAC_MAL_COMMON_ERR
-config 405GPR
- bool
-
config XILINX_VIRTEX
bool
select DEFAULT_UIMAGE
diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig
index 0effe9f5a1ea..7be93367d92f 100644
--- a/arch/powerpc/platforms/44x/Kconfig
+++ b/arch/powerpc/platforms/44x/Kconfig
@@ -274,6 +274,8 @@ config 440EPX
select IBM_EMAC_EMAC4
select IBM_EMAC_RGMII
select IBM_EMAC_ZMII
+ select USB_EHCI_BIG_ENDIAN_MMIO
+ select USB_EHCI_BIG_ENDIAN_DESC
config 440GRX
bool
diff --git a/arch/powerpc/platforms/512x/Kconfig b/arch/powerpc/platforms/512x/Kconfig
index c16999802ecf..fc9c1cbfcb1d 100644
--- a/arch/powerpc/platforms/512x/Kconfig
+++ b/arch/powerpc/platforms/512x/Kconfig
@@ -7,6 +7,8 @@ config PPC_MPC512x
select PPC_PCI_CHOICE
select FSL_PCI if PCI
select ARCH_WANT_OPTIONAL_GPIOLIB
+ select USB_EHCI_BIG_ENDIAN_MMIO
+ select USB_EHCI_BIG_ENDIAN_DESC
config MPC5121_ADS
bool "Freescale MPC5121E ADS"
@@ -15,16 +17,16 @@ config MPC5121_ADS
help
This option enables support for the MPC5121E ADS board.
-config MPC5121_GENERIC
- bool "Generic support for simple MPC5121 based boards"
+config MPC512x_GENERIC
+ bool "Generic support for simple MPC512x based boards"
depends on PPC_MPC512x
select DEFAULT_UIMAGE
help
- This option enables support for simple MPC5121 based boards
+ This option enables support for simple MPC512x based boards
which do not need custom platform specific setup.
Compatible boards include: Protonic LVT base boards (ZANMCU
- and VICVT2).
+ and VICVT2), Freescale MPC5125 Tower system.
config PDM360NG
bool "ifm PDM360NG board"
diff --git a/arch/powerpc/platforms/512x/Makefile b/arch/powerpc/platforms/512x/Makefile
index 4efc1c4b6fb5..72fb9340e09f 100644
--- a/arch/powerpc/platforms/512x/Makefile
+++ b/arch/powerpc/platforms/512x/Makefile
@@ -3,5 +3,5 @@
#
obj-y += clock.o mpc512x_shared.o
obj-$(CONFIG_MPC5121_ADS) += mpc5121_ads.o mpc5121_ads_cpld.o
-obj-$(CONFIG_MPC5121_GENERIC) += mpc5121_generic.o
+obj-$(CONFIG_MPC512x_GENERIC) += mpc512x_generic.o
obj-$(CONFIG_PDM360NG) += pdm360ng.o
diff --git a/arch/powerpc/platforms/512x/clock.c b/arch/powerpc/platforms/512x/clock.c
index 52d57d281724..e504166e089a 100644
--- a/arch/powerpc/platforms/512x/clock.c
+++ b/arch/powerpc/platforms/512x/clock.c
@@ -29,6 +29,8 @@
#include <asm/mpc5121.h>
#include <asm/clk_interface.h>
+#include "mpc512x.h"
+
#undef CLK_DEBUG
static int clocks_initialized;
@@ -683,8 +685,13 @@ static void psc_clks_init(void)
struct device_node *np;
struct platform_device *ofdev;
u32 reg;
+ const char *psc_compat;
+
+ psc_compat = mpc512x_select_psc_compat();
+ if (!psc_compat)
+ return;
- for_each_compatible_node(np, NULL, "fsl,mpc5121-psc") {
+ for_each_compatible_node(np, NULL, psc_compat) {
if (!of_property_read_u32(np, "reg", &reg)) {
int pscnum = (reg & 0xf00) >> 8;
struct clk *clk = psc_dev_clk(pscnum);
diff --git a/arch/powerpc/platforms/512x/mpc512x.h b/arch/powerpc/platforms/512x/mpc512x.h
index c32b399eb952..0a8e60023944 100644
--- a/arch/powerpc/platforms/512x/mpc512x.h
+++ b/arch/powerpc/platforms/512x/mpc512x.h
@@ -15,6 +15,7 @@ extern void __init mpc512x_init_IRQ(void);
extern void __init mpc512x_init(void);
extern int __init mpc5121_clk_init(void);
void __init mpc512x_declare_of_platform_devices(void);
+extern const char *mpc512x_select_psc_compat(void);
extern void mpc512x_restart(char *cmd);
#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
diff --git a/arch/powerpc/platforms/512x/mpc5121_generic.c b/arch/powerpc/platforms/512x/mpc512x_generic.c
index ca1ca6669990..5fb919b30924 100644
--- a/arch/powerpc/platforms/512x/mpc5121_generic.c
+++ b/arch/powerpc/platforms/512x/mpc512x_generic.c
@@ -4,7 +4,7 @@
* Author: John Rigby, <jrigby@freescale.com>
*
* Description:
- * MPC5121 SoC setup
+ * MPC512x SoC setup
*
* This is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by
@@ -28,20 +28,22 @@
*/
static const char * const board[] __initconst = {
"prt,prtlvt",
+ "fsl,mpc5125ads",
+ "ifm,ac14xx",
NULL
};
/*
* Called very early, MMU is off, device-tree isn't unflattened
*/
-static int __init mpc5121_generic_probe(void)
+static int __init mpc512x_generic_probe(void)
{
return of_flat_dt_match(of_get_flat_dt_root(), board);
}
-define_machine(mpc5121_generic) {
- .name = "MPC5121 generic",
- .probe = mpc5121_generic_probe,
+define_machine(mpc512x_generic) {
+ .name = "MPC512x generic",
+ .probe = mpc512x_generic_probe,
.init = mpc512x_init,
.init_early = mpc512x_init_diu,
.setup_arch = mpc512x_setup_diu,
diff --git a/arch/powerpc/platforms/512x/mpc512x_shared.c b/arch/powerpc/platforms/512x/mpc512x_shared.c
index d30235b7e3f7..6eb94ab99d39 100644
--- a/arch/powerpc/platforms/512x/mpc512x_shared.c
+++ b/arch/powerpc/platforms/512x/mpc512x_shared.c
@@ -172,12 +172,9 @@ static struct fsl_diu_shared_fb __attribute__ ((__aligned__(8))) diu_shared_fb;
static inline void mpc512x_free_bootmem(struct page *page)
{
- __ClearPageReserved(page);
BUG_ON(PageTail(page));
BUG_ON(atomic_read(&page->_count) > 1);
- atomic_set(&page->_count, 1);
- __free_page(page);
- totalram_pages++;
+ free_reserved_page(page);
}
void mpc512x_release_bootmem(void)
@@ -330,26 +327,34 @@ void __init mpc512x_init_IRQ(void)
static struct of_device_id __initdata of_bus_ids[] = {
{ .compatible = "fsl,mpc5121-immr", },
{ .compatible = "fsl,mpc5121-localbus", },
+ { .compatible = "fsl,mpc5121-mbx", },
+ { .compatible = "fsl,mpc5121-nfc", },
+ { .compatible = "fsl,mpc5121-sram", },
+ { .compatible = "fsl,mpc5121-pci", },
+ { .compatible = "gpio-leds", },
{},
};
void __init mpc512x_declare_of_platform_devices(void)
{
- struct device_node *np;
-
if (of_platform_bus_probe(NULL, of_bus_ids, NULL))
printk(KERN_ERR __FILE__ ": "
"Error while probing of_platform bus\n");
-
- np = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-nfc");
- if (np) {
- of_platform_device_create(np, NULL, NULL);
- of_node_put(np);
- }
}
#define DEFAULT_FIFO_SIZE 16
+const char *mpc512x_select_psc_compat(void)
+{
+ if (of_machine_is_compatible("fsl,mpc5121"))
+ return "fsl,mpc5121-psc";
+
+ if (of_machine_is_compatible("fsl,mpc5125"))
+ return "fsl,mpc5125-psc";
+
+ return NULL;
+}
+
static unsigned int __init get_fifo_size(struct device_node *np,
char *prop_name)
{
@@ -375,9 +380,16 @@ void __init mpc512x_psc_fifo_init(void)
void __iomem *psc;
unsigned int tx_fifo_size;
unsigned int rx_fifo_size;
+ const char *psc_compat;
int fifobase = 0; /* current fifo address in 32 bit words */
- for_each_compatible_node(np, NULL, "fsl,mpc5121-psc") {
+ psc_compat = mpc512x_select_psc_compat();
+ if (!psc_compat) {
+ pr_err("%s: no compatible devices found\n", __func__);
+ return;
+ }
+
+ for_each_compatible_node(np, NULL, psc_compat) {
tx_fifo_size = get_fifo_size(np, "fsl,tx-fifo-size");
rx_fifo_size = get_fifo_size(np, "fsl,rx-fifo-size");
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig
index a0dcd577fb0d..8f02b05f4c96 100644
--- a/arch/powerpc/platforms/85xx/Kconfig
+++ b/arch/powerpc/platforms/85xx/Kconfig
@@ -305,6 +305,40 @@ config PPC_QEMU_E500
unset based on the emulated CPU (or actual host CPU in the case
of KVM).
+if PPC64
+
+config T4240_QDS
+ bool "Freescale T4240 QDS"
+ select DEFAULT_UIMAGE
+ select E500
+ select PPC_E500MC
+ select PHYS_64BIT
+ select SWIOTLB
+ select ARCH_REQUIRE_GPIOLIB
+ select GPIO_MPC8XXX
+ select HAS_RAPIDIO
+ select PPC_EPAPR_HV_PIC
+ help
+ This option enables support for the T4240 QDS board
+
+config B4_QDS
+ bool "Freescale B4 QDS"
+ select DEFAULT_UIMAGE
+ select E500
+ select PPC_E500MC
+ select PHYS_64BIT
+ select SWIOTLB
+ select GENERIC_GPIO
+ select ARCH_REQUIRE_GPIOLIB
+ select HAS_RAPIDIO
+ select PPC_EPAPR_HV_PIC
+ help
+ This option enables support for the B4 QDS board.
+ The B4 QDS application development system is a complete
+ debugging environment intended for engineers developing
+ applications for the B4.
+
+endif
endif # FSL_SOC_BOOKE
config TQM85xx
diff --git a/arch/powerpc/platforms/85xx/Makefile b/arch/powerpc/platforms/85xx/Makefile
index 07d0dbb141c0..2eab37ea4a9d 100644
--- a/arch/powerpc/platforms/85xx/Makefile
+++ b/arch/powerpc/platforms/85xx/Makefile
@@ -22,6 +22,8 @@ obj-$(CONFIG_P3041_DS) += p3041_ds.o corenet_ds.o
obj-$(CONFIG_P4080_DS) += p4080_ds.o corenet_ds.o
obj-$(CONFIG_P5020_DS) += p5020_ds.o corenet_ds.o
obj-$(CONFIG_P5040_DS) += p5040_ds.o corenet_ds.o
+obj-$(CONFIG_T4240_QDS) += t4240_qds.o corenet_ds.o
+obj-$(CONFIG_B4_QDS) += b4_qds.o corenet_ds.o
obj-$(CONFIG_STX_GP3) += stx_gp3.o
obj-$(CONFIG_TQM85xx) += tqm85xx.o
obj-$(CONFIG_SBC8548) += sbc8548.o
diff --git a/arch/powerpc/platforms/85xx/b4_qds.c b/arch/powerpc/platforms/85xx/b4_qds.c
new file mode 100644
index 000000000000..0c6702f8b88e
--- /dev/null
+++ b/arch/powerpc/platforms/85xx/b4_qds.c
@@ -0,0 +1,102 @@
+/*
+ * B4 QDS Setup
+ * Should apply to the QDS platform of B4860 and its personalities,
+ * viz. B4860/B4420/B4220QDS.
+ *
+ * Copyright 2012 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/kdev_t.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/phy.h>
+
+#include <asm/time.h>
+#include <asm/machdep.h>
+#include <asm/pci-bridge.h>
+#include <mm/mmu_decl.h>
+#include <asm/prom.h>
+#include <asm/udbg.h>
+#include <asm/mpic.h>
+
+#include <linux/of_platform.h>
+#include <sysdev/fsl_soc.h>
+#include <sysdev/fsl_pci.h>
+#include <asm/ehv_pic.h>
+
+#include "corenet_ds.h"
+
+/*
+ * Called very early, device-tree isn't unflattened
+ */
+static int __init b4_qds_probe(void)
+{
+ unsigned long root = of_get_flat_dt_root();
+#ifdef CONFIG_SMP
+ extern struct smp_ops_t smp_85xx_ops;
+#endif
+
+ if ((of_flat_dt_is_compatible(root, "fsl,B4860QDS")) ||
+ (of_flat_dt_is_compatible(root, "fsl,B4420QDS")) ||
+ (of_flat_dt_is_compatible(root, "fsl,B4220QDS")))
+ return 1;
+
+ /* Check if we're running under the Freescale hypervisor */
+ if ((of_flat_dt_is_compatible(root, "fsl,B4860QDS-hv")) ||
+ (of_flat_dt_is_compatible(root, "fsl,B4420QDS-hv")) ||
+ (of_flat_dt_is_compatible(root, "fsl,B4220QDS-hv"))) {
+ ppc_md.init_IRQ = ehv_pic_init;
+ ppc_md.get_irq = ehv_pic_get_irq;
+ ppc_md.restart = fsl_hv_restart;
+ ppc_md.power_off = fsl_hv_halt;
+ ppc_md.halt = fsl_hv_halt;
+#ifdef CONFIG_SMP
+ /*
+ * Disable the timebase sync operations because we can't write
+ * to the timebase registers under the hypervisor.
+ */
+ smp_85xx_ops.give_timebase = NULL;
+ smp_85xx_ops.take_timebase = NULL;
+#endif
+ return 1;
+ }
+
+ return 0;
+}
+
+define_machine(b4_qds) {
+ .name = "B4 QDS",
+ .probe = b4_qds_probe,
+ .setup_arch = corenet_ds_setup_arch,
+ .init_IRQ = corenet_ds_pic_init,
+#ifdef CONFIG_PCI
+ .pcibios_fixup_bus = fsl_pcibios_fixup_bus,
+#endif
+/* coreint doesn't play nice with lazy EE, use legacy mpic for now */
+#ifdef CONFIG_PPC64
+ .get_irq = mpic_get_irq,
+#else
+ .get_irq = mpic_get_coreint_irq,
+#endif
+ .restart = fsl_rstcr_restart,
+ .calibrate_decr = generic_calibrate_decr,
+ .progress = udbg_progress,
+#ifdef CONFIG_PPC64
+ .power_save = book3e_idle,
+#else
+ .power_save = e500_idle,
+#endif
+};
+
+machine_arch_initcall(b4_qds, corenet_ds_publish_devices);
+
+#ifdef CONFIG_SWIOTLB
+machine_arch_initcall(b4_qds, swiotlb_setup_bus_notifier);
+#endif
diff --git a/arch/powerpc/platforms/85xx/corenet_ds.c b/arch/powerpc/platforms/85xx/corenet_ds.c
index 6f355d8c92f6..c59c617eee93 100644
--- a/arch/powerpc/platforms/85xx/corenet_ds.c
+++ b/arch/powerpc/platforms/85xx/corenet_ds.c
@@ -40,7 +40,7 @@ void __init corenet_ds_pic_init(void)
if (ppc_md.get_irq == mpic_get_coreint_irq)
flags |= MPIC_ENABLE_COREINT;
- mpic = mpic_alloc(NULL, 0, flags, 0, 256, " OpenPIC ");
+ mpic = mpic_alloc(NULL, 0, flags, 0, 512, " OpenPIC ");
BUG_ON(mpic == NULL);
mpic_init(mpic);
@@ -83,6 +83,9 @@ static const struct of_device_id of_device_ids[] = {
{
.compatible = "fsl,qoriq-pcie-v2.4",
},
+ {
+ .compatible = "fsl,qoriq-pcie-v3.0",
+ },
/* The following two are for the Freescale hypervisor */
{
.name = "hypervisor",
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index 148c2f2d9780..6a1759939c6b 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -201,7 +201,7 @@ static int __cpuinit smp_85xx_kick_cpu(int nr)
* We don't set the BPTR register here since it already points
* to the boot page properly.
*/
- mpic_reset_core(hw_cpu);
+ mpic_reset_core(nr);
/*
* wait until core is ready...
diff --git a/arch/powerpc/platforms/85xx/t4240_qds.c b/arch/powerpc/platforms/85xx/t4240_qds.c
new file mode 100644
index 000000000000..5998e9f33304
--- /dev/null
+++ b/arch/powerpc/platforms/85xx/t4240_qds.c
@@ -0,0 +1,98 @@
+/*
+ * T4240 QDS Setup
+ *
+ * Maintained by Kumar Gala (see MAINTAINERS for contact information)
+ *
+ * Copyright 2012 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/kdev_t.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/phy.h>
+
+#include <asm/time.h>
+#include <asm/machdep.h>
+#include <asm/pci-bridge.h>
+#include <mm/mmu_decl.h>
+#include <asm/prom.h>
+#include <asm/udbg.h>
+#include <asm/mpic.h>
+
+#include <linux/of_platform.h>
+#include <sysdev/fsl_soc.h>
+#include <sysdev/fsl_pci.h>
+#include <asm/ehv_pic.h>
+
+#include "corenet_ds.h"
+
+/*
+ * Called very early, device-tree isn't unflattened
+ */
+static int __init t4240_qds_probe(void)
+{
+ unsigned long root = of_get_flat_dt_root();
+#ifdef CONFIG_SMP
+ extern struct smp_ops_t smp_85xx_ops;
+#endif
+
+ if (of_flat_dt_is_compatible(root, "fsl,T4240QDS"))
+ return 1;
+
+ /* Check if we're running under the Freescale hypervisor */
+ if (of_flat_dt_is_compatible(root, "fsl,T4240QDS-hv")) {
+ ppc_md.init_IRQ = ehv_pic_init;
+ ppc_md.get_irq = ehv_pic_get_irq;
+ ppc_md.restart = fsl_hv_restart;
+ ppc_md.power_off = fsl_hv_halt;
+ ppc_md.halt = fsl_hv_halt;
+#ifdef CONFIG_SMP
+ /*
+ * Disable the timebase sync operations because we can't write
+ * to the timebase registers under the hypervisor.
+ */
+ smp_85xx_ops.give_timebase = NULL;
+ smp_85xx_ops.take_timebase = NULL;
+#endif
+ return 1;
+ }
+
+ return 0;
+}
+
+define_machine(t4240_qds) {
+ .name = "T4240 QDS",
+ .probe = t4240_qds_probe,
+ .setup_arch = corenet_ds_setup_arch,
+ .init_IRQ = corenet_ds_pic_init,
+#ifdef CONFIG_PCI
+ .pcibios_fixup_bus = fsl_pcibios_fixup_bus,
+#endif
+/* coreint doesn't play nice with lazy EE, use legacy mpic for now */
+#ifdef CONFIG_PPC64
+ .get_irq = mpic_get_irq,
+#else
+ .get_irq = mpic_get_coreint_irq,
+#endif
+ .restart = fsl_rstcr_restart,
+ .calibrate_decr = generic_calibrate_decr,
+ .progress = udbg_progress,
+#ifdef CONFIG_PPC64
+ .power_save = book3e_idle,
+#else
+ .power_save = e500_idle,
+#endif
+};
+
+machine_arch_initcall(t4240_qds, corenet_ds_publish_devices);
+
+#ifdef CONFIG_SWIOTLB
+machine_arch_initcall(t4240_qds, swiotlb_setup_bus_notifier);
+#endif
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index 52de8bccfb30..34d224be93ba 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -6,7 +6,6 @@ source "arch/powerpc/platforms/chrp/Kconfig"
source "arch/powerpc/platforms/512x/Kconfig"
source "arch/powerpc/platforms/52xx/Kconfig"
source "arch/powerpc/platforms/powermac/Kconfig"
-source "arch/powerpc/platforms/prep/Kconfig"
source "arch/powerpc/platforms/maple/Kconfig"
source "arch/powerpc/platforms/pasemi/Kconfig"
source "arch/powerpc/platforms/ps3/Kconfig"
@@ -233,7 +232,7 @@ endmenu
config PPC601_SYNC_FIX
bool "Workarounds for PPC601 bugs"
- depends on 6xx && (PPC_PREP || PPC_PMAC)
+ depends on 6xx && PPC_PMAC
help
Some versions of the PPC601 (the first PowerPC chip) have bugs which
mean that extra synchronization instructions are required near
@@ -344,7 +343,6 @@ config FSL_ULI1575
config CPM
bool
- select PPC_CLOCK
config OF_RTC
bool
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 18e3b76c78d7..54f3936001aa 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -230,7 +230,7 @@ config PHYS_64BIT
config ALTIVEC
bool "AltiVec Support"
- depends on 6xx || POWER4
+ depends on 6xx || POWER4 || (PPC_E500MC && PPC64)
---help---
This option enables kernel support for the Altivec extensions to the
PowerPC processor. The kernel currently supports saving and restoring
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig
index 53aaefeb3386..9978f594cac0 100644
--- a/arch/powerpc/platforms/cell/Kconfig
+++ b/arch/powerpc/platforms/cell/Kconfig
@@ -113,34 +113,10 @@ config CBE_THERM
default m
depends on CBE_RAS && SPU_BASE
-config CBE_CPUFREQ
- tristate "CBE frequency scaling"
- depends on CBE_RAS && CPU_FREQ
- default m
- help
- This adds the cpufreq driver for Cell BE processors.
- For details, take a look at <file:Documentation/cpu-freq/>.
- If you don't have such processor, say N
-
-config CBE_CPUFREQ_PMI_ENABLE
- bool "CBE frequency scaling using PMI interface"
- depends on CBE_CPUFREQ
- default n
- help
- Select this, if you want to use the PMI interface
- to switch frequencies. Using PMI, the
- processor will not only be able to run at lower speed,
- but also at lower core voltage.
-
-config CBE_CPUFREQ_PMI
- tristate
- depends on CBE_CPUFREQ_PMI_ENABLE
- default CBE_CPUFREQ
-
config PPC_PMI
tristate
default y
- depends on CBE_CPUFREQ_PMI || PPC_IBM_CELL_POWERBUTTON
+ depends on CPU_FREQ_CBE_PMI || PPC_IBM_CELL_POWERBUTTON
help
PMI (Platform Management Interrupt) is a way to
communicate with the BMC (Baseboard Management Controller).
diff --git a/arch/powerpc/platforms/cell/Makefile b/arch/powerpc/platforms/cell/Makefile
index a4a89350bcfc..fe053e7c73ee 100644
--- a/arch/powerpc/platforms/cell/Makefile
+++ b/arch/powerpc/platforms/cell/Makefile
@@ -5,9 +5,6 @@ obj-$(CONFIG_PPC_CELL_NATIVE) += iommu.o setup.o spider-pic.o \
obj-$(CONFIG_CBE_RAS) += ras.o
obj-$(CONFIG_CBE_THERM) += cbe_thermal.o
-obj-$(CONFIG_CBE_CPUFREQ_PMI) += cbe_cpufreq_pmi.o
-obj-$(CONFIG_CBE_CPUFREQ) += cbe-cpufreq.o
-cbe-cpufreq-y += cbe_cpufreq_pervasive.o cbe_cpufreq.o
obj-$(CONFIG_CBE_CPUFREQ_SPU_GOVERNOR) += cpufreq_spudemand.o
obj-$(CONFIG_PPC_IBM_CELL_POWERBUTTON) += cbe_powerbutton.o
diff --git a/arch/powerpc/platforms/cell/beat_htab.c b/arch/powerpc/platforms/cell/beat_htab.c
index 0f6f83988b3d..246e1d8b3af3 100644
--- a/arch/powerpc/platforms/cell/beat_htab.c
+++ b/arch/powerpc/platforms/cell/beat_htab.c
@@ -90,7 +90,7 @@ static inline unsigned int beat_read_mask(unsigned hpte_group)
static long beat_lpar_hpte_insert(unsigned long hpte_group,
unsigned long vpn, unsigned long pa,
unsigned long rflags, unsigned long vflags,
- int psize, int ssize)
+ int psize, int apsize, int ssize)
{
unsigned long lpar_rc;
u64 hpte_v, hpte_r, slot;
@@ -103,9 +103,9 @@ static long beat_lpar_hpte_insert(unsigned long hpte_group,
"rflags=%lx, vflags=%lx, psize=%d)\n",
hpte_group, va, pa, rflags, vflags, psize);
- hpte_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M) |
+ hpte_v = hpte_encode_v(vpn, psize, apsize, MMU_SEGSIZE_256M) |
vflags | HPTE_V_VALID;
- hpte_r = hpte_encode_r(pa, psize) | rflags;
+ hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;
if (!(vflags & HPTE_V_BOLTED))
DBG_LOW(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);
@@ -191,7 +191,7 @@ static long beat_lpar_hpte_updatepp(unsigned long slot,
u64 dummy0, dummy1;
unsigned long want_v;
- want_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M);
+ want_v = hpte_encode_avpn(vpn, psize, MMU_SEGSIZE_256M);
DBG_LOW(" update: "
"avpnv=%016lx, slot=%016lx, psize: %d, newpp %016lx ... ",
@@ -228,7 +228,7 @@ static long beat_lpar_hpte_find(unsigned long vpn, int psize)
unsigned long want_v, hpte_v;
hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, MMU_SEGSIZE_256M);
- want_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M);
+ want_v = hpte_encode_avpn(vpn, psize, MMU_SEGSIZE_256M);
for (j = 0; j < 2; j++) {
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
@@ -283,7 +283,7 @@ static void beat_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn,
DBG_LOW(" inval : slot=%lx, va=%016lx, psize: %d, local: %d\n",
slot, va, psize, local);
- want_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M);
+ want_v = hpte_encode_avpn(vpn, psize, MMU_SEGSIZE_256M);
raw_spin_lock_irqsave(&beat_htab_lock, flags);
dummy1 = beat_lpar_hpte_getword0(slot);
@@ -314,7 +314,7 @@ void __init hpte_init_beat(void)
static long beat_lpar_hpte_insert_v3(unsigned long hpte_group,
unsigned long vpn, unsigned long pa,
unsigned long rflags, unsigned long vflags,
- int psize, int ssize)
+ int psize, int apsize, int ssize)
{
unsigned long lpar_rc;
u64 hpte_v, hpte_r, slot;
@@ -327,9 +327,9 @@ static long beat_lpar_hpte_insert_v3(unsigned long hpte_group,
"rflags=%lx, vflags=%lx, psize=%d)\n",
hpte_group, vpn, pa, rflags, vflags, psize);
- hpte_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M) |
+ hpte_v = hpte_encode_v(vpn, psize, apsize, MMU_SEGSIZE_256M) |
vflags | HPTE_V_VALID;
- hpte_r = hpte_encode_r(pa, psize) | rflags;
+ hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;
if (!(vflags & HPTE_V_BOLTED))
DBG_LOW(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);
@@ -372,8 +372,8 @@ static long beat_lpar_hpte_updatepp_v3(unsigned long slot,
unsigned long want_v;
unsigned long pss;
- want_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M);
- pss = (psize == MMU_PAGE_4K) ? -1UL : mmu_psize_defs[psize].penc;
+ want_v = hpte_encode_avpn(vpn, psize, MMU_SEGSIZE_256M);
+ pss = (psize == MMU_PAGE_4K) ? -1UL : mmu_psize_defs[psize].penc[psize];
DBG_LOW(" update: "
"avpnv=%016lx, slot=%016lx, psize: %d, newpp %016lx ... ",
@@ -402,8 +402,8 @@ static void beat_lpar_hpte_invalidate_v3(unsigned long slot, unsigned long vpn,
DBG_LOW(" inval : slot=%lx, vpn=%016lx, psize: %d, local: %d\n",
slot, vpn, psize, local);
- want_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M);
- pss = (psize == MMU_PAGE_4K) ? -1UL : mmu_psize_defs[psize].penc;
+ want_v = hpte_encode_avpn(vpn, psize, MMU_SEGSIZE_256M);
+ pss = (psize == MMU_PAGE_4K) ? -1UL : mmu_psize_defs[psize].penc[psize];
lpar_rc = beat_invalidate_htab_entry3(0, slot, want_v, pss);
diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq.c b/arch/powerpc/platforms/cell/cbe_cpufreq.c
deleted file mode 100644
index d4c39e32f147..000000000000
--- a/arch/powerpc/platforms/cell/cbe_cpufreq.c
+++ /dev/null
@@ -1,209 +0,0 @@
-/*
- * cpufreq driver for the cell processor
- *
- * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
- *
- * Author: Christian Krafft <krafft@de.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/cpufreq.h>
-#include <linux/module.h>
-#include <linux/of_platform.h>
-
-#include <asm/machdep.h>
-#include <asm/prom.h>
-#include <asm/cell-regs.h>
-#include "cbe_cpufreq.h"
-
-static DEFINE_MUTEX(cbe_switch_mutex);
-
-
-/* the CBE supports an 8 step frequency scaling */
-static struct cpufreq_frequency_table cbe_freqs[] = {
- {1, 0},
- {2, 0},
- {3, 0},
- {4, 0},
- {5, 0},
- {6, 0},
- {8, 0},
- {10, 0},
- {0, CPUFREQ_TABLE_END},
-};
-
-/*
- * hardware specific functions
- */
-
-static int set_pmode(unsigned int cpu, unsigned int slow_mode)
-{
- int rc;
-
- if (cbe_cpufreq_has_pmi)
- rc = cbe_cpufreq_set_pmode_pmi(cpu, slow_mode);
- else
- rc = cbe_cpufreq_set_pmode(cpu, slow_mode);
-
- pr_debug("register contains slow mode %d\n", cbe_cpufreq_get_pmode(cpu));
-
- return rc;
-}
-
-/*
- * cpufreq functions
- */
-
-static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
-{
- const u32 *max_freqp;
- u32 max_freq;
- int i, cur_pmode;
- struct device_node *cpu;
-
- cpu = of_get_cpu_node(policy->cpu, NULL);
-
- if (!cpu)
- return -ENODEV;
-
- pr_debug("init cpufreq on CPU %d\n", policy->cpu);
-
- /*
- * Let's check we can actually get to the CELL regs
- */
- if (!cbe_get_cpu_pmd_regs(policy->cpu) ||
- !cbe_get_cpu_mic_tm_regs(policy->cpu)) {
- pr_info("invalid CBE regs pointers for cpufreq\n");
- return -EINVAL;
- }
-
- max_freqp = of_get_property(cpu, "clock-frequency", NULL);
-
- of_node_put(cpu);
-
- if (!max_freqp)
- return -EINVAL;
-
- /* we need the freq in kHz */
- max_freq = *max_freqp / 1000;
-
- pr_debug("max clock-frequency is at %u kHz\n", max_freq);
- pr_debug("initializing frequency table\n");
-
- /* initialize frequency table */
- for (i=0; cbe_freqs[i].frequency!=CPUFREQ_TABLE_END; i++) {
- cbe_freqs[i].frequency = max_freq / cbe_freqs[i].index;
- pr_debug("%d: %d\n", i, cbe_freqs[i].frequency);
- }
-
- /* if DEBUG is enabled set_pmode() measures the latency
- * of a transition */
- policy->cpuinfo.transition_latency = 25000;
-
- cur_pmode = cbe_cpufreq_get_pmode(policy->cpu);
- pr_debug("current pmode is at %d\n",cur_pmode);
-
- policy->cur = cbe_freqs[cur_pmode].frequency;
-
-#ifdef CONFIG_SMP
- cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
-#endif
-
- cpufreq_frequency_table_get_attr(cbe_freqs, policy->cpu);
-
- /* this ensures that policy->cpuinfo_min
- * and policy->cpuinfo_max are set correctly */
- return cpufreq_frequency_table_cpuinfo(policy, cbe_freqs);
-}
-
-static int cbe_cpufreq_cpu_exit(struct cpufreq_policy *policy)
-{
- cpufreq_frequency_table_put_attr(policy->cpu);
- return 0;
-}
-
-static int cbe_cpufreq_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, cbe_freqs);
-}
-
-static int cbe_cpufreq_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
-{
- int rc;
- struct cpufreq_freqs freqs;
- unsigned int cbe_pmode_new;
-
- cpufreq_frequency_table_target(policy,
- cbe_freqs,
- target_freq,
- relation,
- &cbe_pmode_new);
-
- freqs.old = policy->cur;
- freqs.new = cbe_freqs[cbe_pmode_new].frequency;
- freqs.cpu = policy->cpu;
-
- mutex_lock(&cbe_switch_mutex);
- cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
-
- pr_debug("setting frequency for cpu %d to %d kHz, " \
- "1/%d of max frequency\n",
- policy->cpu,
- cbe_freqs[cbe_pmode_new].frequency,
- cbe_freqs[cbe_pmode_new].index);
-
- rc = set_pmode(policy->cpu, cbe_pmode_new);
-
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
- mutex_unlock(&cbe_switch_mutex);
-
- return rc;
-}
-
-static struct cpufreq_driver cbe_cpufreq_driver = {
- .verify = cbe_cpufreq_verify,
- .target = cbe_cpufreq_target,
- .init = cbe_cpufreq_cpu_init,
- .exit = cbe_cpufreq_cpu_exit,
- .name = "cbe-cpufreq",
- .owner = THIS_MODULE,
- .flags = CPUFREQ_CONST_LOOPS,
-};
-
-/*
- * module init and destroy
- */
-
-static int __init cbe_cpufreq_init(void)
-{
- if (!machine_is(cell))
- return -ENODEV;
-
- return cpufreq_register_driver(&cbe_cpufreq_driver);
-}
-
-static void __exit cbe_cpufreq_exit(void)
-{
- cpufreq_unregister_driver(&cbe_cpufreq_driver);
-}
-
-module_init(cbe_cpufreq_init);
-module_exit(cbe_cpufreq_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>");
diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq.h b/arch/powerpc/platforms/cell/cbe_cpufreq.h
deleted file mode 100644
index c1d86bfa92ff..000000000000
--- a/arch/powerpc/platforms/cell/cbe_cpufreq.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * cbe_cpufreq.h
- *
- * This file contains the definitions used by the cbe_cpufreq driver.
- *
- * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
- *
- * Author: Christian Krafft <krafft@de.ibm.com>
- *
- */
-
-#include <linux/cpufreq.h>
-#include <linux/types.h>
-
-int cbe_cpufreq_set_pmode(int cpu, unsigned int pmode);
-int cbe_cpufreq_get_pmode(int cpu);
-
-int cbe_cpufreq_set_pmode_pmi(int cpu, unsigned int pmode);
-
-#if defined(CONFIG_CBE_CPUFREQ_PMI) || defined(CONFIG_CBE_CPUFREQ_PMI_MODULE)
-extern bool cbe_cpufreq_has_pmi;
-#else
-#define cbe_cpufreq_has_pmi (0)
-#endif
diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq_pervasive.c b/arch/powerpc/platforms/cell/cbe_cpufreq_pervasive.c
deleted file mode 100644
index 20472e487b6f..000000000000
--- a/arch/powerpc/platforms/cell/cbe_cpufreq_pervasive.c
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * pervasive backend for the cbe_cpufreq driver
- *
- * This driver makes use of the pervasive unit to
- * engage the desired frequency.
- *
- * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
- *
- * Author: Christian Krafft <krafft@de.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/time.h>
-#include <asm/machdep.h>
-#include <asm/hw_irq.h>
-#include <asm/cell-regs.h>
-
-#include "cbe_cpufreq.h"
-
-/* to write to MIC register */
-static u64 MIC_Slow_Fast_Timer_table[] = {
- [0 ... 7] = 0x007fc00000000000ull,
-};
-
-/* more values for the MIC */
-static u64 MIC_Slow_Next_Timer_table[] = {
- 0x0000240000000000ull,
- 0x0000268000000000ull,
- 0x000029C000000000ull,
- 0x00002D0000000000ull,
- 0x0000300000000000ull,
- 0x0000334000000000ull,
- 0x000039C000000000ull,
- 0x00003FC000000000ull,
-};
-
-
-int cbe_cpufreq_set_pmode(int cpu, unsigned int pmode)
-{
- struct cbe_pmd_regs __iomem *pmd_regs;
- struct cbe_mic_tm_regs __iomem *mic_tm_regs;
- unsigned long flags;
- u64 value;
-#ifdef DEBUG
- long time;
-#endif
-
- local_irq_save(flags);
-
- mic_tm_regs = cbe_get_cpu_mic_tm_regs(cpu);
- pmd_regs = cbe_get_cpu_pmd_regs(cpu);
-
-#ifdef DEBUG
- time = jiffies;
-#endif
-
- out_be64(&mic_tm_regs->slow_fast_timer_0, MIC_Slow_Fast_Timer_table[pmode]);
- out_be64(&mic_tm_regs->slow_fast_timer_1, MIC_Slow_Fast_Timer_table[pmode]);
-
- out_be64(&mic_tm_regs->slow_next_timer_0, MIC_Slow_Next_Timer_table[pmode]);
- out_be64(&mic_tm_regs->slow_next_timer_1, MIC_Slow_Next_Timer_table[pmode]);
-
- value = in_be64(&pmd_regs->pmcr);
- /* set bits to zero */
- value &= 0xFFFFFFFFFFFFFFF8ull;
- /* set bits to next pmode */
- value |= pmode;
-
- out_be64(&pmd_regs->pmcr, value);
-
-#ifdef DEBUG
- /* wait until new pmode appears in status register */
- value = in_be64(&pmd_regs->pmsr) & 0x07;
- while (value != pmode) {
- cpu_relax();
- value = in_be64(&pmd_regs->pmsr) & 0x07;
- }
-
- time = jiffies - time;
- time = jiffies_to_msecs(time);
- pr_debug("had to wait %lu ms for a transition using " \
- "pervasive unit\n", time);
-#endif
- local_irq_restore(flags);
-
- return 0;
-}
-
-
-int cbe_cpufreq_get_pmode(int cpu)
-{
- int ret;
- struct cbe_pmd_regs __iomem *pmd_regs;
-
- pmd_regs = cbe_get_cpu_pmd_regs(cpu);
- ret = in_be64(&pmd_regs->pmsr) & 0x07;
-
- return ret;
-}
-
diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq_pmi.c b/arch/powerpc/platforms/cell/cbe_cpufreq_pmi.c
deleted file mode 100644
index 60a07a4f9326..000000000000
--- a/arch/powerpc/platforms/cell/cbe_cpufreq_pmi.c
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
- * pmi backend for the cbe_cpufreq driver
- *
- * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
- *
- * Author: Christian Krafft <krafft@de.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/timer.h>
-#include <linux/module.h>
-#include <linux/of_platform.h>
-
-#include <asm/processor.h>
-#include <asm/prom.h>
-#include <asm/pmi.h>
-#include <asm/cell-regs.h>
-
-#ifdef DEBUG
-#include <asm/time.h>
-#endif
-
-#include "cbe_cpufreq.h"
-
-static u8 pmi_slow_mode_limit[MAX_CBE];
-
-bool cbe_cpufreq_has_pmi = false;
-EXPORT_SYMBOL_GPL(cbe_cpufreq_has_pmi);
-
-/*
- * hardware specific functions
- */
-
-int cbe_cpufreq_set_pmode_pmi(int cpu, unsigned int pmode)
-{
- int ret;
- pmi_message_t pmi_msg;
-#ifdef DEBUG
- long time;
-#endif
- pmi_msg.type = PMI_TYPE_FREQ_CHANGE;
- pmi_msg.data1 = cbe_cpu_to_node(cpu);
- pmi_msg.data2 = pmode;
-
-#ifdef DEBUG
- time = jiffies;
-#endif
- pmi_send_message(pmi_msg);
-
-#ifdef DEBUG
- time = jiffies - time;
- time = jiffies_to_msecs(time);
- pr_debug("had to wait %lu ms for a transition using " \
- "PMI\n", time);
-#endif
- ret = pmi_msg.data2;
- pr_debug("PMI returned slow mode %d\n", ret);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(cbe_cpufreq_set_pmode_pmi);
-
-
-static void cbe_cpufreq_handle_pmi(pmi_message_t pmi_msg)
-{
- u8 node, slow_mode;
-
- BUG_ON(pmi_msg.type != PMI_TYPE_FREQ_CHANGE);
-
- node = pmi_msg.data1;
- slow_mode = pmi_msg.data2;
-
- pmi_slow_mode_limit[node] = slow_mode;
-
- pr_debug("cbe_handle_pmi: node: %d max_freq: %d\n", node, slow_mode);
-}
-
-static int pmi_notifier(struct notifier_block *nb,
- unsigned long event, void *data)
-{
- struct cpufreq_policy *policy = data;
- struct cpufreq_frequency_table *cbe_freqs;
- u8 node;
-
- /* Should this really be called for CPUFREQ_ADJUST, CPUFREQ_INCOMPATIBLE
- * and CPUFREQ_NOTIFY policy events?)
- */
- if (event == CPUFREQ_START)
- return 0;
-
- cbe_freqs = cpufreq_frequency_get_table(policy->cpu);
- node = cbe_cpu_to_node(policy->cpu);
-
- pr_debug("got notified, event=%lu, node=%u\n", event, node);
-
- if (pmi_slow_mode_limit[node] != 0) {
- pr_debug("limiting node %d to slow mode %d\n",
- node, pmi_slow_mode_limit[node]);
-
- cpufreq_verify_within_limits(policy, 0,
-
- cbe_freqs[pmi_slow_mode_limit[node]].frequency);
- }
-
- return 0;
-}
-
-static struct notifier_block pmi_notifier_block = {
- .notifier_call = pmi_notifier,
-};
-
-static struct pmi_handler cbe_pmi_handler = {
- .type = PMI_TYPE_FREQ_CHANGE,
- .handle_pmi_message = cbe_cpufreq_handle_pmi,
-};
-
-
-
-static int __init cbe_cpufreq_pmi_init(void)
-{
- cbe_cpufreq_has_pmi = pmi_register_handler(&cbe_pmi_handler) == 0;
-
- if (!cbe_cpufreq_has_pmi)
- return -ENODEV;
-
- cpufreq_register_notifier(&pmi_notifier_block, CPUFREQ_POLICY_NOTIFIER);
-
- return 0;
-}
-
-static void __exit cbe_cpufreq_pmi_exit(void)
-{
- cpufreq_unregister_notifier(&pmi_notifier_block, CPUFREQ_POLICY_NOTIFIER);
- pmi_unregister_handler(&cbe_pmi_handler);
-}
-
-module_init(cbe_cpufreq_pmi_init);
-module_exit(cbe_cpufreq_pmi_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>");
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index e56bb651da1a..946306b1bb4e 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -550,7 +550,7 @@ static struct iommu_table *cell_get_iommu_table(struct device *dev)
*/
iommu = cell_iommu_for_node(dev_to_node(dev));
if (iommu == NULL || list_empty(&iommu->windows)) {
- printk(KERN_ERR "iommu: missing iommu for %s (node %d)\n",
+ dev_err(dev, "iommu: missing iommu for %s (node %d)\n",
of_node_full_name(dev->of_node), dev_to_node(dev));
return NULL;
}
diff --git a/arch/powerpc/platforms/cell/pmu.c b/arch/powerpc/platforms/cell/pmu.c
index 59c1a1694104..348a27b12512 100644
--- a/arch/powerpc/platforms/cell/pmu.c
+++ b/arch/powerpc/platforms/cell/pmu.c
@@ -382,7 +382,7 @@ static int __init cbe_init_pm_irq(void)
unsigned int irq;
int rc, node;
- for_each_node(node) {
+ for_each_online_node(node) {
irq = irq_create_mapping(NULL, IIC_IRQ_IOEX_PMI |
(node << IIC_IRQ_NODE_SHIFT));
if (irq == NO_IRQ) {
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 8b1213993b10..f85db3a69b4a 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -715,7 +715,7 @@ static ssize_t spu_stat_show(struct device *dev,
spu->stats.libassist);
}
-static DEVICE_ATTR(stat, 0644, spu_stat_show, NULL);
+static DEVICE_ATTR(stat, 0444, spu_stat_show, NULL);
#ifdef CONFIG_KEXEC
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 68c57d38745a..90986923a53a 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -149,7 +149,6 @@ static int __fops ## _open(struct inode *inode, struct file *file) \
return spufs_attr_open(inode, file, __get, __set, __fmt); \
} \
static const struct file_operations __fops = { \
- .owner = THIS_MODULE, \
.open = __fops ## _open, \
.release = spufs_attr_release, \
.read = spufs_attr_read, \
@@ -352,7 +351,7 @@ static unsigned long spufs_get_unmapped_area(struct file *file,
/* Else, try to obtain a 64K pages slice */
return slice_get_unmapped_area(addr, len, flags,
- MMU_PAGE_64K, 1, 0);
+ MMU_PAGE_64K, 1);
}
#endif /* CONFIG_SPU_FS_64K_LS */
@@ -2591,7 +2590,6 @@ static unsigned int spufs_switch_log_poll(struct file *file, poll_table *wait)
}
static const struct file_operations spufs_switch_log_fops = {
- .owner = THIS_MODULE,
.open = spufs_switch_log_open,
.read = spufs_switch_log_read,
.poll = spufs_switch_log_poll,
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 3f3bb4cdbbec..35f77a42bedf 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -99,6 +99,7 @@ spufs_new_inode(struct super_block *sb, umode_t mode)
if (!inode)
goto out;
+ inode->i_ino = get_next_ino();
inode->i_mode = mode;
inode->i_uid = current_fsuid();
inode->i_gid = current_fsgid();
diff --git a/arch/powerpc/platforms/chrp/pegasos_eth.c b/arch/powerpc/platforms/chrp/pegasos_eth.c
index 039fc8e82199..2b4dc6abde6c 100644
--- a/arch/powerpc/platforms/chrp/pegasos_eth.c
+++ b/arch/powerpc/platforms/chrp/pegasos_eth.c
@@ -47,6 +47,25 @@ static struct platform_device mv643xx_eth_shared_device = {
.resource = mv643xx_eth_shared_resources,
};
+/*
+ * The orion mdio driver only covers shared + 0x4 up to shared + 0x84 - 1
+ */
+static struct resource mv643xx_eth_mvmdio_resources[] = {
+ [0] = {
+ .name = "ethernet mdio base",
+ .start = 0xf1000000 + MV643XX_ETH_SHARED_REGS + 0x4,
+ .end = 0xf1000000 + MV643XX_ETH_SHARED_REGS + 0x83,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device mv643xx_eth_mvmdio_device = {
+ .name = "orion-mdio",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(mv643xx_eth_mvmdio_resources),
+ .resource = mv643xx_eth_mvmdio_resources,
+};
+
static struct resource mv643xx_eth_port1_resources[] = {
[0] = {
.name = "eth port1 irq",
@@ -82,6 +101,7 @@ static struct platform_device eth_port1_device = {
static struct platform_device *mv643xx_eth_pd_devs[] __initdata = {
&mv643xx_eth_shared_device,
+ &mv643xx_eth_mvmdio_device,
&eth_port1_device,
};
diff --git a/arch/powerpc/platforms/embedded6xx/Kconfig b/arch/powerpc/platforms/embedded6xx/Kconfig
index 5a8f50a9afa7..302ba43d73a1 100644
--- a/arch/powerpc/platforms/embedded6xx/Kconfig
+++ b/arch/powerpc/platforms/embedded6xx/Kconfig
@@ -9,7 +9,6 @@ config LINKSTATION
select FSL_SOC
select PPC_UDBG_16550 if SERIAL_8250
select DEFAULT_UIMAGE
- select MPC10X_OPENPIC
select MPC10X_BRIDGE
help
Select LINKSTATION if configuring for one of PPC- (MPC8241)
@@ -24,7 +23,6 @@ config STORCENTER
select MPIC
select FSL_SOC
select PPC_UDBG_16550 if SERIAL_8250
- select MPC10X_OPENPIC
select MPC10X_BRIDGE
help
Select STORCENTER if configuring for the iomega StorCenter
@@ -84,9 +82,6 @@ config MV64X60
select PPC_INDIRECT_PCI
select CHECK_CACHE_COHERENCY
-config MPC10X_OPENPIC
- bool
-
config GAMECUBE_COMMON
bool
diff --git a/arch/powerpc/platforms/embedded6xx/mpc10x.h b/arch/powerpc/platforms/embedded6xx/mpc10x.h
index b30a6a3b5bd2..b290b63661f1 100644
--- a/arch/powerpc/platforms/embedded6xx/mpc10x.h
+++ b/arch/powerpc/platforms/embedded6xx/mpc10x.h
@@ -81,17 +81,6 @@
#define MPC10X_MAPB_PCI_MEM_OFFSET (MPC10X_MAPB_ISA_MEM_BASE - \
MPC10X_MAPB_PCI_MEM_START)
-/* Set hose members to values appropriate for the mem map used */
-#define MPC10X_SETUP_HOSE(hose, map) { \
- (hose)->pci_mem_offset = MPC10X_MAP##map##_PCI_MEM_OFFSET; \
- (hose)->io_space.start = MPC10X_MAP##map##_PCI_IO_START; \
- (hose)->io_space.end = MPC10X_MAP##map##_PCI_IO_END; \
- (hose)->mem_space.start = MPC10X_MAP##map##_PCI_MEM_START; \
- (hose)->mem_space.end = MPC10X_MAP##map##_PCI_MEM_END; \
- (hose)->io_base_virt = (void *)MPC10X_MAP##map##_ISA_IO_BASE; \
-}
-
-
/* Miscellaneous Configuration register offsets */
#define MPC10X_CFG_PIR_REG 0x09
#define MPC10X_CFG_PIR_HOST_BRIDGE 0x00
diff --git a/arch/powerpc/platforms/pasemi/cpufreq.c b/arch/powerpc/platforms/pasemi/cpufreq.c
index 890f30e70f98..be1e7958909e 100644
--- a/arch/powerpc/platforms/pasemi/cpufreq.c
+++ b/arch/powerpc/platforms/pasemi/cpufreq.c
@@ -273,10 +273,9 @@ static int pas_cpufreq_target(struct cpufreq_policy *policy,
freqs.old = policy->cur;
freqs.new = pas_freqs[pas_astate_new].frequency;
- freqs.cpu = policy->cpu;
mutex_lock(&pas_switch_mutex);
- cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
pr_debug("setting frequency for cpu %d to %d kHz, 1/%d of max frequency\n",
policy->cpu,
@@ -288,7 +287,7 @@ static int pas_cpufreq_target(struct cpufreq_policy *policy,
for_each_online_cpu(i)
set_astate(i, pas_astate_new);
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
mutex_unlock(&pas_switch_mutex);
ppc_proc_freq = freqs.new * 1000ul;
diff --git a/arch/powerpc/platforms/powermac/cpufreq_32.c b/arch/powerpc/platforms/powermac/cpufreq_32.c
index 311b804353b1..3104fad82480 100644
--- a/arch/powerpc/platforms/powermac/cpufreq_32.c
+++ b/arch/powerpc/platforms/powermac/cpufreq_32.c
@@ -335,7 +335,8 @@ static int pmu_set_cpu_speed(int low_speed)
return 0;
}
-static int do_set_cpu_speed(int speed_mode, int notify)
+static int do_set_cpu_speed(struct cpufreq_policy *policy, int speed_mode,
+ int notify)
{
struct cpufreq_freqs freqs;
unsigned long l3cr;
@@ -343,13 +344,12 @@ static int do_set_cpu_speed(int speed_mode, int notify)
freqs.old = cur_freq;
freqs.new = (speed_mode == CPUFREQ_HIGH) ? hi_freq : low_freq;
- freqs.cpu = smp_processor_id();
if (freqs.old == freqs.new)
return 0;
if (notify)
- cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
if (speed_mode == CPUFREQ_LOW &&
cpu_has_feature(CPU_FTR_L3CR)) {
l3cr = _get_L3CR();
@@ -366,7 +366,7 @@ static int do_set_cpu_speed(int speed_mode, int notify)
_set_L3CR(prev_l3cr);
}
if (notify)
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
cur_freq = (speed_mode == CPUFREQ_HIGH) ? hi_freq : low_freq;
return 0;
@@ -393,7 +393,7 @@ static int pmac_cpufreq_target( struct cpufreq_policy *policy,
target_freq, relation, &newstate))
return -EINVAL;
- rc = do_set_cpu_speed(newstate, 1);
+ rc = do_set_cpu_speed(policy, newstate, 1);
ppc_proc_freq = cur_freq * 1000ul;
return rc;
@@ -442,7 +442,7 @@ static int pmac_cpufreq_suspend(struct cpufreq_policy *policy)
no_schedule = 1;
sleep_freq = cur_freq;
if (cur_freq == low_freq && !is_pmu_based)
- do_set_cpu_speed(CPUFREQ_HIGH, 0);
+ do_set_cpu_speed(policy, CPUFREQ_HIGH, 0);
return 0;
}
@@ -458,7 +458,7 @@ static int pmac_cpufreq_resume(struct cpufreq_policy *policy)
* is that we force a switch to whatever it was, which is
* probably high speed due to our suspend() routine
*/
- do_set_cpu_speed(sleep_freq == low_freq ?
+ do_set_cpu_speed(policy, sleep_freq == low_freq ?
CPUFREQ_LOW : CPUFREQ_HIGH, 0);
ppc_proc_freq = cur_freq * 1000ul;
diff --git a/arch/powerpc/platforms/powermac/cpufreq_64.c b/arch/powerpc/platforms/powermac/cpufreq_64.c
index 9650c6029c82..7ba423431cfe 100644
--- a/arch/powerpc/platforms/powermac/cpufreq_64.c
+++ b/arch/powerpc/platforms/powermac/cpufreq_64.c
@@ -339,11 +339,10 @@ static int g5_cpufreq_target(struct cpufreq_policy *policy,
freqs.old = g5_cpu_freqs[g5_pmode_cur].frequency;
freqs.new = g5_cpu_freqs[newstate].frequency;
- freqs.cpu = 0;
- cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
rc = g5_switch_freq(newstate);
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
mutex_unlock(&g5_switch_mutex);
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c
index 2b8af75abc23..cf7009b8c7b6 100644
--- a/arch/powerpc/platforms/powermac/pci.c
+++ b/arch/powerpc/platforms/powermac/pci.c
@@ -824,6 +824,7 @@ static void __init parse_region_decode(struct pci_controller *hose,
hose->mem_resources[cur].name = hose->dn->full_name;
hose->mem_resources[cur].start = base;
hose->mem_resources[cur].end = end;
+ hose->mem_offset[cur] = 0;
DBG(" %d: 0x%08lx-0x%08lx\n", cur, base, end);
} else {
DBG(" : -0x%08lx\n", end);
@@ -866,7 +867,6 @@ static void __init setup_u3_ht(struct pci_controller* hose)
hose->io_resource.start = 0;
hose->io_resource.end = 0x003fffff;
hose->io_resource.flags = IORESOURCE_IO;
- hose->pci_mem_offset = 0;
hose->first_busno = 0;
hose->last_busno = 0xef;
diff --git a/arch/powerpc/platforms/powernv/Kconfig b/arch/powerpc/platforms/powernv/Kconfig
index 74fea5c21839..d3e840d643af 100644
--- a/arch/powerpc/platforms/powernv/Kconfig
+++ b/arch/powerpc/platforms/powernv/Kconfig
@@ -8,6 +8,11 @@ config PPC_POWERNV
select PPC_PCI_CHOICE if EMBEDDED
default y
+config POWERNV_MSI
+ bool "Support PCI MSI on PowerNV platform"
+ depends on PCI_MSI
+ default y
+
config PPC_POWERNV_RTAS
depends on PPC_POWERNV
bool "Support for RTAS based PowerNV platforms such as BML"
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
index 3bb07e5e43cd..6fabe92eafb6 100644
--- a/arch/powerpc/platforms/powernv/opal-wrappers.S
+++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
@@ -107,3 +107,4 @@ OPAL_CALL(opal_pci_mask_pe_error, OPAL_PCI_MASK_PE_ERROR);
OPAL_CALL(opal_set_slot_led_status, OPAL_SET_SLOT_LED_STATUS);
OPAL_CALL(opal_get_epow_status, OPAL_GET_EPOW_STATUS);
OPAL_CALL(opal_set_system_attention_led, OPAL_SET_SYSTEM_ATTENTION_LED);
+OPAL_CALL(opal_pci_msi_eoi, OPAL_PCI_MSI_EOI);
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index aaa0dba49471..ade4463226c6 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -34,7 +34,6 @@ int __init early_init_dt_scan_opal(unsigned long node,
{
const void *basep, *entryp;
unsigned long basesz, entrysz;
- u64 glue;
if (depth != 1 || strcmp(uname, "ibm,opal") != 0)
return 0;
@@ -61,6 +60,16 @@ int __init early_init_dt_scan_opal(unsigned long node,
printk("OPAL V1 detected !\n");
}
+ return 1;
+}
+
+static int __init opal_register_exception_handlers(void)
+{
+ u64 glue;
+
+ if (!(powerpc_firmware_features & FW_FEATURE_OPAL))
+ return -ENODEV;
+
/* Hookup some exception handlers. We use the fwnmi area at 0x7000
* to provide the glue space to OPAL
*/
@@ -74,9 +83,11 @@ int __init early_init_dt_scan_opal(unsigned long node,
glue += 128;
opal_register_exception_handler(OPAL_SOFTPATCH_HANDLER, 0, glue);
- return 1;
+ return 0;
}
+early_initcall(opal_register_exception_handlers);
+
int opal_get_chars(uint32_t vtermno, char *buf, int count)
{
s64 len, rc;
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 8e90e8906df3..1da578b7c1bf 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -26,10 +26,12 @@
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
+#include <asm/msi_bitmap.h>
#include <asm/ppc-pci.h>
#include <asm/opal.h>
#include <asm/iommu.h>
#include <asm/tce.h>
+#include <asm/xics.h>
#include "powernv.h"
#include "pci.h"
@@ -87,6 +89,7 @@ static int pnv_ioda_alloc_pe(struct pnv_phb *phb)
return IODA_INVALID_PE;
} while(test_and_set_bit(pe, phb->ioda.pe_alloc));
+ phb->ioda.pe_array[pe].phb = phb;
phb->ioda.pe_array[pe].pe_number = pe;
return pe;
}
@@ -431,22 +434,102 @@ static void pnv_pci_ioda_setup_PEs(void)
}
}
-static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *dev)
+static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev)
{
- /* We delay DMA setup after we have assigned all PE# */
+ struct pci_dn *pdn = pnv_ioda_get_pdn(pdev);
+ struct pnv_ioda_pe *pe;
+
+ /*
+ * This function can be called before the PE#
+ * has been assigned; in that case do nothing.
+ */
+ if (!pdn || pdn->pe_number == IODA_INVALID_PE)
+ return;
+
+ pe = &phb->ioda.pe_array[pdn->pe_number];
+ set_iommu_table_base(&pdev->dev, &pe->tce32_table);
}
-static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus)
+static void pnv_pci_ioda1_tce_invalidate(struct iommu_table *tbl,
+ u64 *startp, u64 *endp)
{
- struct pci_dev *dev;
+ u64 __iomem *invalidate = (u64 __iomem *)tbl->it_index;
+ unsigned long start, end, inc;
+
+ start = __pa(startp);
+ end = __pa(endp);
+
+ /* BML uses this case for p6/p7/galaxy2: Shift addr and put in node */
+ if (tbl->it_busno) {
+ start <<= 12;
+ end <<= 12;
+ inc = 128 << 12;
+ start |= tbl->it_busno;
+ end |= tbl->it_busno;
+ } else if (tbl->it_type & TCE_PCI_SWINV_PAIR) {
+ /* p7ioc-style invalidation, 2 TCEs per write */
+ start |= (1ull << 63);
+ end |= (1ull << 63);
+ inc = 16;
+ } else {
+ /* Default (older HW) */
+ inc = 128;
+ }
- list_for_each_entry(dev, &bus->devices, bus_list) {
- set_iommu_table_base(&dev->dev, &pe->tce32_table);
- if (dev->subordinate)
- pnv_ioda_setup_bus_dma(pe, dev->subordinate);
+ end |= inc - 1; /* round up end to be different than start */
+
+ mb(); /* Ensure above stores are visible */
+ while (start <= end) {
+ __raw_writeq(start, invalidate);
+ start += inc;
+ }
+
+ /*
+ * The iommu layer will do another mb() for us on build()
+ * and we don't care on free()
+ */
+}
+
+static void pnv_pci_ioda2_tce_invalidate(struct pnv_ioda_pe *pe,
+ struct iommu_table *tbl,
+ u64 *startp, u64 *endp)
+{
+ unsigned long start, end, inc;
+ u64 __iomem *invalidate = (u64 __iomem *)tbl->it_index;
+
+ /* We'll invalidate DMA address in PE scope */
+ start = 0x2ul << 60;
+ start |= (pe->pe_number & 0xFF);
+ end = start;
+
+ /* Figure out the start, end and step */
+ inc = tbl->it_offset + (((u64)startp - tbl->it_base) / sizeof(u64));
+ start |= (inc << 12);
+ inc = tbl->it_offset + (((u64)endp - tbl->it_base) / sizeof(u64));
+ end |= (inc << 12);
+ inc = (0x1ul << 12);
+ mb();
+
+ while (start <= end) {
+ __raw_writeq(start, invalidate);
+ start += inc;
}
}
+void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl,
+ u64 *startp, u64 *endp)
+{
+ struct pnv_ioda_pe *pe = container_of(tbl, struct pnv_ioda_pe,
+ tce32_table);
+ struct pnv_phb *phb = pe->phb;
+
+ if (phb->type == PNV_PHB_IODA1)
+ pnv_pci_ioda1_tce_invalidate(tbl, startp, endp);
+ else
+ pnv_pci_ioda2_tce_invalidate(pe, tbl, startp, endp);
+}
+
static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
struct pnv_ioda_pe *pe, unsigned int base,
unsigned int segs)
@@ -518,16 +601,11 @@ static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
*/
tbl->it_busno = 0;
tbl->it_index = (unsigned long)ioremap(be64_to_cpup(swinvp), 8);
- tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE
- | TCE_PCI_SWINV_PAIR;
+ tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE |
+ TCE_PCI_SWINV_PAIR;
}
iommu_init_table(tbl, phb->hose->node);
- if (pe->pdev)
- set_iommu_table_base(&pe->pdev->dev, tbl);
- else
- pnv_ioda_setup_bus_dma(pe, pe->pbus);
-
return;
fail:
/* XXX Failure: Try to fallback to 64-bit only ? */
@@ -537,6 +615,76 @@ static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
__free_pages(tce_mem, get_order(TCE32_TABLE_SIZE * segs));
}
+static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
+ struct pnv_ioda_pe *pe)
+{
+ struct page *tce_mem = NULL;
+ void *addr;
+ const __be64 *swinvp;
+ struct iommu_table *tbl;
+ unsigned int tce_table_size, end;
+ int64_t rc;
+
+ /* We shouldn't already have a 32-bit DMA associated */
+ if (WARN_ON(pe->tce32_seg >= 0))
+ return;
+
+ /* The PE will reserve all possible 32-bits space */
+ pe->tce32_seg = 0;
+ end = (1 << ilog2(phb->ioda.m32_pci_base));
+ tce_table_size = (end / 0x1000) * 8;
+ pe_info(pe, "Setting up 32-bit TCE table at 0..%08x\n",
+ end);
+
+ /* Allocate TCE table */
+ tce_mem = alloc_pages_node(phb->hose->node, GFP_KERNEL,
+ get_order(tce_table_size));
+ if (!tce_mem) {
+ pe_err(pe, "Failed to allocate a 32-bit TCE memory\n");
+ goto fail;
+ }
+ addr = page_address(tce_mem);
+ memset(addr, 0, tce_table_size);
+
+ /*
+ * Map TCE table through TVT. The TVE index is the PE number
+ * shifted by 1 bit for 32-bits DMA space.
+ */
+ rc = opal_pci_map_pe_dma_window(phb->opal_id, pe->pe_number,
+ pe->pe_number << 1, 1, __pa(addr),
+ tce_table_size, 0x1000);
+ if (rc) {
+ pe_err(pe, "Failed to configure 32-bit TCE table,"
+ " err %ld\n", rc);
+ goto fail;
+ }
+
+ /* Setup linux iommu table */
+ tbl = &pe->tce32_table;
+ pnv_pci_setup_iommu_table(tbl, addr, tce_table_size, 0);
+
+ /* OPAL variant of PHB3 invalidated TCEs */
+ swinvp = of_get_property(phb->hose->dn, "ibm,opal-tce-kill", NULL);
+ if (swinvp) {
+ /* We need a couple more fields -- an address and a data
+ * value to OR in. Since the bus is only printed out on table
+ * free errors, and on the first pass the data will be a
+ * relative bus number, print that out instead.
+ */
+ tbl->it_busno = 0;
+ tbl->it_index = (unsigned long)ioremap(be64_to_cpup(swinvp), 8);
+ tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE;
+ }
+ iommu_init_table(tbl, phb->hose->node);
+
+ return;
+fail:
+ if (pe->tce32_seg >= 0)
+ pe->tce32_seg = -1;
+ if (tce_mem)
+ __free_pages(tce_mem, get_order(tce_table_size));
+}
+
static void pnv_ioda_setup_dma(struct pnv_phb *phb)
{
struct pci_controller *hose = phb->hose;
@@ -579,20 +727,49 @@ static void pnv_ioda_setup_dma(struct pnv_phb *phb)
if (segs > remaining)
segs = remaining;
}
- pe_info(pe, "DMA weight %d, assigned %d DMA32 segments\n",
- pe->dma_weight, segs);
- pnv_pci_ioda_setup_dma_pe(phb, pe, base, segs);
+
+ /*
+ * For an IODA2 compliant PHB3 we needn't care about the weight:
+ * all of the available 32-bit DMA space is assigned to the
+ * specific PE.
+ */
+ if (phb->type == PNV_PHB_IODA1) {
+ pe_info(pe, "DMA weight %d, assigned %d DMA32 segments\n",
+ pe->dma_weight, segs);
+ pnv_pci_ioda_setup_dma_pe(phb, pe, base, segs);
+ } else {
+ pe_info(pe, "Assign DMA32 space\n");
+ segs = 0;
+ pnv_pci_ioda2_setup_dma_pe(phb, pe);
+ }
+
remaining -= segs;
base += segs;
}
}
#ifdef CONFIG_PCI_MSI
+static void pnv_ioda2_msi_eoi(struct irq_data *d)
+{
+ unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
+ struct irq_chip *chip = irq_data_get_irq_chip(d);
+ struct pnv_phb *phb = container_of(chip, struct pnv_phb,
+ ioda.irq_chip);
+ int64_t rc;
+
+ rc = opal_pci_msi_eoi(phb->opal_id, hw_irq);
+ WARN_ON_ONCE(rc);
+
+ icp_native_eoi(d);
+}
+
static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
- unsigned int hwirq, unsigned int is_64,
- struct msi_msg *msg)
+ unsigned int hwirq, unsigned int virq,
+ unsigned int is_64, struct msi_msg *msg)
{
struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev);
+ struct irq_data *idata;
+ struct irq_chip *ichip;
unsigned int xive_num = hwirq - phb->msi_base;
uint64_t addr64;
uint32_t addr32, data;
@@ -637,6 +814,23 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
}
msg->data = data;
+ /*
+ * Change the IRQ chip for the MSI interrupts on PHB3.
+ * The IRQ chip is copied and its EOI handler patched
+ * the first time an MSI is set up.
+ */
+ if (phb->type == PNV_PHB_IODA2) {
+ if (!phb->ioda.irq_chip_init) {
+ idata = irq_get_irq_data(virq);
+ ichip = irq_data_get_irq_chip(idata);
+ phb->ioda.irq_chip_init = 1;
+ phb->ioda.irq_chip = *ichip;
+ phb->ioda.irq_chip.irq_eoi = pnv_ioda2_msi_eoi;
+ }
+
+ irq_set_chip(virq, &phb->ioda.irq_chip);
+ }
+
pr_devel("%s: %s-bit MSI on hwirq %x (xive #%d),"
" address=%x_%08x data=%x PE# %d\n",
pci_name(dev), is_64 ? "64" : "32", hwirq, xive_num,
@@ -647,7 +841,7 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
static void pnv_pci_init_ioda_msis(struct pnv_phb *phb)
{
- unsigned int bmap_size;
+ unsigned int count;
const __be32 *prop = of_get_property(phb->hose->dn,
"ibm,opal-msi-ranges", NULL);
if (!prop) {
@@ -658,18 +852,17 @@ static void pnv_pci_init_ioda_msis(struct pnv_phb *phb)
return;
phb->msi_base = be32_to_cpup(prop);
- phb->msi_count = be32_to_cpup(prop + 1);
- bmap_size = BITS_TO_LONGS(phb->msi_count) * sizeof(unsigned long);
- phb->msi_map = zalloc_maybe_bootmem(bmap_size, GFP_KERNEL);
- if (!phb->msi_map) {
+ count = be32_to_cpup(prop + 1);
+ if (msi_bitmap_alloc(&phb->msi_bmp, count, phb->hose->dn)) {
pr_err("PCI %d: Failed to allocate MSI bitmap !\n",
phb->hose->global_number);
return;
}
+
phb->msi_setup = pnv_pci_ioda_msi_setup;
phb->msi32_support = 1;
pr_info(" Allocated bitmap for %d MSIs (base IRQ 0x%x)\n",
- phb->msi_count, phb->msi_base);
+ count, phb->msi_base);
}
#else
static void pnv_pci_init_ioda_msis(struct pnv_phb *phb) { }
@@ -722,11 +915,14 @@ static void pnv_ioda_setup_pe_seg(struct pci_controller *hose,
index++;
}
} else if (res->flags & IORESOURCE_MEM) {
+ /* WARNING: Assumes M32 is mem region 0 in PHB. We need to
+ * harden that algorithm when we start supporting M64
+ */
region.start = res->start -
- hose->pci_mem_offset -
+ hose->mem_offset[0] -
phb->ioda.m32_pci_base;
region.end = res->end -
- hose->pci_mem_offset -
+ hose->mem_offset[0] -
phb->ioda.m32_pci_base;
index = region.start / phb->ioda.m32_segsize;
@@ -852,18 +1048,19 @@ static u32 pnv_ioda_bdfn_to_pe(struct pnv_phb *phb, struct pci_bus *bus,
return phb->ioda.pe_rmap[(bus->number << 8) | devfn];
}
-void __init pnv_pci_init_ioda1_phb(struct device_node *np)
+void __init pnv_pci_init_ioda_phb(struct device_node *np, int ioda_type)
{
struct pci_controller *hose;
static int primary = 1;
struct pnv_phb *phb;
unsigned long size, m32map_off, iomap_off, pemap_off;
const u64 *prop64;
+ const u32 *prop32;
u64 phb_id;
void *aux;
long rc;
- pr_info(" Initializing IODA OPAL PHB %s\n", np->full_name);
+ pr_info(" Initializing IODA%d OPAL PHB %s\n", ioda_type, np->full_name);
prop64 = of_get_property(np, "ibm,opal-phbid", NULL);
if (!prop64) {
@@ -890,47 +1087,46 @@ void __init pnv_pci_init_ioda1_phb(struct device_node *np)
hose->last_busno = 0xff;
hose->private_data = phb;
phb->opal_id = phb_id;
- phb->type = PNV_PHB_IODA1;
+ phb->type = ioda_type;
/* Detect specific models for error handling */
if (of_device_is_compatible(np, "ibm,p7ioc-pciex"))
phb->model = PNV_PHB_MODEL_P7IOC;
+ else if (of_device_is_compatible(np, "ibm,power8-pciex"))
+ phb->model = PNV_PHB_MODEL_PHB3;
else
phb->model = PNV_PHB_MODEL_UNKNOWN;
- /* We parse "ranges" now since we need to deduce the register base
- * from the IO base
- */
+ /* Parse 32-bit and IO ranges (if any) */
pci_process_bridge_OF_ranges(phb->hose, np, primary);
primary = 0;
- /* Magic formula from Milton */
+ /* Get registers */
phb->regs = of_iomap(np, 0);
if (phb->regs == NULL)
pr_err(" Failed to map registers !\n");
-
- /* XXX This is hack-a-thon. This needs to be changed so that:
- * - we obtain stuff like PE# etc... from device-tree
- * - we properly re-allocate M32 ourselves
- * (the OFW one isn't very good)
- */
-
/* Initialize more IODA stuff */
- phb->ioda.total_pe = 128;
+ prop32 = of_get_property(np, "ibm,opal-num-pes", NULL);
+ if (!prop32)
+ phb->ioda.total_pe = 1;
+ else
+ phb->ioda.total_pe = *prop32;
phb->ioda.m32_size = resource_size(&hose->mem_resources[0]);
- /* OFW Has already off top 64k of M32 space (MSI space) */
+ /* FW has already taken the top 64k (MSI space) off the M32 space */
phb->ioda.m32_size += 0x10000;
phb->ioda.m32_segsize = phb->ioda.m32_size / phb->ioda.total_pe;
- phb->ioda.m32_pci_base = hose->mem_resources[0].start -
- hose->pci_mem_offset;
+ phb->ioda.m32_pci_base = hose->mem_resources[0].start - hose->mem_offset[0];
phb->ioda.io_size = hose->pci_io_size;
phb->ioda.io_segsize = phb->ioda.io_size / phb->ioda.total_pe;
phb->ioda.io_pci_base = 0; /* XXX calculate this ? */
- /* Allocate aux data & arrays */
+ /* Allocate aux data & arrays
+ *
+ * XXX TODO: Don't allocate io segmap on PHB3
+ */
size = _ALIGN_UP(phb->ioda.total_pe / 8, sizeof(unsigned long));
m32map_off = size;
size += phb->ioda.total_pe * sizeof(phb->ioda.m32_segmap[0]);
@@ -960,7 +1156,7 @@ void __init pnv_pci_init_ioda1_phb(struct device_node *np)
hose->mem_resources[2].start = 0;
hose->mem_resources[2].end = 0;
-#if 0
+#if 0 /* We should really do that ... */
rc = opal_pci_set_phb_mem_window(opal->phb_id,
window_type,
window_num,
@@ -974,16 +1170,6 @@ void __init pnv_pci_init_ioda1_phb(struct device_node *np)
phb->ioda.m32_size, phb->ioda.m32_segsize,
phb->ioda.io_size, phb->ioda.io_segsize);
- if (phb->regs) {
- pr_devel(" BUID = 0x%016llx\n", in_be64(phb->regs + 0x100));
- pr_devel(" PHB2_CR = 0x%016llx\n", in_be64(phb->regs + 0x160));
- pr_devel(" IO_BAR = 0x%016llx\n", in_be64(phb->regs + 0x170));
- pr_devel(" IO_BAMR = 0x%016llx\n", in_be64(phb->regs + 0x178));
- pr_devel(" IO_SAR = 0x%016llx\n", in_be64(phb->regs + 0x180));
- pr_devel(" M32_BAR = 0x%016llx\n", in_be64(phb->regs + 0x190));
- pr_devel(" M32_BAMR = 0x%016llx\n", in_be64(phb->regs + 0x198));
- pr_devel(" M32_SAR = 0x%016llx\n", in_be64(phb->regs + 0x1a0));
- }
phb->hose->ops = &pnv_pci_ops;
/* Setup RID -> PE mapping function */
@@ -1011,7 +1197,18 @@ void __init pnv_pci_init_ioda1_phb(struct device_node *np)
rc = opal_pci_reset(phb_id, OPAL_PCI_IODA_TABLE_RESET, OPAL_ASSERT_RESET);
if (rc)
pr_warning(" OPAL Error %ld performing IODA table reset !\n", rc);
- opal_pci_set_pe(phb_id, 0, 0, 7, 1, 1 , OPAL_MAP_PE);
+
+ /*
+ * On IODA1 map everything to PE#0, on IODA2 we assume the IODA reset
+ * has cleared the RTT which has the same effect
+ */
+ if (ioda_type == PNV_PHB_IODA1)
+ opal_pci_set_pe(phb_id, 0, 0, 7, 1, 1, OPAL_MAP_PE);
+}
+
+void pnv_pci_init_ioda2_phb(struct device_node *np)
+{
+ pnv_pci_init_ioda_phb(np, PNV_PHB_IODA2);
}
void __init pnv_pci_init_ioda_hub(struct device_node *np)
@@ -1034,6 +1231,6 @@ void __init pnv_pci_init_ioda_hub(struct device_node *np)
for_each_child_of_node(np, phbn) {
/* Look for IODA1 PHBs */
if (of_device_is_compatible(phbn, "ibm,ioda-phb"))
- pnv_pci_init_ioda1_phb(phbn);
+ pnv_pci_init_ioda_phb(phbn, PNV_PHB_IODA1);
}
}
diff --git a/arch/powerpc/platforms/powernv/pci-p5ioc2.c b/arch/powerpc/platforms/powernv/pci-p5ioc2.c
index 7db8771a40f5..92b37a0186c9 100644
--- a/arch/powerpc/platforms/powernv/pci-p5ioc2.c
+++ b/arch/powerpc/platforms/powernv/pci-p5ioc2.c
@@ -26,6 +26,7 @@
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
+#include <asm/msi_bitmap.h>
#include <asm/ppc-pci.h>
#include <asm/opal.h>
#include <asm/iommu.h>
@@ -41,8 +42,8 @@
#ifdef CONFIG_PCI_MSI
static int pnv_pci_p5ioc2_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
- unsigned int hwirq, unsigned int is_64,
- struct msi_msg *msg)
+ unsigned int hwirq, unsigned int virq,
+ unsigned int is_64, struct msi_msg *msg)
{
if (WARN_ON(!is_64))
return -ENXIO;
@@ -55,7 +56,7 @@ static int pnv_pci_p5ioc2_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
static void pnv_pci_init_p5ioc2_msis(struct pnv_phb *phb)
{
- unsigned int bmap_size;
+ unsigned int count;
const __be32 *prop = of_get_property(phb->hose->dn,
"ibm,opal-msi-ranges", NULL);
if (!prop)
@@ -67,10 +68,8 @@ static void pnv_pci_init_p5ioc2_msis(struct pnv_phb *phb)
if (of_device_is_compatible(phb->hose->dn, "ibm,p5ioc2-pcix"))
return;
phb->msi_base = be32_to_cpup(prop);
- phb->msi_count = be32_to_cpup(prop + 1);
- bmap_size = BITS_TO_LONGS(phb->msi_count) * sizeof(unsigned long);
- phb->msi_map = zalloc_maybe_bootmem(bmap_size, GFP_KERNEL);
- if (!phb->msi_map) {
+ count = be32_to_cpup(prop + 1);
+ if (msi_bitmap_alloc(&phb->msi_bmp, count, phb->hose->dn)) {
pr_err("PCI %d: Failed to allocate MSI bitmap !\n",
phb->hose->global_number);
return;
@@ -78,7 +77,7 @@ static void pnv_pci_init_p5ioc2_msis(struct pnv_phb *phb)
phb->msi_setup = pnv_pci_p5ioc2_msi_setup;
phb->msi32_support = 0;
pr_info(" Allocated bitmap for %d MSIs (base IRQ 0x%x)\n",
- phb->msi_count, phb->msi_base);
+ count, phb->msi_base);
}
#else
static void pnv_pci_init_p5ioc2_msis(struct pnv_phb *phb) { }
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index b8b8e0bd9897..55dfca844ddf 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -26,6 +26,7 @@
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
+#include <asm/msi_bitmap.h>
#include <asm/ppc-pci.h>
#include <asm/opal.h>
#include <asm/iommu.h>
@@ -47,43 +48,7 @@ static int pnv_msi_check_device(struct pci_dev* pdev, int nvec, int type)
struct pci_controller *hose = pci_bus_to_host(pdev->bus);
struct pnv_phb *phb = hose->private_data;
- return (phb && phb->msi_map) ? 0 : -ENODEV;
-}
-
-static unsigned int pnv_get_one_msi(struct pnv_phb *phb)
-{
- unsigned long flags;
- unsigned int id, rc;
-
- spin_lock_irqsave(&phb->lock, flags);
-
- id = find_next_zero_bit(phb->msi_map, phb->msi_count, phb->msi_next);
- if (id >= phb->msi_count && phb->msi_next)
- id = find_next_zero_bit(phb->msi_map, phb->msi_count, 0);
- if (id >= phb->msi_count) {
- rc = 0;
- goto out;
- }
- __set_bit(id, phb->msi_map);
- rc = id + phb->msi_base;
-out:
- spin_unlock_irqrestore(&phb->lock, flags);
- return rc;
-}
-
-static void pnv_put_msi(struct pnv_phb *phb, unsigned int hwirq)
-{
- unsigned long flags;
- unsigned int id;
-
- if (WARN_ON(hwirq < phb->msi_base ||
- hwirq >= (phb->msi_base + phb->msi_count)))
- return;
- id = hwirq - phb->msi_base;
-
- spin_lock_irqsave(&phb->lock, flags);
- __clear_bit(id, phb->msi_map);
- spin_unlock_irqrestore(&phb->lock, flags);
+ return (phb && phb->msi_bmp.bitmap) ? 0 : -ENODEV;
}
static int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
@@ -92,7 +57,8 @@ static int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
struct pnv_phb *phb = hose->private_data;
struct msi_desc *entry;
struct msi_msg msg;
- unsigned int hwirq, virq;
+ int hwirq;
+ unsigned int virq;
int rc;
if (WARN_ON(!phb))
@@ -104,25 +70,25 @@ static int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
pci_name(pdev));
return -ENXIO;
}
- hwirq = pnv_get_one_msi(phb);
- if (!hwirq) {
+ hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, 1);
+ if (hwirq < 0) {
pr_warn("%s: Failed to find a free MSI\n",
pci_name(pdev));
return -ENOSPC;
}
- virq = irq_create_mapping(NULL, hwirq);
+ virq = irq_create_mapping(NULL, phb->msi_base + hwirq);
if (virq == NO_IRQ) {
pr_warn("%s: Failed to map MSI to linux irq\n",
pci_name(pdev));
- pnv_put_msi(phb, hwirq);
+ msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, 1);
return -ENOMEM;
}
- rc = phb->msi_setup(phb, pdev, hwirq, entry->msi_attrib.is_64,
- &msg);
+ rc = phb->msi_setup(phb, pdev, phb->msi_base + hwirq,
+ virq, entry->msi_attrib.is_64, &msg);
if (rc) {
pr_warn("%s: Failed to setup MSI\n", pci_name(pdev));
irq_dispose_mapping(virq);
- pnv_put_msi(phb, hwirq);
+ msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, 1);
return rc;
}
irq_set_msi_desc(virq, entry);
@@ -144,7 +110,8 @@ static void pnv_teardown_msi_irqs(struct pci_dev *pdev)
if (entry->irq == NO_IRQ)
continue;
irq_set_msi_desc(entry->irq, NULL);
- pnv_put_msi(phb, virq_to_hw(entry->irq));
+ msi_bitmap_free_hwirqs(&phb->msi_bmp,
+ virq_to_hw(entry->irq) - phb->msi_base, 1);
irq_dispose_mapping(entry->irq);
}
}
@@ -362,48 +329,6 @@ struct pci_ops pnv_pci_ops = {
.write = pnv_pci_write_config,
};
-
-static void pnv_tce_invalidate(struct iommu_table *tbl,
- u64 *startp, u64 *endp)
-{
- u64 __iomem *invalidate = (u64 __iomem *)tbl->it_index;
- unsigned long start, end, inc;
-
- start = __pa(startp);
- end = __pa(endp);
-
-
- /* BML uses this case for p6/p7/galaxy2: Shift addr and put in node */
- if (tbl->it_busno) {
- start <<= 12;
- end <<= 12;
- inc = 128 << 12;
- start |= tbl->it_busno;
- end |= tbl->it_busno;
- }
- /* p7ioc-style invalidation, 2 TCEs per write */
- else if (tbl->it_type & TCE_PCI_SWINV_PAIR) {
- start |= (1ull << 63);
- end |= (1ull << 63);
- inc = 16;
- }
- /* Default (older HW) */
- else
- inc = 128;
-
- end |= inc - 1; /* round up end to be different than start */
-
- mb(); /* Ensure above stores are visible */
- while (start <= end) {
- __raw_writeq(start, invalidate);
- start += inc;
- }
- /* The iommu layer will do another mb() for us on build() and
- * we don't care on free()
- */
-}
-
-
static int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
unsigned long uaddr, enum dma_data_direction direction,
struct dma_attrs *attrs)
@@ -428,7 +353,7 @@ static int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
* of flags if that becomes the case
*/
if (tbl->it_type & TCE_PCI_SWINV_CREATE)
- pnv_tce_invalidate(tbl, tces, tcep - 1);
+ pnv_pci_ioda_tce_invalidate(tbl, tces, tcep - 1);
return 0;
}
@@ -442,8 +367,8 @@ static void pnv_tce_free(struct iommu_table *tbl, long index, long npages)
while (npages--)
*(tcep++) = 0;
- if (tbl->it_type & TCE_PCI_SWINV_FREE)
- pnv_tce_invalidate(tbl, tces, tcep - 1);
+ if (tbl->it_type & TCE_PCI_SWINV_FREE)
+ pnv_pci_ioda_tce_invalidate(tbl, tces, tcep - 1);
}
static unsigned long pnv_tce_get(struct iommu_table *tbl, long index)
@@ -525,7 +450,7 @@ static void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
pnv_pci_dma_fallback_setup(hose, pdev);
}
-/* Fixup wrong class code in p7ioc root complex */
+/* Fixup wrong class code in p7ioc and p8 root complex */
static void pnv_p7ioc_rc_quirk(struct pci_dev *dev)
{
dev->class = PCI_CLASS_BRIDGE_PCI << 8;
@@ -591,6 +516,10 @@ void __init pnv_pci_init(void)
if (!found_ioda)
for_each_compatible_node(np, NULL, "ibm,p5ioc2")
pnv_pci_init_p5ioc2_hub(np);
+
+ /* Look for ioda2 built-in PHB3's */
+ for_each_compatible_node(np, NULL, "ibm,ioda2-phb")
+ pnv_pci_init_ioda2_phb(np);
}
/* Setup the linkage between OF nodes and PHBs */
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index 7cfb7c883deb..48dc4bb856a1 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -4,9 +4,9 @@
struct pci_dn;
enum pnv_phb_type {
- PNV_PHB_P5IOC2,
- PNV_PHB_IODA1,
- PNV_PHB_IODA2,
+ PNV_PHB_P5IOC2 = 0,
+ PNV_PHB_IODA1 = 1,
+ PNV_PHB_IODA2 = 2,
};
/* Precise PHB model for error management */
@@ -14,6 +14,7 @@ enum pnv_phb_model {
PNV_PHB_MODEL_UNKNOWN,
PNV_PHB_MODEL_P5IOC2,
PNV_PHB_MODEL_P7IOC,
+ PNV_PHB_MODEL_PHB3,
};
#define PNV_PCI_DIAG_BUF_SIZE 4096
@@ -22,8 +23,10 @@ enum pnv_phb_model {
#define PNV_IODA_PE_BUS_ALL (1 << 2) /* PE has subordinate buses */
/* Data associated with a PE, including IOMMU tracking etc.. */
+struct pnv_phb;
struct pnv_ioda_pe {
unsigned long flags;
+ struct pnv_phb *phb;
/* A PE can be associated with a single device or an
* entire bus (& children). In the former case, pdev
@@ -73,15 +76,13 @@ struct pnv_phb {
spinlock_t lock;
#ifdef CONFIG_PCI_MSI
- unsigned long *msi_map;
unsigned int msi_base;
- unsigned int msi_count;
- unsigned int msi_next;
unsigned int msi32_support;
+ struct msi_bitmap msi_bmp;
#endif
int (*msi_setup)(struct pnv_phb *phb, struct pci_dev *dev,
- unsigned int hwirq, unsigned int is_64,
- struct msi_msg *msg);
+ unsigned int hwirq, unsigned int virq,
+ unsigned int is_64, struct msi_msg *msg);
void (*dma_dev_setup)(struct pnv_phb *phb, struct pci_dev *pdev);
void (*fixup_phb)(struct pci_controller *hose);
u32 (*bdfn_to_pe)(struct pnv_phb *phb, struct pci_bus *bus, u32 devfn);
@@ -109,6 +110,10 @@ struct pnv_phb {
unsigned int *io_segmap;
struct pnv_ioda_pe *pe_array;
+ /* IRQ chip */
+ int irq_chip_init;
+ struct irq_chip irq_chip;
+
/* Sorted list of used PE's based
* on the sequence of creation
*/
@@ -150,6 +155,7 @@ extern void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
u64 dma_offset);
extern void pnv_pci_init_p5ioc2_hub(struct device_node *np);
extern void pnv_pci_init_ioda_hub(struct device_node *np);
-
-
+extern void pnv_pci_init_ioda2_phb(struct device_node *np);
+extern void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl,
+ u64 *startp, u64 *endp);
#endif /* __POWERNV_PCI_H */
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index 0bdc735db16f..6a3ecca5b725 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -77,9 +77,11 @@ int pnv_smp_kick_cpu(int nr)
if (!paca[nr].cpu_start && firmware_has_feature(FW_FEATURE_OPALv2)) {
pr_devel("OPAL: Starting CPU %d (HW 0x%x)...\n", nr, pcpu);
rc = opal_start_cpu(pcpu, start_here);
- if (rc != OPAL_SUCCESS)
+ if (rc != OPAL_SUCCESS) {
pr_warn("OPAL Error %ld starting CPU %d\n",
rc, nr);
+ return -ENODEV;
+ }
}
return smp_generic_kick_cpu(nr);
}
diff --git a/arch/powerpc/platforms/prep/Kconfig b/arch/powerpc/platforms/prep/Kconfig
deleted file mode 100644
index 1547f66235d9..000000000000
--- a/arch/powerpc/platforms/prep/Kconfig
+++ /dev/null
@@ -1,23 +0,0 @@
-config PPC_PREP
- bool "PowerPC Reference Platform (PReP) based machines"
- depends on 6xx && BROKEN
- select HAVE_PCSPKR_PLATFORM
- select MPIC
- select PPC_I8259
- select PPC_INDIRECT_PCI
- select PPC_UDBG_16550
- select PPC_NATIVE
- default n
-
-config PREP_RESIDUAL
- bool "Support for PReP Residual Data"
- depends on PPC_PREP
- help
- Some PReP systems have residual data passed to the kernel by the
- firmware. This allows detection of memory size, devices present and
- other useful pieces of information. Sometimes this information is
- not present or incorrect, in which case it could lead to the machine
- behaving incorrectly. If this happens, either disable PREP_RESIDUAL
- or pass the 'noresidual' option to the kernel.
-
- If you are running a PReP system, say Y here, otherwise say N.
diff --git a/arch/powerpc/platforms/ps3/htab.c b/arch/powerpc/platforms/ps3/htab.c
index 6cc58201db8c..177a2f70700c 100644
--- a/arch/powerpc/platforms/ps3/htab.c
+++ b/arch/powerpc/platforms/ps3/htab.c
@@ -46,7 +46,7 @@ static DEFINE_SPINLOCK(ps3_htab_lock);
static long ps3_hpte_insert(unsigned long hpte_group, unsigned long vpn,
unsigned long pa, unsigned long rflags, unsigned long vflags,
- int psize, int ssize)
+ int psize, int apsize, int ssize)
{
int result;
u64 hpte_v, hpte_r;
@@ -62,8 +62,8 @@ static long ps3_hpte_insert(unsigned long hpte_group, unsigned long vpn,
*/
vflags &= ~HPTE_V_SECONDARY;
- hpte_v = hpte_encode_v(vpn, psize, ssize) | vflags | HPTE_V_VALID;
- hpte_r = hpte_encode_r(ps3_mm_phys_to_lpar(pa), psize) | rflags;
+ hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
+ hpte_r = hpte_encode_r(ps3_mm_phys_to_lpar(pa), psize, apsize) | rflags;
spin_lock_irqsave(&ps3_htab_lock, flags);
@@ -117,7 +117,7 @@ static long ps3_hpte_updatepp(unsigned long slot, unsigned long newpp,
unsigned long flags;
long ret;
- want_v = hpte_encode_v(vpn, psize, ssize);
+ want_v = hpte_encode_avpn(vpn, psize, ssize);
spin_lock_irqsave(&ps3_htab_lock, flags);
diff --git a/arch/powerpc/platforms/ps3/time.c b/arch/powerpc/platforms/ps3/time.c
index 40b5cb433005..cba1e6be68e5 100644
--- a/arch/powerpc/platforms/ps3/time.c
+++ b/arch/powerpc/platforms/ps3/time.c
@@ -89,10 +89,8 @@ static int __init ps3_rtc_init(void)
return -ENODEV;
pdev = platform_device_register_simple("rtc-ps3", -1, NULL, 0);
- if (IS_ERR(pdev))
- return PTR_ERR(pdev);
- return 0;
+ return PTR_RET(pdev);
}
module_init(ps3_rtc_init);
diff --git a/arch/powerpc/platforms/pseries/firmware.c b/arch/powerpc/platforms/pseries/firmware.c
index aa3693f7fb27..8c80588abacc 100644
--- a/arch/powerpc/platforms/pseries/firmware.c
+++ b/arch/powerpc/platforms/pseries/firmware.c
@@ -28,18 +28,18 @@
#include "pseries.h"
-typedef struct {
+struct hypertas_fw_feature {
unsigned long val;
char * name;
-} firmware_feature_t;
+};
/*
* The names in this table match names in rtas/ibm,hypertas-functions. If the
* entry ends in a '*', only upto the '*' is matched. Otherwise the entire
* string must match.
*/
-static __initdata firmware_feature_t
-firmware_features_table[FIRMWARE_MAX_FEATURES] = {
+static __initdata struct hypertas_fw_feature
+hypertas_fw_features_table[] = {
{FW_FEATURE_PFT, "hcall-pft"},
{FW_FEATURE_TCE, "hcall-tce"},
{FW_FEATURE_SPRG0, "hcall-sprg0"},
@@ -69,20 +69,18 @@ firmware_features_table[FIRMWARE_MAX_FEATURES] = {
* device-tree/ibm,hypertas-functions. Ultimately this functionality may
* be moved into prom.c prom_init().
*/
-void __init fw_feature_init(const char *hypertas, unsigned long len)
+void __init fw_hypertas_feature_init(const char *hypertas, unsigned long len)
{
const char *s;
int i;
- pr_debug(" -> fw_feature_init()\n");
+ pr_debug(" -> fw_hypertas_feature_init()\n");
for (s = hypertas; s < hypertas + len; s += strlen(s) + 1) {
- for (i = 0; i < FIRMWARE_MAX_FEATURES; i++) {
- const char *name = firmware_features_table[i].name;
+ for (i = 0; i < ARRAY_SIZE(hypertas_fw_features_table); i++) {
+ const char *name = hypertas_fw_features_table[i].name;
size_t size;
- /* check value against table of strings */
- if (!name)
- continue;
+
/*
* If there is a '*' at the end of name, only check
* upto there
@@ -96,10 +94,40 @@ void __init fw_feature_init(const char *hypertas, unsigned long len)
/* we have a match */
powerpc_firmware_features |=
- firmware_features_table[i].val;
+ hypertas_fw_features_table[i].val;
break;
}
}
- pr_debug(" <- fw_feature_init()\n");
+ pr_debug(" <- fw_hypertas_feature_init()\n");
+}
+
+struct vec5_fw_feature {
+ unsigned long val;
+ unsigned int feature;
+};
+
+static __initdata struct vec5_fw_feature
+vec5_fw_features_table[] = {
+ {FW_FEATURE_TYPE1_AFFINITY, OV5_TYPE1_AFFINITY},
+ {FW_FEATURE_PRRN, OV5_PRRN},
+};
+
+void __init fw_vec5_feature_init(const char *vec5, unsigned long len)
+{
+ unsigned int index, feat;
+ int i;
+
+ pr_debug(" -> fw_vec5_feature_init()\n");
+
+ for (i = 0; i < ARRAY_SIZE(vec5_fw_features_table); i++) {
+ index = OV5_INDX(vec5_fw_features_table[i].feature);
+ feat = OV5_FEAT(vec5_fw_features_table[i].feature);
+
+ if (vec5[index] & feat)
+ powerpc_firmware_features |=
+ vec5_fw_features_table[i].val;
+ }
+
+ pr_debug(" <- fw_vec5_feature_init()\n");
}
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index 2372c609fa2b..9a432de363b8 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -72,6 +72,7 @@ unsigned long memory_block_size_bytes(void)
return get_memblock_size();
}
+#ifdef CONFIG_MEMORY_HOTREMOVE
static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size)
{
unsigned long start, start_pfn;
@@ -153,6 +154,17 @@ static int pseries_remove_memory(struct device_node *np)
ret = pseries_remove_memblock(base, lmb_size);
return ret;
}
+#else
+static inline int pseries_remove_memblock(unsigned long base,
+ unsigned int memblock_size)
+{
+ return -EOPNOTSUPP;
+}
+static inline int pseries_remove_memory(struct device_node *np)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* CONFIG_MEMORY_HOTREMOVE */
static int pseries_add_memory(struct device_node *np)
{
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 1b2a174e7c59..86ae364900d6 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -924,6 +924,13 @@ static void restore_default_window(struct pci_dev *dev,
__restore_default_window(pci_dev_to_eeh_dev(dev), ddw_restore_token);
}
+struct failed_ddw_pdn {
+ struct device_node *pdn;
+ struct list_head list;
+};
+
+static LIST_HEAD(failed_ddw_pdn_list);
+
/*
* If the PE supports dynamic dma windows, and there is space for a table
* that can map all pages in a linear offset, then setup such a table,
@@ -951,6 +958,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
struct dynamic_dma_window_prop *ddwprop;
const void *dma_window = NULL;
unsigned long liobn, offset, size;
+ struct failed_ddw_pdn *fpdn;
mutex_lock(&direct_window_init_mutex);
@@ -959,6 +967,18 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
goto out_unlock;
/*
+ * If we already went through this for a previous function of
+ * the same device and failed, we don't want to muck with the
+ * DMA window again, as it will race with in-flight operations
+ * and can lead to EEHs. The above mutex protects access to the
+ * list.
+ */
+ list_for_each_entry(fpdn, &failed_ddw_pdn_list, list) {
+ if (!strcmp(fpdn->pdn->full_name, pdn->full_name))
+ goto out_unlock;
+ }
+
+ /*
* the ibm,ddw-applicable property holds the tokens for:
* ibm,query-pe-dma-window
* ibm,create-pe-dma-window
@@ -1114,6 +1134,12 @@ out_restore_window:
if (ddw_restore_token)
restore_default_window(dev, ddw_restore_token);
+ fpdn = kzalloc(sizeof(*fpdn), GFP_KERNEL);
+ if (!fpdn)
+ goto out_unlock;
+ fpdn->pdn = pdn;
+ list_add(&fpdn->list, &failed_ddw_pdn_list);
+
out_unlock:
mutex_unlock(&direct_window_init_mutex);
return dma_addr;
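
enable_ddw() now keeps a negative cache: device nodes whose dynamic DMA window
setup failed are remembered on failed_ddw_pdn_list, so later functions of the
same device bail out before touching the window again, all under the existing
direct_window_init_mutex. A kernel-style sketch of the same pattern with
assumed names (struct failed_node, failed_list, setup_mutex, try_setup):

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/string.h>

struct failed_node {                    /* hypothetical */
        const char *name;
        struct list_head list;
};

static LIST_HEAD(failed_list);
static DEFINE_MUTEX(setup_mutex);

static int try_setup(const char *name)
{
        struct failed_node *f;
        int rc = -ENODEV;               /* pretend the setup step failed */

        mutex_lock(&setup_mutex);

        /* Skip nodes that already failed once; the mutex guards the list. */
        list_for_each_entry(f, &failed_list, list) {
                if (!strcmp(f->name, name))
                        goto out_unlock;
        }

        /* ... the real window setup would run here and set rc ... */

        if (rc) {
                f = kzalloc(sizeof(*f), GFP_KERNEL);
                if (f) {
                        f->name = name;
                        list_add(&f->list, &failed_list);
                }
        }
out_unlock:
        mutex_unlock(&setup_mutex);
        return rc;
}

As in the hunk above, a failed allocation while recording the failure is simply
tolerated; the only cost is that a later call may retry the setup.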
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 0da39fed355a..6d62072a7d5a 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -109,7 +109,7 @@ void vpa_init(int cpu)
static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
unsigned long vpn, unsigned long pa,
unsigned long rflags, unsigned long vflags,
- int psize, int ssize)
+ int psize, int apsize, int ssize)
{
unsigned long lpar_rc;
unsigned long flags;
@@ -121,8 +121,8 @@ static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
"pa=%016lx, rflags=%lx, vflags=%lx, psize=%d)\n",
hpte_group, vpn, pa, rflags, vflags, psize);
- hpte_v = hpte_encode_v(vpn, psize, ssize) | vflags | HPTE_V_VALID;
- hpte_r = hpte_encode_r(pa, psize) | rflags;
+ hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
+ hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;
if (!(vflags & HPTE_V_BOLTED))
pr_devel(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);
@@ -155,7 +155,7 @@ static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
*/
if (unlikely(lpar_rc != H_SUCCESS)) {
if (!(vflags & HPTE_V_BOLTED))
- pr_devel(" lpar err %lu\n", lpar_rc);
+ pr_devel(" lpar err %ld\n", lpar_rc);
return -2;
}
if (!(vflags & HPTE_V_BOLTED))
@@ -186,7 +186,13 @@ static long pSeries_lpar_hpte_remove(unsigned long hpte_group)
(0x1UL << 4), &dummy1, &dummy2);
if (lpar_rc == H_SUCCESS)
return i;
- BUG_ON(lpar_rc != H_NOT_FOUND);
+
+ /*
+ * The test for adjunct partition is performed before the
+ * ANDCOND test. H_RESOURCE may be returned, so we need to
+ * check for that as well.
+ */
+ BUG_ON(lpar_rc != H_NOT_FOUND && lpar_rc != H_RESOURCE);
slot_offset++;
slot_offset &= 0x7;
diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
index 6573808cc5f3..3d01eee9ffb1 100644
--- a/arch/powerpc/platforms/pseries/mobility.c
+++ b/arch/powerpc/platforms/pseries/mobility.c
@@ -37,14 +37,16 @@ struct update_props_workarea {
#define UPDATE_DT_NODE 0x02000000
#define ADD_DT_NODE 0x03000000
-static int mobility_rtas_call(int token, char *buf)
+#define MIGRATION_SCOPE (1)
+
+static int mobility_rtas_call(int token, char *buf, s32 scope)
{
int rc;
spin_lock(&rtas_data_buf_lock);
memcpy(rtas_data_buf, buf, RTAS_DATA_BUF_SIZE);
- rc = rtas_call(token, 2, 1, NULL, rtas_data_buf, 1);
+ rc = rtas_call(token, 2, 1, NULL, rtas_data_buf, scope);
memcpy(buf, rtas_data_buf, RTAS_DATA_BUF_SIZE);
spin_unlock(&rtas_data_buf_lock);
@@ -123,7 +125,7 @@ static int update_dt_property(struct device_node *dn, struct property **prop,
return 0;
}
-static int update_dt_node(u32 phandle)
+static int update_dt_node(u32 phandle, s32 scope)
{
struct update_props_workarea *upwa;
struct device_node *dn;
@@ -132,6 +134,7 @@ static int update_dt_node(u32 phandle)
char *prop_data;
char *rtas_buf;
int update_properties_token;
+ u32 vd;
update_properties_token = rtas_token("ibm,update-properties");
if (update_properties_token == RTAS_UNKNOWN_SERVICE)
@@ -151,19 +154,31 @@ static int update_dt_node(u32 phandle)
upwa->phandle = phandle;
do {
- rc = mobility_rtas_call(update_properties_token, rtas_buf);
+ rc = mobility_rtas_call(update_properties_token, rtas_buf,
+ scope);
if (rc < 0)
break;
prop_data = rtas_buf + sizeof(*upwa);
- for (i = 0; i < upwa->nprops; i++) {
+ /* The first element of the buffer is the path of the node
+ * being updated in the form of an 8 byte string length
+ * followed by the string. Skip past this to get to the
+ * properties being updated.
+ */
+ vd = *prop_data++;
+ prop_data += vd;
+
+ /* The path we skipped over is counted as one of the elements
+ * returned so start counting at one.
+ */
+ for (i = 1; i < upwa->nprops; i++) {
char *prop_name;
- u32 vd;
- prop_name = prop_data + 1;
+ prop_name = prop_data;
prop_data += strlen(prop_name) + 1;
- vd = *prop_data++;
+ vd = *(u32 *)prop_data;
+ prop_data += sizeof(vd);
switch (vd) {
case 0x00000000:
@@ -219,7 +234,7 @@ static int add_dt_node(u32 parent_phandle, u32 drc_index)
return rc;
}
-static int pseries_devicetree_update(void)
+int pseries_devicetree_update(s32 scope)
{
char *rtas_buf;
u32 *data;
@@ -235,7 +250,7 @@ static int pseries_devicetree_update(void)
return -ENOMEM;
do {
- rc = mobility_rtas_call(update_nodes_token, rtas_buf);
+ rc = mobility_rtas_call(update_nodes_token, rtas_buf, scope);
if (rc && rc != 1)
break;
@@ -256,7 +271,7 @@ static int pseries_devicetree_update(void)
delete_dt_node(phandle);
break;
case UPDATE_DT_NODE:
- update_dt_node(phandle);
+ update_dt_node(phandle, scope);
break;
case ADD_DT_NODE:
drc_index = *data++;
@@ -276,7 +291,7 @@ void post_mobility_fixup(void)
int rc;
int activate_fw_token;
- rc = pseries_devicetree_update();
+ rc = pseries_devicetree_update(MIGRATION_SCOPE);
if (rc) {
printk(KERN_ERR "Initial post-mobility device tree update "
"failed: %d\n", rc);
@@ -292,7 +307,7 @@ void post_mobility_fixup(void)
rc = rtas_call(activate_fw_token, 0, 1, NULL);
if (!rc) {
- rc = pseries_devicetree_update();
+ rc = pseries_devicetree_update(MIGRATION_SCOPE);
if (rc)
printk(KERN_ERR "Secondary post-mobility device tree "
"update failed: %d\n", rc);
diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c
index e5b084723131..420524e6f8c9 100644
--- a/arch/powerpc/platforms/pseries/msi.c
+++ b/arch/powerpc/platforms/pseries/msi.c
@@ -24,6 +24,7 @@ static int query_token, change_token;
#define RTAS_RESET_FN 2
#define RTAS_CHANGE_MSI_FN 3
#define RTAS_CHANGE_MSIX_FN 4
+#define RTAS_CHANGE_32MSI_FN 5
static struct pci_dn *get_pdn(struct pci_dev *pdev)
{
@@ -58,7 +59,8 @@ static int rtas_change_msi(struct pci_dn *pdn, u32 func, u32 num_irqs)
seq_num = 1;
do {
- if (func == RTAS_CHANGE_MSI_FN || func == RTAS_CHANGE_MSIX_FN)
+ if (func == RTAS_CHANGE_MSI_FN || func == RTAS_CHANGE_MSIX_FN ||
+ func == RTAS_CHANGE_32MSI_FN)
rc = rtas_call(change_token, 6, 4, rtas_ret, addr,
BUID_HI(buid), BUID_LO(buid),
func, num_irqs, seq_num);
@@ -426,9 +428,12 @@ static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec_in, int type)
*/
again:
if (type == PCI_CAP_ID_MSI) {
- rc = rtas_change_msi(pdn, RTAS_CHANGE_MSI_FN, nvec);
+ if (pdn->force_32bit_msi)
+ rc = rtas_change_msi(pdn, RTAS_CHANGE_32MSI_FN, nvec);
+ else
+ rc = rtas_change_msi(pdn, RTAS_CHANGE_MSI_FN, nvec);
- if (rc < 0) {
+ if (rc < 0 && !pdn->force_32bit_msi) {
pr_debug("rtas_msi: trying the old firmware call.\n");
rc = rtas_change_msi(pdn, RTAS_CHANGE_FN, nvec);
}
@@ -512,3 +517,13 @@ static int rtas_msi_init(void)
return 0;
}
arch_initcall(rtas_msi_init);
+
+static void quirk_radeon(struct pci_dev *dev)
+{
+ struct pci_dn *pdn = get_pdn(dev);
+
+ if (pdn)
+ pdn->force_32bit_msi = 1;
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x68f2, quirk_radeon);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0xaa68, quirk_radeon);
diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c
index 0b580f413a9a..5f93856cdf47 100644
--- a/arch/powerpc/platforms/pseries/pci.c
+++ b/arch/powerpc/platforms/pseries/pci.c
@@ -108,3 +108,56 @@ static void fixup_winbond_82c105(struct pci_dev* dev)
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_WINBOND, PCI_DEVICE_ID_WINBOND_82C105,
fixup_winbond_82c105);
+
+int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
+{
+ struct device_node *dn, *pdn;
+ struct pci_bus *bus;
+ const uint32_t *pcie_link_speed_stats;
+
+ bus = bridge->bus;
+
+ dn = pcibios_get_phb_of_node(bus);
+ if (!dn)
+ return 0;
+
+ for (pdn = dn; pdn != NULL; pdn = of_get_next_parent(pdn)) {
+ pcie_link_speed_stats = (const uint32_t *) of_get_property(pdn,
+ "ibm,pcie-link-speed-stats", NULL);
+ if (pcie_link_speed_stats)
+ break;
+ }
+
+ of_node_put(pdn);
+
+ if (!pcie_link_speed_stats) {
+ pr_err("no ibm,pcie-link-speed-stats property\n");
+ return 0;
+ }
+
+ switch (pcie_link_speed_stats[0]) {
+ case 0x01:
+ bus->max_bus_speed = PCIE_SPEED_2_5GT;
+ break;
+ case 0x02:
+ bus->max_bus_speed = PCIE_SPEED_5_0GT;
+ break;
+ default:
+ bus->max_bus_speed = PCI_SPEED_UNKNOWN;
+ break;
+ }
+
+ switch (pcie_link_speed_stats[1]) {
+ case 0x01:
+ bus->cur_bus_speed = PCIE_SPEED_2_5GT;
+ break;
+ case 0x02:
+ bus->cur_bus_speed = PCIE_SPEED_5_0GT;
+ break;
+ default:
+ bus->cur_bus_speed = PCI_SPEED_UNKNOWN;
+ break;
+ }
+
+ return 0;
+}
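
pseries_root_bridge_prepare() walks up from the PHB's device-tree node looking
for ibm,pcie-link-speed-stats and translates its first two cells into the
bus's maximum and current speed. The two switch statements share one mapping,
which could be captured in a small helper along these lines (pseries_link_speed
is a hypothetical name; the enum values come from <linux/pci.h>):

#include <linux/pci.h>

/* Map an ibm,pcie-link-speed-stats cell to a pci_bus_speed value. */
static enum pci_bus_speed pseries_link_speed(u32 code)
{
        switch (code) {
        case 0x01:
                return PCIE_SPEED_2_5GT;
        case 0x02:
                return PCIE_SPEED_5_0GT;
        default:
                return PCI_SPEED_UNKNOWN;
        }
}

With such a helper the assignments reduce to
bus->max_bus_speed = pseries_link_speed(pcie_link_speed_stats[0]) and
bus->cur_bus_speed = pseries_link_speed(pcie_link_speed_stats[1]); the patch
keeps the two explicit switches instead.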
diff --git a/arch/powerpc/platforms/pseries/plpar_wrappers.h b/arch/powerpc/platforms/pseries/plpar_wrappers.h
index f368668d97b3..f35787b6a5e0 100644
--- a/arch/powerpc/platforms/pseries/plpar_wrappers.h
+++ b/arch/powerpc/platforms/pseries/plpar_wrappers.h
@@ -58,40 +58,39 @@ static inline long extended_cede_processor(unsigned long latency_hint)
static inline long vpa_call(unsigned long flags, unsigned long cpu,
unsigned long vpa)
{
- /* flags are in bits 16-18 (counting from most significant bit) */
- flags = flags << (63 - 18);
+ flags = flags << H_VPA_FUNC_SHIFT;
return plpar_hcall_norets(H_REGISTER_VPA, flags, cpu, vpa);
}
static inline long unregister_vpa(unsigned long cpu)
{
- return vpa_call(0x5, cpu, 0);
+ return vpa_call(H_VPA_DEREG_VPA, cpu, 0);
}
static inline long register_vpa(unsigned long cpu, unsigned long vpa)
{
- return vpa_call(0x1, cpu, vpa);
+ return vpa_call(H_VPA_REG_VPA, cpu, vpa);
}
static inline long unregister_slb_shadow(unsigned long cpu)
{
- return vpa_call(0x7, cpu, 0);
+ return vpa_call(H_VPA_DEREG_SLB, cpu, 0);
}
static inline long register_slb_shadow(unsigned long cpu, unsigned long vpa)
{
- return vpa_call(0x3, cpu, vpa);
+ return vpa_call(H_VPA_REG_SLB, cpu, vpa);
}
static inline long unregister_dtl(unsigned long cpu)
{
- return vpa_call(0x6, cpu, 0);
+ return vpa_call(H_VPA_DEREG_DTL, cpu, 0);
}
static inline long register_dtl(unsigned long cpu, unsigned long vpa)
{
- return vpa_call(0x2, cpu, vpa);
+ return vpa_call(H_VPA_REG_DTL, cpu, vpa);
}
static inline long plpar_page_set_loaned(unsigned long vpa)
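
The vpa_call() wrappers now pass named H_VPA_* sub-function codes instead of
magic numbers and shift them with H_VPA_FUNC_SHIFT. Those definitions are not
part of this hunk; judging from the removed literals and the old "bits 16-18"
comment, they presumably look like the following (values inferred, not quoted
from the patch):

/* Presumed hvcall.h definitions backing the names used above. */
#define H_VPA_FUNC_SHIFT        (63 - 18)       /* flags live in bits 16-18 */
#define H_VPA_REG_VPA           0x1             /* register VPA */
#define H_VPA_REG_DTL           0x2             /* register dispatch trace log */
#define H_VPA_REG_SLB           0x3             /* register SLB shadow buffer */
#define H_VPA_DEREG_VPA         0x5             /* deregister VPA */
#define H_VPA_DEREG_DTL         0x6             /* deregister dispatch trace log */
#define H_VPA_DEREG_SLB         0x7             /* deregister SLB shadow buffer */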
diff --git a/arch/powerpc/platforms/pseries/processor_idle.c b/arch/powerpc/platforms/pseries/processor_idle.c
index 4d806b419606..4644efa06941 100644
--- a/arch/powerpc/platforms/pseries/processor_idle.c
+++ b/arch/powerpc/platforms/pseries/processor_idle.c
@@ -23,8 +23,8 @@
#include "pseries.h"
struct cpuidle_driver pseries_idle_driver = {
- .name = "pseries_idle",
- .owner = THIS_MODULE,
+ .name = "pseries_idle",
+ .owner = THIS_MODULE,
};
#define MAX_IDLE_STATE_COUNT 2
@@ -33,10 +33,8 @@ static int max_idle_state = MAX_IDLE_STATE_COUNT - 1;
static struct cpuidle_device __percpu *pseries_cpuidle_devices;
static struct cpuidle_state *cpuidle_state_table;
-static inline void idle_loop_prolog(unsigned long *in_purr, ktime_t *kt_before)
+static inline void idle_loop_prolog(unsigned long *in_purr)
{
-
- *kt_before = ktime_get();
*in_purr = mfspr(SPRN_PURR);
/*
* Indicate to the HV that we are idle. Now would be
@@ -45,12 +43,10 @@ static inline void idle_loop_prolog(unsigned long *in_purr, ktime_t *kt_before)
get_lppaca()->idle = 1;
}
-static inline s64 idle_loop_epilog(unsigned long in_purr, ktime_t kt_before)
+static inline void idle_loop_epilog(unsigned long in_purr)
{
get_lppaca()->wait_state_cycles += mfspr(SPRN_PURR) - in_purr;
get_lppaca()->idle = 0;
-
- return ktime_to_us(ktime_sub(ktime_get(), kt_before));
}
static int snooze_loop(struct cpuidle_device *dev,
@@ -58,10 +54,9 @@ static int snooze_loop(struct cpuidle_device *dev,
int index)
{
unsigned long in_purr;
- ktime_t kt_before;
int cpu = dev->cpu;
- idle_loop_prolog(&in_purr, &kt_before);
+ idle_loop_prolog(&in_purr);
local_irq_enable();
set_thread_flag(TIF_POLLING_NRFLAG);
@@ -75,8 +70,8 @@ static int snooze_loop(struct cpuidle_device *dev,
clear_thread_flag(TIF_POLLING_NRFLAG);
smp_mb();
- dev->last_residency =
- (int)idle_loop_epilog(in_purr, kt_before);
+ idle_loop_epilog(in_purr);
+
return index;
}
@@ -102,9 +97,8 @@ static int dedicated_cede_loop(struct cpuidle_device *dev,
int index)
{
unsigned long in_purr;
- ktime_t kt_before;
- idle_loop_prolog(&in_purr, &kt_before);
+ idle_loop_prolog(&in_purr);
get_lppaca()->donate_dedicated_cpu = 1;
ppc64_runlatch_off();
@@ -112,8 +106,9 @@ static int dedicated_cede_loop(struct cpuidle_device *dev,
check_and_cede_processor();
get_lppaca()->donate_dedicated_cpu = 0;
- dev->last_residency =
- (int)idle_loop_epilog(in_purr, kt_before);
+
+ idle_loop_epilog(in_purr);
+
return index;
}
@@ -122,9 +117,8 @@ static int shared_cede_loop(struct cpuidle_device *dev,
int index)
{
unsigned long in_purr;
- ktime_t kt_before;
- idle_loop_prolog(&in_purr, &kt_before);
+ idle_loop_prolog(&in_purr);
/*
* Yield the processor to the hypervisor. We return if
@@ -135,8 +129,8 @@ static int shared_cede_loop(struct cpuidle_device *dev,
*/
check_and_cede_processor();
- dev->last_residency =
- (int)idle_loop_epilog(in_purr, kt_before);
+ idle_loop_epilog(in_purr);
+
return index;
}
diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h
index 9a3dda07566f..c2a3a258001c 100644
--- a/arch/powerpc/platforms/pseries/pseries.h
+++ b/arch/powerpc/platforms/pseries/pseries.h
@@ -19,7 +19,10 @@ extern void request_event_sources_irqs(struct device_node *np,
#include <linux/of.h>
-extern void __init fw_feature_init(const char *hypertas, unsigned long len);
+extern void __init fw_hypertas_feature_init(const char *hypertas,
+ unsigned long len);
+extern void __init fw_vec5_feature_init(const char *hypertas,
+ unsigned long len);
struct pt_regs;
@@ -60,4 +63,8 @@ extern int dlpar_detach_node(struct device_node *);
/* Snooze Delay, pseries_idle */
DECLARE_PER_CPU(long, smt_snooze_delay);
+/* PCI root bridge prepare function override for pseries */
+struct pci_host_bridge;
+int pseries_root_bridge_prepare(struct pci_host_bridge *bridge);
+
#endif /* _PSERIES_PSERIES_H */
diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c
index d6491bd481d0..f93cdf55628c 100644
--- a/arch/powerpc/platforms/pseries/reconfig.c
+++ b/arch/powerpc/platforms/pseries/reconfig.c
@@ -452,7 +452,7 @@ static int proc_ppc64_create_ofdt(void)
ent = proc_create("powerpc/ofdt", S_IWUSR, NULL, &ofdt_fops);
if (ent)
- ent->size = 0;
+ proc_set_size(ent, 0);
return 0;
}
diff --git a/arch/powerpc/platforms/pseries/scanlog.c b/arch/powerpc/platforms/pseries/scanlog.c
index 47f3cda2a68b..b502ab61aafa 100644
--- a/arch/powerpc/platforms/pseries/scanlog.c
+++ b/arch/powerpc/platforms/pseries/scanlog.c
@@ -41,13 +41,12 @@
static unsigned int ibm_scan_log_dump; /* RTAS token */
-static struct proc_dir_entry *proc_ppc64_scan_log_dump; /* The proc file */
+static unsigned int *scanlog_buffer; /* The data buffer */
static ssize_t scanlog_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
- struct proc_dir_entry *dp = PDE(file_inode(file));
- unsigned int *data = (unsigned int *)dp->data;
+ unsigned int *data = scanlog_buffer;
int status;
unsigned long len, off;
unsigned int wait_time;
@@ -135,8 +134,7 @@ static ssize_t scanlog_write(struct file * file, const char __user * buf,
static int scanlog_open(struct inode * inode, struct file * file)
{
- struct proc_dir_entry *dp = PDE(inode);
- unsigned int *data = (unsigned int *)dp->data;
+ unsigned int *data = scanlog_buffer;
if (data[0] != 0) {
/* This imperfect test stops a second copy of the
@@ -152,11 +150,9 @@ static int scanlog_open(struct inode * inode, struct file * file)
static int scanlog_release(struct inode * inode, struct file * file)
{
- struct proc_dir_entry *dp = PDE(inode);
- unsigned int *data = (unsigned int *)dp->data;
+ unsigned int *data = scanlog_buffer;
data[0] = 0;
-
return 0;
}
@@ -172,7 +168,6 @@ const struct file_operations scanlog_fops = {
static int __init scanlog_init(void)
{
struct proc_dir_entry *ent;
- void *data;
int err = -ENOMEM;
ibm_scan_log_dump = rtas_token("ibm,scan-log-dump");
@@ -180,29 +175,24 @@ static int __init scanlog_init(void)
return -ENODEV;
/* Ideally we could allocate a buffer < 4G */
- data = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
- if (!data)
+ scanlog_buffer = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
+ if (!scanlog_buffer)
goto err;
- ent = proc_create_data("powerpc/rtas/scan-log-dump", S_IRUSR, NULL,
- &scanlog_fops, data);
+ ent = proc_create("powerpc/rtas/scan-log-dump", S_IRUSR, NULL,
+ &scanlog_fops);
if (!ent)
goto err;
-
- proc_ppc64_scan_log_dump = ent;
-
return 0;
err:
- kfree(data);
+ kfree(scanlog_buffer);
return err;
}
static void __exit scanlog_cleanup(void)
{
- if (proc_ppc64_scan_log_dump) {
- kfree(proc_ppc64_scan_log_dump->data);
- remove_proc_entry("scan-log-dump", proc_ppc64_scan_log_dump->parent);
- }
+ remove_proc_entry("powerpc/rtas/scan-log-dump", NULL);
+ kfree(scanlog_buffer);
}
module_init(scanlog_init);
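
scanlog.c no longer stashes its buffer in the proc_dir_entry (the PDE data
field is on its way out); it keeps a file-scope scanlog_buffer, registers the
file with plain proc_create(), and frees the buffer next to remove_proc_entry()
on cleanup. A minimal sketch of that pattern with hypothetical names
(example_buffer, example_fops, /proc/driver/example):

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/slab.h>

static unsigned int *example_buffer;    /* hypothetical data buffer */

static const struct file_operations example_fops = {
        .owner = THIS_MODULE,
        /* .read/.write would operate on example_buffer directly */
};

static int __init example_init(void)
{
        struct proc_dir_entry *ent;

        example_buffer = kzalloc(PAGE_SIZE, GFP_KERNEL);
        if (!example_buffer)
                return -ENOMEM;

        ent = proc_create("driver/example", S_IRUSR, NULL, &example_fops);
        if (!ent) {
                kfree(example_buffer);
                return -ENOMEM;
        }
        return 0;
}

static void __exit example_exit(void)
{
        remove_proc_entry("driver/example", NULL);
        kfree(example_buffer);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");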
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 8bcc9ca6682f..c11c8238797c 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -466,6 +466,8 @@ static void __init pSeries_setup_arch(void)
else
ppc_md.enable_pmcs = power4_enable_pmcs;
+ ppc_md.pcibios_root_bridge_prepare = pseries_root_bridge_prepare;
+
if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
long rc;
if ((rc = pSeries_enable_reloc_on_exc()) != H_SUCCESS) {
@@ -628,25 +630,39 @@ static void __init pSeries_init_early(void)
* Called very early, MMU is off, device-tree isn't unflattened
*/
-static int __init pSeries_probe_hypertas(unsigned long node,
- const char *uname, int depth,
- void *data)
+static int __init pseries_probe_fw_features(unsigned long node,
+ const char *uname, int depth,
+ void *data)
{
- const char *hypertas;
+ const char *prop;
unsigned long len;
+ static int hypertas_found;
+ static int vec5_found;
- if (depth != 1 ||
- (strcmp(uname, "rtas") != 0 && strcmp(uname, "rtas@0") != 0))
+ if (depth != 1)
return 0;
- hypertas = of_get_flat_dt_prop(node, "ibm,hypertas-functions", &len);
- if (!hypertas)
- return 1;
+ if (!strcmp(uname, "rtas") || !strcmp(uname, "rtas@0")) {
+ prop = of_get_flat_dt_prop(node, "ibm,hypertas-functions",
+ &len);
+ if (prop) {
+ powerpc_firmware_features |= FW_FEATURE_LPAR;
+ fw_hypertas_feature_init(prop, len);
+ }
- powerpc_firmware_features |= FW_FEATURE_LPAR;
- fw_feature_init(hypertas, len);
+ hypertas_found = 1;
+ }
- return 1;
+ if (!strcmp(uname, "chosen")) {
+ prop = of_get_flat_dt_prop(node, "ibm,architecture-vec-5",
+ &len);
+ if (prop)
+ fw_vec5_feature_init(prop, len);
+
+ vec5_found = 1;
+ }
+
+ return hypertas_found && vec5_found;
}
static int __init pSeries_probe(void)
@@ -669,7 +685,7 @@ static int __init pSeries_probe(void)
pr_debug("pSeries detected, looking for LPAR capability...\n");
/* Now try to figure out if we are running on LPAR */
- of_scan_flat_dt(pSeries_probe_hypertas, NULL);
+ of_scan_flat_dt(pseries_probe_fw_features, NULL);
if (firmware_has_feature(FW_FEATURE_LPAR))
hpte_init_lpar();
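
pSeries_probe() now runs a single of_scan_flat_dt() callback that inspects
both the rtas node and /chosen, and only returns nonzero (which stops the
scan) once both have been seen. The general shape of such a callback, shown
here for a single hypothetical property ("example,prop") on /chosen:

#include <linux/init.h>
#include <linux/of_fdt.h>
#include <linux/string.h>

static int example_prop_seen;           /* hypothetical flag */

static int __init probe_example_prop(unsigned long node, const char *uname,
                                     int depth, void *data)
{
        unsigned long len;

        if (depth != 1 || strcmp(uname, "chosen") != 0)
                return 0;               /* not our node, keep scanning */

        if (of_get_flat_dt_prop(node, "example,prop", &len))
                example_prop_seen = 1;

        return 1;                       /* found /chosen, stop the scan */
}

/* Caller, very early in boot:  of_scan_flat_dt(probe_example_prop, NULL); */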
diff --git a/arch/powerpc/platforms/wsp/Kconfig b/arch/powerpc/platforms/wsp/Kconfig
index 79d2225b7608..422a175b10ee 100644
--- a/arch/powerpc/platforms/wsp/Kconfig
+++ b/arch/powerpc/platforms/wsp/Kconfig
@@ -9,7 +9,6 @@ config PPC_WSP
select PCI
select PPC_IO_WORKAROUNDS if PCI
select PPC_INDIRECT_PIO if PCI
- select PPC_WSP_COPRO
default n
menu "WSP platform selection"
@@ -29,7 +28,3 @@ config PPC_CHROMA
default y
endmenu
-
-config PPC_A2_DD2
- bool "Support for DD2 based A2/WSP systems"
- depends on PPC_A2
diff --git a/arch/powerpc/platforms/wsp/wsp_pci.c b/arch/powerpc/platforms/wsp/wsp_pci.c
index 8e22f561d171..62cb527493e7 100644
--- a/arch/powerpc/platforms/wsp/wsp_pci.c
+++ b/arch/powerpc/platforms/wsp/wsp_pci.c
@@ -502,7 +502,7 @@ static void __init wsp_pcie_configure_hw(struct pci_controller *hose)
(~(hose->mem_resources[0].end -
hose->mem_resources[0].start)) & 0x3ffffff0000ul);
out_be64(hose->cfg_data + PCIE_REG_M32A_START_ADDR,
- (hose->mem_resources[0].start - hose->pci_mem_offset) | 1);
+ (hose->mem_resources[0].start - hose->mem_offset[0]) | 1);
/* Clear all TVT entries
*
diff --git a/arch/powerpc/sysdev/Kconfig b/arch/powerpc/sysdev/Kconfig
index a84fecf63c4d..ab4cb5476472 100644
--- a/arch/powerpc/sysdev/Kconfig
+++ b/arch/powerpc/sysdev/Kconfig
@@ -19,6 +19,7 @@ config PPC_MSI_BITMAP
default y if MPIC
default y if FSL_PCI
default y if PPC4xx_MSI
+ default y if POWERNV_MSI
source "arch/powerpc/sysdev/xics/Kconfig"
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index 178c99427b1c..ab02db3d02d8 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -333,6 +333,8 @@ static int fsl_of_msi_remove(struct platform_device *ofdev)
return 0;
}
+static struct lock_class_key fsl_msi_irq_class;
+
static int fsl_msi_setup_hwirq(struct fsl_msi *msi, struct platform_device *dev,
int offset, int irq_index)
{
@@ -351,7 +353,7 @@ static int fsl_msi_setup_hwirq(struct fsl_msi *msi, struct platform_device *dev,
dev_err(&dev->dev, "No memory for MSI cascade data\n");
return -ENOMEM;
}
-
+ irq_set_lockdep_class(virt_msir, &fsl_msi_irq_class);
msi->msi_virqs[irq_index] = virt_msir;
cascade_data->index = offset;
cascade_data->msi_data = msi;
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index 682084dba19b..028ac1f71b51 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -54,16 +54,63 @@ static void quirk_fsl_pcie_header(struct pci_dev *dev)
return;
}
-static int __init fsl_pcie_check_link(struct pci_controller *hose)
+static int fsl_indirect_read_config(struct pci_bus *, unsigned int,
+ int, int, u32 *);
+
+static int fsl_pcie_check_link(struct pci_controller *hose)
{
- u32 val;
+ u32 val = 0;
+
+ if (hose->indirect_type & PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK) {
+ if (hose->ops->read == fsl_indirect_read_config) {
+ struct pci_bus bus;
+ bus.number = 0;
+ bus.sysdata = hose;
+ bus.ops = hose->ops;
+ indirect_read_config(&bus, 0, PCIE_LTSSM, 4, &val);
+ } else
+ early_read_config_dword(hose, 0, 0, PCIE_LTSSM, &val);
+ if (val < PCIE_LTSSM_L0)
+ return 1;
+ } else {
+ struct ccsr_pci __iomem *pci = hose->private_data;
+ /* for PCIe IP rev 3.0 or greater use CSR0 for link state */
+ val = (in_be32(&pci->pex_csr0) & PEX_CSR0_LTSSM_MASK)
+ >> PEX_CSR0_LTSSM_SHIFT;
+ if (val != PEX_CSR0_LTSSM_L0)
+ return 1;
+ }
- early_read_config_dword(hose, 0, 0, PCIE_LTSSM, &val);
- if (val < PCIE_LTSSM_L0)
- return 1;
return 0;
}
+static int fsl_indirect_read_config(struct pci_bus *bus, unsigned int devfn,
+ int offset, int len, u32 *val)
+{
+ struct pci_controller *hose = pci_bus_to_host(bus);
+
+ if (fsl_pcie_check_link(hose))
+ hose->indirect_type |= PPC_INDIRECT_TYPE_NO_PCIE_LINK;
+ else
+ hose->indirect_type &= ~PPC_INDIRECT_TYPE_NO_PCIE_LINK;
+
+ return indirect_read_config(bus, devfn, offset, len, val);
+}
+
+static struct pci_ops fsl_indirect_pci_ops =
+{
+ .read = fsl_indirect_read_config,
+ .write = indirect_write_config,
+};
+
+static void __init fsl_setup_indirect_pci(struct pci_controller* hose,
+ resource_size_t cfg_addr,
+ resource_size_t cfg_data, u32 flags)
+{
+ setup_indirect_pci(hose, cfg_addr, cfg_data, flags);
+ hose->ops = &fsl_indirect_pci_ops;
+}
+
#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
#define MAX_PHYS_ADDR_BITS 40
@@ -106,7 +153,7 @@ static int setup_one_atmu(struct ccsr_pci __iomem *pci,
flags |= 0x10000000; /* enable relaxed ordering */
for (i = 0; size > 0; i++) {
- unsigned int bits = min(__ilog2(size),
+ unsigned int bits = min(ilog2(size),
__ffs(pci_addr | phys_addr));
if (index + i >= 5)
@@ -126,13 +173,12 @@ static int setup_one_atmu(struct ccsr_pci __iomem *pci,
}
/* atmu setup for fsl pci/pcie controller */
-static void setup_pci_atmu(struct pci_controller *hose,
- struct resource *rsrc)
+static void setup_pci_atmu(struct pci_controller *hose)
{
- struct ccsr_pci __iomem *pci;
+ struct ccsr_pci __iomem *pci = hose->private_data;
int i, j, n, mem_log, win_idx = 3, start_idx = 1, end_idx = 4;
u64 mem, sz, paddr_hi = 0;
- u64 paddr_lo = ULLONG_MAX;
+ u64 offset = 0, paddr_lo = ULLONG_MAX;
u32 pcicsrbar = 0, pcicsrbar_sz;
u32 piwar = PIWAR_EN | PIWAR_PF | PIWAR_TGI_LOCAL |
PIWAR_READ_SNOOP | PIWAR_WRITE_SNOOP;
@@ -140,15 +186,6 @@ static void setup_pci_atmu(struct pci_controller *hose,
const u64 *reg;
int len;
- pr_debug("PCI memory map start 0x%016llx, size 0x%016llx\n",
- (u64)rsrc->start, (u64)resource_size(rsrc));
-
- pci = ioremap(rsrc->start, resource_size(rsrc));
- if (!pci) {
- dev_err(hose->parent, "Unable to map ATMU registers\n");
- return;
- }
-
if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
if (in_be32(&pci->block_rev1) >= PCIE_IP_REV_2_2) {
win_idx = 2;
@@ -171,8 +208,9 @@ static void setup_pci_atmu(struct pci_controller *hose,
paddr_lo = min(paddr_lo, (u64)hose->mem_resources[i].start);
paddr_hi = max(paddr_hi, (u64)hose->mem_resources[i].end);
- n = setup_one_atmu(pci, j, &hose->mem_resources[i],
- hose->pci_mem_offset);
+ /* We assume all memory resources have the same offset */
+ offset = hose->mem_offset[i];
+ n = setup_one_atmu(pci, j, &hose->mem_resources[i], offset);
if (n < 0 || j >= 5) {
pr_err("Ran out of outbound PCI ATMUs for resource %d!\n", i);
@@ -196,23 +234,23 @@ static void setup_pci_atmu(struct pci_controller *hose,
out_be32(&pci->pow[j].powbar, (hose->io_base_phys >> 12));
/* Enable, IO R/W */
out_be32(&pci->pow[j].powar, 0x80088000
- | (__ilog2(hose->io_resource.end
+ | (ilog2(hose->io_resource.end
- hose->io_resource.start + 1) - 1));
}
}
/* convert to pci address space */
- paddr_hi -= hose->pci_mem_offset;
- paddr_lo -= hose->pci_mem_offset;
+ paddr_hi -= offset;
+ paddr_lo -= offset;
if (paddr_hi == paddr_lo) {
pr_err("%s: No outbound window space\n", name);
- goto out;
+ return;
}
if (paddr_lo == 0) {
pr_err("%s: No space for inbound window\n", name);
- goto out;
+ return;
}
/* setup PCSRBAR/PEXCSRBAR */
@@ -261,7 +299,7 @@ static void setup_pci_atmu(struct pci_controller *hose,
}
sz = min(mem, paddr_lo);
- mem_log = __ilog2_u64(sz);
+ mem_log = ilog2(sz);
/* PCIe can overmap inbound & outbound since RX & TX are separated */
if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
@@ -290,7 +328,7 @@ static void setup_pci_atmu(struct pci_controller *hose,
* SWIOTLB and access the full range of memory
*/
if (sz != mem) {
- mem_log = __ilog2_u64(mem);
+ mem_log = ilog2(mem);
/* Size window up if we don't fit in exact power-of-2 */
if ((1ull << mem_log) != mem)
@@ -327,7 +365,7 @@ static void setup_pci_atmu(struct pci_controller *hose,
sz -= 1ull << mem_log;
if (sz) {
- mem_log = __ilog2_u64(sz);
+ mem_log = ilog2(sz);
piwar |= (mem_log - 1);
out_be32(&pci->piw[win_idx].pitar, paddr >> 12);
@@ -358,9 +396,6 @@ static void setup_pci_atmu(struct pci_controller *hose,
pr_info("%s: DMA window size is 0x%llx\n", name,
(u64)hose->dma_window_size);
}
-
-out:
- iounmap(pci);
}
static void __init setup_pci_cmd(struct pci_controller *hose)
@@ -429,6 +464,7 @@ int __init fsl_add_bridge(struct platform_device *pdev, int is_primary)
const int *bus_range;
u8 hdr_type, progif;
struct device_node *dev;
+ struct ccsr_pci __iomem *pci;
dev = pdev->dev.of_node;
@@ -461,8 +497,18 @@ int __init fsl_add_bridge(struct platform_device *pdev, int is_primary)
hose->first_busno = bus_range ? bus_range[0] : 0x0;
hose->last_busno = bus_range ? bus_range[1] : 0xff;
- setup_indirect_pci(hose, rsrc.start, rsrc.start + 0x4,
- PPC_INDIRECT_TYPE_BIG_ENDIAN);
+ pr_debug("PCI memory map start 0x%016llx, size 0x%016llx\n",
+ (u64)rsrc.start, (u64)resource_size(&rsrc));
+
+ pci = hose->private_data = ioremap(rsrc.start, resource_size(&rsrc));
+ if (!hose->private_data)
+ goto no_bridge;
+
+ fsl_setup_indirect_pci(hose, rsrc.start, rsrc.start + 0x4,
+ PPC_INDIRECT_TYPE_BIG_ENDIAN);
+
+ if (in_be32(&pci->block_rev1) < PCIE_IP_REV_3_0)
+ hose->indirect_type |= PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK;
if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
/* For PCIE read HEADER_TYPE to identify controller mode */
@@ -500,11 +546,12 @@ int __init fsl_add_bridge(struct platform_device *pdev, int is_primary)
pci_process_bridge_OF_ranges(hose, dev, is_primary);
/* Setup PEX window registers */
- setup_pci_atmu(hose, &rsrc);
+ setup_pci_atmu(hose);
return 0;
no_bridge:
+ iounmap(hose->private_data);
/* unmap cfg_data & cfg_addr separately if not on same page */
if (((unsigned long)hose->cfg_data & PAGE_MASK) !=
((unsigned long)hose->cfg_addr & PAGE_MASK))
@@ -681,6 +728,7 @@ static int __init mpc83xx_pcie_setup(struct pci_controller *hose,
WARN_ON(hose->dn->data);
hose->dn->data = pcie;
hose->ops = &mpc83xx_pcie_ops;
+ hose->indirect_type |= PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK;
out_le32(pcie->cfg_type0 + PEX_OUTWIN0_TAH, 0);
out_le32(pcie->cfg_type0 + PEX_OUTWIN0_TAL, 0);
@@ -766,8 +814,8 @@ int __init mpc83xx_add_bridge(struct device_node *dev)
if (ret)
goto err0;
} else {
- setup_indirect_pci(hose, rsrc_cfg.start,
- rsrc_cfg.start + 4, 0);
+ fsl_setup_indirect_pci(hose, rsrc_cfg.start,
+ rsrc_cfg.start + 4, 0);
}
printk(KERN_INFO "Found FSL PCI host bridge at 0x%016llx. "
@@ -836,6 +884,7 @@ static const struct of_device_id pci_ids[] = {
{ .compatible = "fsl,qoriq-pcie-v2.2", },
{ .compatible = "fsl,qoriq-pcie-v2.3", },
{ .compatible = "fsl,qoriq-pcie-v2.4", },
+ { .compatible = "fsl,qoriq-pcie-v3.0", },
/*
* The following entries are for compatibility with older device
@@ -927,7 +976,7 @@ static int fsl_pci_resume(struct device *dev)
return -ENODEV;
}
- setup_pci_atmu(hose, &pci_rsrc);
+ setup_pci_atmu(hose);
return 0;
}
diff --git a/arch/powerpc/sysdev/fsl_pci.h b/arch/powerpc/sysdev/fsl_pci.h
index c495c00c8740..72b5625330e2 100644
--- a/arch/powerpc/sysdev/fsl_pci.h
+++ b/arch/powerpc/sysdev/fsl_pci.h
@@ -14,9 +14,12 @@
#ifndef __POWERPC_FSL_PCI_H
#define __POWERPC_FSL_PCI_H
+struct platform_device;
+
#define PCIE_LTSSM 0x0404 /* PCIE Link Training and Status */
#define PCIE_LTSSM_L0 0x16 /* L0 state */
#define PCIE_IP_REV_2_2 0x02080202 /* PCIE IP block version Rev2.2 */
+#define PCIE_IP_REV_3_0 0x02080300 /* PCIE IP block version Rev3.0 */
#define PIWAR_EN 0x80000000 /* Enable */
#define PIWAR_PF 0x20000000 /* prefetch */
#define PIWAR_TGI_LOCAL 0x00f00000 /* target - local memory */
@@ -89,6 +92,16 @@ struct ccsr_pci {
__be32 pex_err_cap_r1; /* 0x.e2c - PCIE error capture register 0 */
__be32 pex_err_cap_r2; /* 0x.e30 - PCIE error capture register 0 */
__be32 pex_err_cap_r3; /* 0x.e34 - PCIE error capture register 0 */
+ u8 res_e38[200];
+ __be32 pdb_stat; /* 0x.f00 - PCIE Debug Status */
+ u8 res_f04[16];
+ __be32 pex_csr0; /* 0x.f14 - PEX Control/Status register 0*/
+#define PEX_CSR0_LTSSM_MASK 0xFC
+#define PEX_CSR0_LTSSM_SHIFT 2
+#define PEX_CSR0_LTSSM_L0 0x11
+ __be32 pex_csr1; /* 0x.f18 - PEX Control/Status register 1*/
+ u8 res_f1c[228];
+
};
extern int fsl_add_bridge(struct platform_device *pdev, int is_primary);
diff --git a/arch/powerpc/sysdev/indirect_pci.c b/arch/powerpc/sysdev/indirect_pci.c
index 82fdad885d20..c6c8b526a4f6 100644
--- a/arch/powerpc/sysdev/indirect_pci.c
+++ b/arch/powerpc/sysdev/indirect_pci.c
@@ -20,9 +20,8 @@
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
-static int
-indirect_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
- int len, u32 *val)
+int indirect_read_config(struct pci_bus *bus, unsigned int devfn,
+ int offset, int len, u32 *val)
{
struct pci_controller *hose = pci_bus_to_host(bus);
volatile void __iomem *cfg_data;
@@ -78,9 +77,8 @@ indirect_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
return PCIBIOS_SUCCESSFUL;
}
-static int
-indirect_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
- int len, u32 val)
+int indirect_write_config(struct pci_bus *bus, unsigned int devfn,
+ int offset, int len, u32 val)
{
struct pci_controller *hose = pci_bus_to_host(bus);
volatile void __iomem *cfg_data;
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index d30e6a676c89..ee21b5e71aec 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -1001,8 +1001,12 @@ static int mpic_host_map(struct irq_domain *h, unsigned int virq,
if (hw == mpic->spurious_vec)
return -EINVAL;
- if (mpic->protected && test_bit(hw, mpic->protected))
- return -EINVAL;
+ if (mpic->protected && test_bit(hw, mpic->protected)) {
+ pr_warning("mpic: Mapping of source 0x%x failed, "
+ "source protected by firmware !\n",\
+ (unsigned int)hw);
+ return -EPERM;
+ }
#ifdef CONFIG_SMP
else if (hw >= mpic->ipi_vecs[0]) {
@@ -1029,8 +1033,12 @@ static int mpic_host_map(struct irq_domain *h, unsigned int virq,
if (mpic_map_error_int(mpic, virq, hw))
return 0;
- if (hw >= mpic->num_sources)
+ if (hw >= mpic->num_sources) {
+ pr_warning("mpic: Mapping of source 0x%x failed, "
+ "source out of range !\n",\
+ (unsigned int)hw);
return -EINVAL;
+ }
mpic_msi_reserve_hwirq(mpic, hw);
diff --git a/arch/powerpc/sysdev/mv64x60_dev.c b/arch/powerpc/sysdev/mv64x60_dev.c
index 0f6af41ebb44..4a25c26f0bf4 100644
--- a/arch/powerpc/sysdev/mv64x60_dev.c
+++ b/arch/powerpc/sysdev/mv64x60_dev.c
@@ -214,15 +214,27 @@ static struct platform_device * __init mv64x60_eth_register_shared_pdev(
struct device_node *np, int id)
{
struct platform_device *pdev;
- struct resource r[1];
+ struct resource r[2];
int err;
err = of_address_to_resource(np, 0, &r[0]);
if (err)
return ERR_PTR(err);
+ /* register an orion mdio bus driver */
+ r[1].start = r[0].start + 0x4;
+ r[1].end = r[0].start + 0x84 - 1;
+ r[1].flags = IORESOURCE_MEM;
+
+ if (id == 0) {
+ pdev = platform_device_register_simple("orion-mdio", -1, &r[1], 1);
+ if (!pdev)
+ return pdev;
+ }
+
pdev = platform_device_register_simple(MV643XX_ETH_SHARED_NAME, id,
- r, 1);
+ &r[0], 1);
+
return pdev;
}
diff --git a/arch/powerpc/sysdev/ppc4xx_pci.c b/arch/powerpc/sysdev/ppc4xx_pci.c
index 56e8b3c3c890..64603a10b863 100644
--- a/arch/powerpc/sysdev/ppc4xx_pci.c
+++ b/arch/powerpc/sysdev/ppc4xx_pci.c
@@ -257,6 +257,7 @@ static void __init ppc4xx_configure_pci_PMMs(struct pci_controller *hose,
/* Setup outbound memory windows */
for (i = j = 0; i < 3; i++) {
struct resource *res = &hose->mem_resources[i];
+ resource_size_t offset = hose->mem_offset[i];
/* we only care about memory windows */
if (!(res->flags & IORESOURCE_MEM))
@@ -270,7 +271,7 @@ static void __init ppc4xx_configure_pci_PMMs(struct pci_controller *hose,
/* Configure the resource */
if (ppc4xx_setup_one_pci_PMM(hose, reg,
res->start,
- res->start - hose->pci_mem_offset,
+ res->start - offset,
resource_size(res),
res->flags,
j) == 0) {
@@ -279,7 +280,7 @@ static void __init ppc4xx_configure_pci_PMMs(struct pci_controller *hose,
/* If the resource PCI address is 0 then we have our
* ISA memory hole
*/
- if (res->start == hose->pci_mem_offset)
+ if (res->start == offset)
found_isa_hole = 1;
}
}
@@ -457,6 +458,7 @@ static void __init ppc4xx_configure_pcix_POMs(struct pci_controller *hose,
/* Setup outbound memory windows */
for (i = j = 0; i < 3; i++) {
struct resource *res = &hose->mem_resources[i];
+ resource_size_t offset = hose->mem_offset[i];
/* we only care about memory windows */
if (!(res->flags & IORESOURCE_MEM))
@@ -470,7 +472,7 @@ static void __init ppc4xx_configure_pcix_POMs(struct pci_controller *hose,
/* Configure the resource */
if (ppc4xx_setup_one_pcix_POM(hose, reg,
res->start,
- res->start - hose->pci_mem_offset,
+ res->start - offset,
resource_size(res),
res->flags,
j) == 0) {
@@ -479,7 +481,7 @@ static void __init ppc4xx_configure_pcix_POMs(struct pci_controller *hose,
/* If the resource PCI address is 0 then we have our
* ISA memory hole
*/
- if (res->start == hose->pci_mem_offset)
+ if (res->start == offset)
found_isa_hole = 1;
}
}
@@ -1792,6 +1794,7 @@ static void __init ppc4xx_configure_pciex_POMs(struct ppc4xx_pciex_port *port,
/* Setup outbound memory windows */
for (i = j = 0; i < 3; i++) {
struct resource *res = &hose->mem_resources[i];
+ resource_size_t offset = hose->mem_offset[i];
/* we only care about memory windows */
if (!(res->flags & IORESOURCE_MEM))
@@ -1805,7 +1808,7 @@ static void __init ppc4xx_configure_pciex_POMs(struct ppc4xx_pciex_port *port,
/* Configure the resource */
if (ppc4xx_setup_one_pciex_POM(port, hose, mbase,
res->start,
- res->start - hose->pci_mem_offset,
+ res->start - offset,
resource_size(res),
res->flags,
j) == 0) {
@@ -1814,7 +1817,7 @@ static void __init ppc4xx_configure_pciex_POMs(struct ppc4xx_pciex_port *port,
/* If the resource PCI address is 0 then we have our
* ISA memory hole
*/
- if (res->start == hose->pci_mem_offset)
+ if (res->start == offset)
found_isa_hole = 1;
}
}
diff --git a/arch/powerpc/sysdev/qe_lib/Kconfig b/arch/powerpc/sysdev/qe_lib/Kconfig
index 41ac3dfac98e..3c251993bacd 100644
--- a/arch/powerpc/sysdev/qe_lib/Kconfig
+++ b/arch/powerpc/sysdev/qe_lib/Kconfig
@@ -22,6 +22,6 @@ config UCC
config QE_USB
bool
- default y if USB_GADGET_FSL_QE
+ default y if USB_FSL_QE
help
QE USB Controller support
diff --git a/arch/powerpc/sysdev/rtc_cmos_setup.c b/arch/powerpc/sysdev/rtc_cmos_setup.c
index 9afba924e94f..af79e1ea74b6 100644
--- a/arch/powerpc/sysdev/rtc_cmos_setup.c
+++ b/arch/powerpc/sysdev/rtc_cmos_setup.c
@@ -62,10 +62,7 @@ static int __init add_rtc(void)
pd = platform_device_register_simple("rtc_cmos", -1,
&res[0], num_res);
- if (IS_ERR(pd))
- return PTR_ERR(pd);
-
- return 0;
+ return PTR_RET(pd);
}
fs_initcall(add_rtc);
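
add_rtc() now ends with PTR_RET(pd), which folds the IS_ERR()/PTR_ERR()/0
dance into one call. For reference, PTR_RET() from <linux/err.h> behaves like
the helper below (ptr_ret_equiv is only an illustrative name):

#include <linux/err.h>

/* Equivalent of PTR_RET(): the error code for an ERR_PTR, otherwise 0. */
static inline int ptr_ret_equiv(const void *ptr)
{
        return IS_ERR(ptr) ? PTR_ERR(ptr) : 0;
}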
diff --git a/arch/powerpc/sysdev/xics/icp-native.c b/arch/powerpc/sysdev/xics/icp-native.c
index 48861d3fcd07..7cd728b3b5e4 100644
--- a/arch/powerpc/sysdev/xics/icp-native.c
+++ b/arch/powerpc/sysdev/xics/icp-native.c
@@ -51,6 +51,12 @@ static struct icp_ipl __iomem *icp_native_regs[NR_CPUS];
static inline unsigned int icp_native_get_xirr(void)
{
int cpu = smp_processor_id();
+ unsigned int xirr;
+
+ /* Handled an interrupt latched by KVM */
+ xirr = kvmppc_get_xics_latch();
+ if (xirr)
+ return xirr;
return in_be32(&icp_native_regs[cpu]->xirr.word);
}
@@ -81,7 +87,7 @@ static void icp_native_set_cpu_priority(unsigned char cppr)
iosync();
}
-static void icp_native_eoi(struct irq_data *d)
+void icp_native_eoi(struct irq_data *d)
{
unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
@@ -138,6 +144,7 @@ static unsigned int icp_native_get_irq(void)
static void icp_native_cause_ipi(int cpu, unsigned long data)
{
+ kvmppc_set_host_ipi(cpu, 1);
icp_native_set_qirr(cpu, IPI_PRIORITY);
}
@@ -151,6 +158,7 @@ static irqreturn_t icp_native_ipi_action(int irq, void *dev_id)
{
int cpu = smp_processor_id();
+ kvmppc_set_host_ipi(cpu, 0);
icp_native_set_qirr(cpu, 0xff);
return smp_ipi_demux();
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 13f85defabed..96bf5bd30fbc 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -1430,7 +1430,7 @@ static void excprint(struct pt_regs *fp)
printf(" sp: %lx\n", fp->gpr[1]);
printf(" msr: %lx\n", fp->msr);
- if (trap == 0x300 || trap == 0x380 || trap == 0x600) {
+ if (trap == 0x300 || trap == 0x380 || trap == 0x600 || trap == 0x200) {
printf(" dar: %lx\n", fp->dar);
if (trap != 0x380)
printf(" dsisr: %lx\n", fp->dsisr);
@@ -2947,7 +2947,7 @@ static void sysrq_handle_xmon(int key)
static struct sysrq_key_op sysrq_xmon_op = {
.handler = sysrq_handle_xmon,
- .help_msg = "Xmon",
+ .help_msg = "xmon(x)",
.action_msg = "Entering xmon",
};