Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/include/asm/book3s/64/hash-4k.h  | 28
-rw-r--r--  arch/powerpc/include/asm/book3s/64/pgtable.h  | 26
-rw-r--r--  arch/powerpc/kernel/rtas.c                    |  4
-rw-r--r--  arch/powerpc/lib/code-patching.c              |  2
-rw-r--r--  arch/powerpc/platforms/cell/spufs/inode.c     |  9
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_pseries.c  |  6
-rw-r--r--  arch/powerpc/platforms/pseries/svm.c          |  3
7 files changed, 45 insertions(+), 33 deletions(-)
diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h
index b6ac4f86c87b..433d164374cb 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-4k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h
@@ -90,6 +90,34 @@ static inline int hash__hugepd_ok(hugepd_t hpd)
#endif
/*
+ * With 4K page size the real_pte machinery is all nops.
+ */
+static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep, int offset)
+{
+ return (real_pte_t){pte};
+}
+
+#define __rpte_to_pte(r) ((r).pte)
+
+static inline unsigned long __rpte_to_hidx(real_pte_t rpte, unsigned long index)
+{
+ return pte_val(__rpte_to_pte(rpte)) >> H_PAGE_F_GIX_SHIFT;
+}
+
+#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift) \
+ do { \
+ index = 0; \
+ shift = mmu_psize_defs[psize].shift; \
+
+#define pte_iterate_hashed_end() } while(0)
+
+/*
+ * We expect this to be called only for user addresses or kernel virtual
+ * addresses other than the linear mapping.
+ */
+#define pte_pagesize_index(mm, addr, pte) MMU_PAGE_4K
+
+/*
* 4K PTE format is different from 64K PTE format. Saving the hash_slot is just
* a matter of returning the PTE bits that need to be modified. On 64K PTE,
* things are a little more involved and hence needs many more parameters to
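Note: pte_iterate_hashed_subpages and pte_iterate_hashed_end() above form a
paired open/close construct: the first macro opens a do { loop, the second
closes it. A condensed sketch of how flush paths drive the iterator, modeled
on the kernel's flush_hash_page() (flush_one is a hypothetical name):

	/*
	 * Invalidate every hash slot backing one Linux PTE. With 4K base
	 * pages the iterator body runs exactly once (index stays 0).
	 */
	static void flush_one(unsigned long vpn, real_pte_t rpte, int psize,
			      int ssize, int local)
	{
		unsigned long hash, index, shift, hidx, slot;

		pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift) {
			hash = hpt_hash(vpn, shift, ssize);
			hidx = __rpte_to_hidx(rpte, index);
			if (hidx & _PTEIDX_SECONDARY)	/* secondary group? */
				hash = ~hash;
			slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
			slot += hidx & _PTEIDX_GROUP_IX;
			mmu_hash_ops.hpte_invalidate(slot, vpn, psize, psize,
						     ssize, local);
		} pte_iterate_hashed_end();
	}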
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 6866d860d4f3..cd9088673cea 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -336,32 +336,6 @@ extern unsigned long pci_io_base;
#ifndef __ASSEMBLY__
-/*
- * This is the default implementation of various PTE accessors, it's
- * used in all cases except Book3S with 64K pages where we have a
- * concept of sub-pages
- */
-#ifndef __real_pte
-
-#define __real_pte(e, p, o) ((real_pte_t){(e)})
-#define __rpte_to_pte(r) ((r).pte)
-#define __rpte_to_hidx(r,index) (pte_val(__rpte_to_pte(r)) >> H_PAGE_F_GIX_SHIFT)
-
-#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift) \
- do { \
- index = 0; \
- shift = mmu_psize_defs[psize].shift; \
-
-#define pte_iterate_hashed_end() } while(0)
-
-/*
- * We expect this to be called only for user addresses or kernel virtual
- * addresses other than the linear mapping.
- */
-#define pte_pagesize_index(mm, addr, pte) MMU_PAGE_4K
-
-#endif /* __real_pte */
-
static inline unsigned long pte_update(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, unsigned long clr,
unsigned long set, int huge)
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index d01a0ad57e38..f2378f51cbed 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -16,6 +16,7 @@
#include <linux/capability.h>
#include <linux/delay.h>
#include <linux/cpu.h>
+#include <linux/nospec.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/completion.h>
@@ -1076,6 +1077,9 @@ SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs)
|| nargs + nret > ARRAY_SIZE(args.args))
return -EINVAL;
+ nargs = array_index_nospec(nargs, ARRAY_SIZE(args.args));
+ nret = array_index_nospec(nret, ARRAY_SIZE(args.args) - nargs);
+
/* Copy in args. */
if (copy_from_user(args.args, uargs->args,
nargs * sizeof(rtas_arg_t)) != 0)
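Note: the two array_index_nospec() lines close a Spectre-v1 window. nargs and
nret come from userspace, and the bounds check above them can be speculated
past; clamping them to the checked range makes the copies below safe even
under branch misprediction. The same pattern applies to any user-controlled
index, as in this sketch (my_table and lookup are hypothetical):

	#include <linux/kernel.h>
	#include <linux/errno.h>
	#include <linux/nospec.h>

	static const unsigned long my_table[16];

	static long lookup(unsigned int idx)
	{
		if (idx >= ARRAY_SIZE(my_table))
			return -EINVAL;
		/*
		 * Even if the branch above is mispredicted, idx is forced
		 * into bounds, so no out-of-bounds value can feed a
		 * speculative load.
		 */
		idx = array_index_nospec(idx, ARRAY_SIZE(my_table));
		return my_table[idx];
	}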
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index c5ed98823835..e303e3f039a6 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -51,7 +51,7 @@ static int text_area_cpu_up(unsigned int cpu)
{
struct vm_struct *area;
- area = get_vm_area(PAGE_SIZE, VM_ALLOC);
+ area = get_vm_area(PAGE_SIZE, 0);
if (!area) {
WARN_ONCE(1, "Failed to create text area for cpu %d\n",
cpu);
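Note: get_vm_area()'s second argument carries vm_struct flags, and VM_ALLOC
advertises an area whose pages were populated by vmalloc() itself. This area
is only an address-space reservation that the patching code backs by hand,
so 0 is the accurate flag. A sketch of that manual backing, modeled on this
file's map_patch_area() helper (the exact map_kernel_page() signature varies
across kernel versions):

	/* Back the reserved VA with the page containing addr by hand;
	 * no vmalloc-owned pages are involved, hence no VM_ALLOC. */
	static int map_patch(void *addr, unsigned long text_poke_addr)
	{
		unsigned long pfn;

		if (is_vmalloc_addr(addr))
			pfn = vmalloc_to_pfn(addr);
		else
			pfn = __pa_symbol(addr) >> PAGE_SHIFT;

		return map_kernel_page(text_poke_addr, pfn << PAGE_SHIFT,
				       PAGE_KERNEL);
	}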
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index ed37a93bf858..ea3082f2f9d1 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -190,8 +190,10 @@ static int spufs_fill_dir(struct dentry *dir,
return -ENOMEM;
ret = spufs_new_file(dir->d_sb, dentry, files->ops,
files->mode & mode, files->size, ctx);
- if (ret)
+ if (ret) {
+ dput(dentry);
return ret;
+ }
files++;
}
return 0;
@@ -434,8 +436,11 @@ spufs_create_context(struct inode *inode, struct dentry *dentry,
}
ret = spufs_mkdir(inode, dentry, flags, mode & 0777);
- if (ret)
+ if (ret) {
+ if (neighbor)
+ put_spu_context(neighbor);
goto out_aff_unlock;
+ }
if (affinity) {
spufs_set_affinity(flags, SPUFS_I(d_inode(dentry))->i_ctx,
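Note: both hunks above fix the same class of leak: a reference taken before a
fallible call (the reference on the freshly allocated dentry, the neighbor
context looked up earlier in spufs_create_context()) was not dropped when
that call failed. The general shape, with hypothetical obj_get/obj_put/
do_setup names:

	static int create_child(struct obj *parent)
	{
		struct obj *o = obj_get(parent);	/* takes a reference */
		int err = do_setup(o);

		if (err) {
			obj_put(o);	/* error path must undo the get */
			return err;
		}
		return 0;	/* success hands the reference onward */
	}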
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
index f51fd4ac3f0b..38889a013a86 100644
--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
+++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
@@ -578,8 +578,10 @@ static int pseries_eeh_get_state(struct eeh_pe *pe, int *delay)
switch(rets[0]) {
case 0:
- result = EEH_STATE_MMIO_ACTIVE |
- EEH_STATE_DMA_ACTIVE;
+ result = EEH_STATE_MMIO_ACTIVE |
+ EEH_STATE_DMA_ACTIVE |
+ EEH_STATE_MMIO_ENABLED |
+ EEH_STATE_DMA_ENABLED;
break;
case 1:
result = EEH_STATE_RESET_ACTIVE |
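Note: rets[0] == 0 means the slot is fully functional, so MMIO and DMA are
not merely active but enabled. Reporting only the *_ACTIVE bits made a
healthy slot look frozen to callers that gate device access on the *_ENABLED
bits. A sketch of such a consumer, modeled on the EEH core:

	/* Only touch device registers when MMIO is reported enabled. */
	static bool can_dump_registers(struct eeh_pe *pe)
	{
		int state = eeh_ops->get_state(pe, NULL);

		return (state & EEH_STATE_MMIO_ENABLED) != 0;
	}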
diff --git a/arch/powerpc/platforms/pseries/svm.c b/arch/powerpc/platforms/pseries/svm.c
index 87f001b4c4e4..f12229ce7301 100644
--- a/arch/powerpc/platforms/pseries/svm.c
+++ b/arch/powerpc/platforms/pseries/svm.c
@@ -56,8 +56,7 @@ void __init svm_swiotlb_init(void)
return;
- memblock_free_early(__pa(vstart),
- PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
+ memblock_free(__pa(vstart), PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
panic("SVM: Cannot allocate SWIOTLB buffer");
}
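Note: memblock_free_early() was a thin alias for memblock_free(); at this
point in the API both take a physical address plus a size, which is why the
__pa(vstart) conversion stays. A minimal alloc/free pairing under that
convention (later kernels changed memblock_free() to take a virtual
pointer):

	static void __init alloc_then_release(unsigned long size)
	{
		void *v = memblock_alloc(size, PAGE_SIZE); /* virtual */

		if (v)
			memblock_free(__pa(v), size); /* phys + size */
	}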