Diffstat (limited to 'arch/m68k/include')
-rw-r--r--  arch/m68k/include/asm/Kbuild         |  1
-rw-r--r--  arch/m68k/include/asm/bitops.h       |  2
-rw-r--r--  arch/m68k/include/asm/processor.h    | 10
-rw-r--r--  arch/m68k/include/asm/uaccess.h      |  1
-rw-r--r--  arch/m68k/include/asm/uaccess_mm.h   | 96
-rw-r--r--  arch/m68k/include/asm/uaccess_no.h   | 39
-rw-r--r--  arch/m68k/include/asm/unistd.h       |  2
-rw-r--r--  arch/m68k/include/uapi/asm/unistd.h  |  1
8 files changed, 67 insertions, 85 deletions
diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild
index d4f9ccbfa85c..82005d2ff717 100644
--- a/arch/m68k/include/asm/Kbuild
+++ b/arch/m68k/include/asm/Kbuild
@@ -5,6 +5,7 @@ generic-y += device.h
generic-y += emergency-restart.h
generic-y += errno.h
generic-y += exec.h
+generic-y += extable.h
generic-y += futex.h
generic-y += hw_irq.h
generic-y += ioctl.h
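
The new generic-y line pulls in asm-generic/extable.h, which supplies the exception-table pieces deleted from the two uaccess headers below. At the time of this series the generic header amounted to roughly the following (paraphrased sketch, not a verbatim copy):

	struct exception_table_entry
	{
		unsigned long insn, fixup;	/* faulting insn, continuation address */
	};

	struct pt_regs;
	extern int fixup_exception(struct pt_regs *regs);
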
diff --git a/arch/m68k/include/asm/bitops.h b/arch/m68k/include/asm/bitops.h
index b4a9b0d5928d..dda58cfe8c22 100644
--- a/arch/m68k/include/asm/bitops.h
+++ b/arch/m68k/include/asm/bitops.h
@@ -148,7 +148,7 @@ static inline void bfchg_mem_change_bit(int nr, volatile unsigned long *vaddr)
#define __change_bit(nr, vaddr) change_bit(nr, vaddr)
-static inline int test_bit(int nr, const unsigned long *vaddr)
+static inline int test_bit(int nr, const volatile unsigned long *vaddr)
{
return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
}
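
Adding the volatile qualifier matches the other bit operations in this file and the asm-generic prototype, so callers holding a pointer to a volatile bitmap no longer need a cast. A minimal illustration (hypothetical caller, not from this patch):

	static volatile unsigned long irq_pending[2];	/* hypothetical bitmap */

	static int irq_is_pending(int irq)
	{
		/* drew a qualifier warning before this change */
		return test_bit(irq, irq_pending);
	}
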
diff --git a/arch/m68k/include/asm/processor.h b/arch/m68k/include/asm/processor.h
index f5f790c31bf8..77239e81379b 100644
--- a/arch/m68k/include/asm/processor.h
+++ b/arch/m68k/include/asm/processor.h
@@ -122,16 +122,6 @@ static inline void start_thread(struct pt_regs * regs, unsigned long pc,
wrusp(usp);
}
-#ifdef CONFIG_MMU
-extern int handle_kernel_fault(struct pt_regs *regs);
-#else
-static inline int handle_kernel_fault(struct pt_regs *regs)
-{
- /* Any fault in kernel is fatal on non-mmu */
- return 0;
-}
-#endif
-
/* Forward declaration, a strange C thing */
struct task_struct;
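
With handle_kernel_fault() gone, faults in kernel mode are expected to go through the fixup_exception() entry point declared by the generic extable.h. The shape of that call site, as a hedged sketch rather than m68k's exact fault handler:

	/* in the page-fault path, on a fault taken in kernel mode */
	if (fixup_exception(regs))
		return;		/* pc was redirected to the fixup stub */
	/* no fixup entry: the fault is fatal (helper name per m68k traps code) */
	die_if_kernel("kernel access of bad area", regs, address);
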
diff --git a/arch/m68k/include/asm/uaccess.h b/arch/m68k/include/asm/uaccess.h
index 3fadc4a93d97..67b3481d6020 100644
--- a/arch/m68k/include/asm/uaccess.h
+++ b/arch/m68k/include/asm/uaccess.h
@@ -4,6 +4,7 @@
#include <asm/uaccess_mm.h>
#endif
+#include <asm/extable.h>
#ifdef CONFIG_CPU_HAS_NO_UNALIGNED
#include <asm-generic/uaccess-unaligned.h>
#else
diff --git a/arch/m68k/include/asm/uaccess_mm.h b/arch/m68k/include/asm/uaccess_mm.h
index 14054a4e4216..ef856ffeffdf 100644
--- a/arch/m68k/include/asm/uaccess_mm.h
+++ b/arch/m68k/include/asm/uaccess_mm.h
@@ -31,24 +31,6 @@ static inline int access_ok(int type, const void __user *addr,
#define MOVES "move"
#endif
-/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue. No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path. This means when everything is well,
- * we don't even have to jump over them. Further, they do not intrude
- * on our cache or tlb entries.
- */
-
-struct exception_table_entry
-{
- unsigned long insn, fixup;
-};
-
extern int __put_user_bad(void);
extern int __get_user_bad(void);
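
The deleted comment and struct live on in asm-generic/extable.h. As a compact, hypothetical example of the pattern the comment describes — a faulting instruction paired with an out-of-line fixup routed through the __ex_table section (caller initializes res to 0):

	#define get_user_byte(res, x, ptr)			\
	asm volatile ("\n"					\
		"1:	move.b	(%2),%1\n"			\
		"2:\n"						\
		"	.section .fixup,\"ax\"\n"		\
		"	.even\n"				\
		"10:	moveq	#-14,%0\n"	/* -EFAULT */	\
		"	jra	2b\n"				\
		"	.previous\n"				\
		"	.section __ex_table,\"a\"\n"		\
		"	.align	4\n"				\
		"	.long	1b,10b\n"			\
		"	.previous"				\
		: "+d" (res), "=d" (x)				\
		: "a" (ptr))
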
@@ -197,39 +179,55 @@ asm volatile ("\n" \
unsigned long __generic_copy_from_user(void *to, const void __user *from, unsigned long n);
unsigned long __generic_copy_to_user(void __user *to, const void *from, unsigned long n);
-#define __constant_copy_from_user_asm(res, to, from, tmp, n, s1, s2, s3)\
+#define __suffix0
+#define __suffix1 b
+#define __suffix2 w
+#define __suffix4 l
+
+#define ____constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, s1, s2, s3)\
asm volatile ("\n" \
"1: "MOVES"."#s1" (%2)+,%3\n" \
" move."#s1" %3,(%1)+\n" \
+ " .ifnc \""#s2"\",\"\"\n" \
"2: "MOVES"."#s2" (%2)+,%3\n" \
" move."#s2" %3,(%1)+\n" \
" .ifnc \""#s3"\",\"\"\n" \
"3: "MOVES"."#s3" (%2)+,%3\n" \
" move."#s3" %3,(%1)+\n" \
" .endif\n" \
+ " .endif\n" \
"4:\n" \
" .section __ex_table,\"a\"\n" \
" .align 4\n" \
" .long 1b,10f\n" \
+ " .ifnc \""#s2"\",\"\"\n" \
" .long 2b,20f\n" \
" .ifnc \""#s3"\",\"\"\n" \
" .long 3b,30f\n" \
" .endif\n" \
+ " .endif\n" \
" .previous\n" \
"\n" \
" .section .fixup,\"ax\"\n" \
" .even\n" \
- "10: clr."#s1" (%1)+\n" \
- "20: clr."#s2" (%1)+\n" \
+ "10: addq.l #"#n1",%0\n" \
+ " .ifnc \""#s2"\",\"\"\n" \
+ "20: addq.l #"#n2",%0\n" \
" .ifnc \""#s3"\",\"\"\n" \
- "30: clr."#s3" (%1)+\n" \
+ "30: addq.l #"#n3",%0\n" \
+ " .endif\n" \
" .endif\n" \
- " moveq.l #"#n",%0\n" \
" jra 4b\n" \
" .previous\n" \
: "+d" (res), "+&a" (to), "+a" (from), "=&d" (tmp) \
: : "memory")
+#define ___constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, s1, s2, s3)\
+ ____constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, s1, s2, s3)
+#define __constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3) \
+ ___constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, \
+ __suffix##n1, __suffix##n2, __suffix##n3)
+
static __always_inline unsigned long
__constant_copy_from_user(void *to, const void __user *from, unsigned long n)
{
@@ -237,37 +235,37 @@ __constant_copy_from_user(void *to, const void __user *from, unsigned long n)
switch (n) {
case 1:
- __get_user_asm(res, *(u8 *)to, (u8 __user *)from, u8, b, d, 1);
+ __constant_copy_from_user_asm(res, to, from, tmp, 1, 0, 0);
break;
case 2:
- __get_user_asm(res, *(u16 *)to, (u16 __user *)from, u16, w, r, 2);
+ __constant_copy_from_user_asm(res, to, from, tmp, 2, 0, 0);
break;
case 3:
- __constant_copy_from_user_asm(res, to, from, tmp, 3, w, b,);
+ __constant_copy_from_user_asm(res, to, from, tmp, 2, 1, 0);
break;
case 4:
- __get_user_asm(res, *(u32 *)to, (u32 __user *)from, u32, l, r, 4);
+ __constant_copy_from_user_asm(res, to, from, tmp, 4, 0, 0);
break;
case 5:
- __constant_copy_from_user_asm(res, to, from, tmp, 5, l, b,);
+ __constant_copy_from_user_asm(res, to, from, tmp, 4, 1, 0);
break;
case 6:
- __constant_copy_from_user_asm(res, to, from, tmp, 6, l, w,);
+ __constant_copy_from_user_asm(res, to, from, tmp, 4, 2, 0);
break;
case 7:
- __constant_copy_from_user_asm(res, to, from, tmp, 7, l, w, b);
+ __constant_copy_from_user_asm(res, to, from, tmp, 4, 2, 1);
break;
case 8:
- __constant_copy_from_user_asm(res, to, from, tmp, 8, l, l,);
+ __constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 0);
break;
case 9:
- __constant_copy_from_user_asm(res, to, from, tmp, 9, l, l, b);
+ __constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 1);
break;
case 10:
- __constant_copy_from_user_asm(res, to, from, tmp, 10, l, l, w);
+ __constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 2);
break;
case 12:
- __constant_copy_from_user_asm(res, to, from, tmp, 12, l, l, l);
+ __constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 4);
break;
default:
/* we limit the inlined version to 3 moves */
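
The size arguments now drive instruction selection through the __suffix macros: 4 pastes to l, 2 to w, 1 to b, and 0 to nothing, which the .ifnc directives then test for emptiness. The extra ___constant_copy_from_user_asm layer is the standard double-expansion trick: it forces __suffix##n to expand before the innermost macro stringizes it. A standalone userspace demo of the same mechanism (hypothetical, not kernel code):

	#include <stdio.h>

	#define __suffix0
	#define __suffix1 b
	#define __suffix2 w
	#define __suffix4 l

	/* inner macro stringizes; middle layer forces expansion first */
	#define ____show(s1, s2, s3)	puts("sizes: " #s1 " " #s2 " " #s3)
	#define ___show(s1, s2, s3)	____show(s1, s2, s3)
	#define __show(n1, n2, n3)	___show(__suffix##n1, __suffix##n2, __suffix##n3)

	int main(void)
	{
		__show(4, 2, 1);	/* prints "sizes: l w b" */
		__show(4, 0, 0);	/* prints "sizes: l  " -- empty suffixes vanish */
		return 0;
	}
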
@@ -358,21 +356,23 @@ __constant_copy_to_user(void __user *to, const void *from, unsigned long n)
return res;
}
-#define __copy_from_user(to, from, n) \
-(__builtin_constant_p(n) ? \
- __constant_copy_from_user(to, from, n) : \
- __generic_copy_from_user(to, from, n))
-
-#define __copy_to_user(to, from, n) \
-(__builtin_constant_p(n) ? \
- __constant_copy_to_user(to, from, n) : \
- __generic_copy_to_user(to, from, n))
-
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
+static inline unsigned long
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ if (__builtin_constant_p(n))
+ return __constant_copy_from_user(to, from, n);
+ return __generic_copy_from_user(to, from, n);
+}
-#define copy_from_user(to, from, n) __copy_from_user(to, from, n)
-#define copy_to_user(to, from, n) __copy_to_user(to, from, n)
+static inline unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ if (__builtin_constant_p(n))
+ return __constant_copy_to_user(to, from, n);
+ return __generic_copy_to_user(to, from, n);
+}
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
#define user_addr_max() \
(uaccess_kernel() ? ~0UL : TASK_SIZE)
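
raw_copy_{from,to}_user() return the number of bytes left uncopied and do no access_ok() checking or tail zeroing of their own; that policy moved into the generic <linux/uaccess.h> wrappers, which INLINE_COPY_{FROM,TO}_USER asks to be inlined here. Roughly (simplified sketch of the generic wrapper of that era, not verbatim):

	static inline unsigned long
	_copy_from_user(void *to, const void __user *from, unsigned long n)
	{
		unsigned long res = n;

		if (likely(access_ok(VERIFY_READ, from, n)))
			res = raw_copy_from_user(to, from, n);
		if (unlikely(res))
			memset(to + (n - res), 0, res);	/* zero the uncopied tail */
		return res;
	}
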
diff --git a/arch/m68k/include/asm/uaccess_no.h b/arch/m68k/include/asm/uaccess_no.h
index e77ce66c14d5..e482c3899ff1 100644
--- a/arch/m68k/include/asm/uaccess_no.h
+++ b/arch/m68k/include/asm/uaccess_no.h
@@ -23,25 +23,6 @@ static inline int _access_ok(unsigned long addr, unsigned long size)
}
/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue. No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path. This means when everything is well,
- * we don't even have to jump over them. Further, they do not intrude
- * on our cache or tlb entries.
- */
-
-struct exception_table_entry
-{
- unsigned long insn, fixup;
-};
-
-
-/*
* These are the main single-value transfer routines. They automatically
* use the right size if we just have the right pointer type.
*/
@@ -120,13 +101,21 @@ extern int __get_user_bad(void);
: "=d" (x) \
: "m" (*__ptr(ptr)))
-#define copy_from_user(to, from, n) (memcpy(to, from, n), 0)
-#define copy_to_user(to, from, n) (memcpy(to, from, n), 0)
+static inline unsigned long
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ memcpy(to, (__force const void *)from, n);
+ return 0;
+}
-#define __copy_from_user(to, from, n) copy_from_user(to, from, n)
-#define __copy_to_user(to, from, n) copy_to_user(to, from, n)
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
+static inline unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ memcpy((__force void *)to, from, n);
+ return 0;
+}
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
/*
* Copy a null terminated string from userspace.
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index a857d82ec509..aab1edd0d4ba 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -4,7 +4,7 @@
#include <uapi/asm/unistd.h>
-#define NR_syscalls 379
+#define NR_syscalls 380
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_OLD_STAT
diff --git a/arch/m68k/include/uapi/asm/unistd.h b/arch/m68k/include/uapi/asm/unistd.h
index 9fe674bf911f..25589f5b8669 100644
--- a/arch/m68k/include/uapi/asm/unistd.h
+++ b/arch/m68k/include/uapi/asm/unistd.h
@@ -384,5 +384,6 @@
#define __NR_copy_file_range 376
#define __NR_preadv2 377
#define __NR_pwritev2 378
+#define __NR_statx 379
#endif /* _UAPI_ASM_M68K_UNISTD_H_ */
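
The new __NR_statx slot (with the matching NR_syscalls bump above) only reserves the number; assuming the table entry is wired up in the corresponding arch/m68k/kernel change, userspace can reach it via raw syscall(2) before libc grows a wrapper. A hypothetical smoke test:

	#include <stdio.h>
	#include <unistd.h>
	#include <fcntl.h>		/* AT_FDCWD */
	#include <sys/syscall.h>	/* __NR_statx */
	#include <linux/stat.h>		/* struct statx, STATX_BASIC_STATS */

	int main(void)
	{
		struct statx stx;
		long ret = syscall(__NR_statx, AT_FDCWD, "/", 0,
				   STATX_BASIC_STATS, &stx);
		if (ret == -1) {
			perror("statx");	/* ENOSYS if not wired up */
			return 1;
		}
		printf("inode: %llu\n", (unsigned long long)stx.stx_ino);
		return 0;
	}
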