Diffstat (limited to 'arch/sparc/include/asm')
-rw-r--r-- | arch/sparc/include/asm/bitops_64.h      |  5
-rw-r--r-- | arch/sparc/include/asm/clocksource.h    | 17
-rw-r--r-- | arch/sparc/include/asm/cmpxchg_32.h     |  3
-rw-r--r-- | arch/sparc/include/asm/elf_64.h         | 14
-rw-r--r-- | arch/sparc/include/asm/mmu_64.h         |  1
-rw-r--r-- | arch/sparc/include/asm/mmu_context_64.h |  2
-rw-r--r-- | arch/sparc/include/asm/processor_64.h   |  8
-rw-r--r-- | arch/sparc/include/asm/tsb.h            |  2
-rw-r--r-- | arch/sparc/include/asm/vdso.h           | 24
-rw-r--r-- | arch/sparc/include/asm/vvar.h           | 74
10 files changed, 147 insertions(+), 3 deletions(-)
diff --git a/arch/sparc/include/asm/bitops_64.h b/arch/sparc/include/asm/bitops_64.h
index a90eea24b286..ca7ea5913494 100644
--- a/arch/sparc/include/asm/bitops_64.h
+++ b/arch/sparc/include/asm/bitops_64.h
@@ -23,10 +23,11 @@ void set_bit(unsigned long nr, volatile unsigned long *addr);
 void clear_bit(unsigned long nr, volatile unsigned long *addr);
 void change_bit(unsigned long nr, volatile unsigned long *addr);
 
+int fls(unsigned int word);
+int __fls(unsigned long word);
+
 #include <asm-generic/bitops/non-atomic.h>
 
-#include <asm-generic/bitops/fls.h>
-#include <asm-generic/bitops/__fls.h>
 #include <asm-generic/bitops/fls64.h>
 
 #ifdef __KERNEL__
diff --git a/arch/sparc/include/asm/clocksource.h b/arch/sparc/include/asm/clocksource.h
new file mode 100644
index 000000000000..d63ef224befe
--- /dev/null
+++ b/arch/sparc/include/asm/clocksource.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved.
+ */
+
+#ifndef _ASM_SPARC_CLOCKSOURCE_H
+#define _ASM_SPARC_CLOCKSOURCE_H
+
+/* VDSO clocksources */
+#define VCLOCK_NONE	0	/* Nothing userspace can do. */
+#define VCLOCK_TICK	1	/* Use %tick. */
+#define VCLOCK_STICK	2	/* Use %stick. */
+
+struct arch_clocksource_data {
+	int vclock_mode;
+};
+
+#endif /* _ASM_SPARC_CLOCKSOURCE_H */
diff --git a/arch/sparc/include/asm/cmpxchg_32.h b/arch/sparc/include/asm/cmpxchg_32.h
index 3e3823db303e..c73b5a3ab7b9 100644
--- a/arch/sparc/include/asm/cmpxchg_32.h
+++ b/arch/sparc/include/asm/cmpxchg_32.h
@@ -63,6 +63,9 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
 			(unsigned long)_n_, sizeof(*(ptr)));		\
 })
 
+u64 __cmpxchg_u64(u64 *ptr, u64 old, u64 new);
+#define cmpxchg64(ptr, old, new)	__cmpxchg_u64(ptr, old, new)
+
 #include <asm-generic/cmpxchg-local.h>
 
 /*
diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
index 5894389f5ed5..25340df3570c 100644
--- a/arch/sparc/include/asm/elf_64.h
+++ b/arch/sparc/include/asm/elf_64.h
@@ -211,4 +211,18 @@ do {	if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
 			(current->personality & (~PER_MASK)));	\
 } while (0)
 
+extern unsigned int vdso_enabled;
+
+#define	ARCH_DLINFO						\
+do {								\
+	if (vdso_enabled)					\
+		NEW_AUX_ENT(AT_SYSINFO_EHDR,			\
+			(unsigned long)current->mm->context.vdso); \
+} while (0)
+
+struct linux_binprm;
+
+#define ARCH_HAS_SETUP_ADDITIONAL_PAGES	1
+extern int arch_setup_additional_pages(struct linux_binprm *bprm,
+				       int uses_interp);
 #endif /* !(__ASM_SPARC64_ELF_H) */
diff --git a/arch/sparc/include/asm/mmu_64.h b/arch/sparc/include/asm/mmu_64.h
index 5fe64a57b4ba..ad4fb93508ba 100644
--- a/arch/sparc/include/asm/mmu_64.h
+++ b/arch/sparc/include/asm/mmu_64.h
@@ -97,6 +97,7 @@ typedef struct {
 	unsigned long		thp_pte_count;
 	struct tsb_config	tsb_block[MM_NUM_TSBS];
 	struct hv_tsb_descr	tsb_descr[MM_NUM_TSBS];
+	void			*vdso;
 } mm_context_t;
 
 #endif /* !__ASSEMBLY__ */
diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h
index e25d25b0a34b..b361702ef52a 100644
--- a/arch/sparc/include/asm/mmu_context_64.h
+++ b/arch/sparc/include/asm/mmu_context_64.h
@@ -8,9 +8,11 @@
 
 #include <linux/spinlock.h>
 #include <linux/mm_types.h>
+#include <linux/smp.h>
 
 #include <asm/spitfire.h>
 #include <asm-generic/mm_hooks.h>
+#include <asm/percpu.h>
 
 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
diff --git a/arch/sparc/include/asm/processor_64.h b/arch/sparc/include/asm/processor_64.h
index c7c79fe8d265..aac23d4a4ddd 100644
--- a/arch/sparc/include/asm/processor_64.h
+++ b/arch/sparc/include/asm/processor_64.h
@@ -200,6 +200,13 @@ unsigned long get_wchan(struct task_struct *task);
  * To make a long story short, we are trying to yield the current cpu
  * strand during busy loops.
  */
+#ifdef	BUILD_VDSO
+#define	cpu_relax()	asm volatile("\n99:\n\t"	\
+				     "rd %%ccr, %%g0\n\t" \
+				     "rd %%ccr, %%g0\n\t" \
+				     "rd %%ccr, %%g0\n\t" \
+				     ::: "memory")
+#else /* ! BUILD_VDSO */
 #define cpu_relax()	asm volatile("\n99:\n\t"	\
 				     "rd %%ccr, %%g0\n\t" \
 				     "rd %%ccr, %%g0\n\t" \
@@ -211,6 +218,7 @@ unsigned long get_wchan(struct task_struct *task);
 				     "nop\n\t"	\
 				     ".previous" \
 				     ::: "memory")
+#endif
 
 /* Prefetch support.  This is tuned for UltraSPARC-III and later.
  * UltraSPARC-I will treat these as nops, and UltraSPARC-II has
diff --git a/arch/sparc/include/asm/tsb.h b/arch/sparc/include/asm/tsb.h
index 25b6abdb3908..522a677e050d 100644
--- a/arch/sparc/include/asm/tsb.h
+++ b/arch/sparc/include/asm/tsb.h
@@ -217,7 +217,7 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
 	sllx		REG2, 32, REG2;			\
 	andcc		REG1, REG2, %g0;		\
 	be,pt		%xcc, 700f;			\
-	 sethi		%hi(0x1ffc0000), REG2;		\
+	 sethi		%hi(0xffe00000), REG2;		\
 	sllx		REG2, 1, REG2;			\
 	brgez,pn	REG1, FAIL_LABEL;		\
 	andn		REG1, REG2, REG1;		\
diff --git a/arch/sparc/include/asm/vdso.h b/arch/sparc/include/asm/vdso.h
new file mode 100644
index 000000000000..93b628731a5e
--- /dev/null
+++ b/arch/sparc/include/asm/vdso.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved.
+ */
+
+#ifndef _ASM_SPARC_VDSO_H
+#define _ASM_SPARC_VDSO_H
+
+struct vdso_image {
+	void *data;
+	unsigned long size;	/* Always a multiple of PAGE_SIZE */
+	long sym_vvar_start;	/* Negative offset to the vvar area */
+	long sym_vread_tick;		/* Start of vread_tick section */
+	long sym_vread_tick_patch_start;	/* Start of tick read */
+	long sym_vread_tick_patch_end;	/* End of tick read */
+};
+
+#ifdef CONFIG_SPARC64
+extern const struct vdso_image vdso_image_64_builtin;
+#endif
+#ifdef CONFIG_COMPAT
+extern const struct vdso_image vdso_image_32_builtin;
+#endif
+
+#endif /* _ASM_SPARC_VDSO_H */
diff --git a/arch/sparc/include/asm/vvar.h b/arch/sparc/include/asm/vvar.h
new file mode 100644
index 000000000000..0289503d1cb0
--- /dev/null
+++ b/arch/sparc/include/asm/vvar.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved.
+ */
+
+#ifndef _ASM_SPARC_VVAR_DATA_H
+#define _ASM_SPARC_VVAR_DATA_H
+
+#include <asm/clocksource.h>
+#include <linux/seqlock.h>
+#include <linux/time.h>
+#include <linux/types.h>
+
+struct vvar_data {
+	unsigned int seq;
+
+	int vclock_mode;
+	struct { /* extract of a clocksource struct */
+		u64	cycle_last;
+		u64	mask;
+		int	mult;
+		int	shift;
+	} clock;
+	/* open coded 'struct timespec' */
+	u64		wall_time_sec;
+	u64		wall_time_snsec;
+	u64		monotonic_time_snsec;
+	u64		monotonic_time_sec;
+	u64		monotonic_time_coarse_sec;
+	u64		monotonic_time_coarse_nsec;
+	u64		wall_time_coarse_sec;
+	u64		wall_time_coarse_nsec;
+
+	int		tz_minuteswest;
+	int		tz_dsttime;
+};
+
+extern struct vvar_data *vvar_data;
+extern int vdso_fix_stick;
+
+static inline unsigned int vvar_read_begin(const struct vvar_data *s)
+{
+	unsigned int ret;
+
+repeat:
+	ret = READ_ONCE(s->seq);
+	if (unlikely(ret & 1)) {
+		cpu_relax();
+		goto repeat;
+	}
+	smp_rmb(); /* Finish all reads before we return seq */
+	return ret;
+}
+
+static inline int vvar_read_retry(const struct vvar_data *s,
+				  unsigned int start)
+{
+	smp_rmb(); /* Finish all reads before checking the value of seq */
+	return unlikely(s->seq != start);
+}
+
+static inline void vvar_write_begin(struct vvar_data *s)
+{
+	++s->seq;
+	smp_wmb(); /* Makes sure that increment of seq is reflected */
+}
+
+static inline void vvar_write_end(struct vvar_data *s)
+{
+	smp_wmb(); /* Makes the value of seq current before we increment */
+	++s->seq;
+}
+
+
+#endif /* _ASM_SPARC_VVAR_DATA_H */
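For context on how the pieces above fit together, here is a minimal, illustrative sketch (not part of this patch) of how a vDSO clock reader could consume struct vvar_data through the vvar_read_begin()/vvar_read_retry() helpers from asm/vvar.h. The vread_tick(), vgetsns() and do_realtime() names, and the exact scaling arithmetic, are assumptions for illustration; in the full series the real reader lives under arch/sparc/vdso/, not in the headers shown here.

/* Illustrative sketch only -- builds on the helpers from asm/vvar.h above.
 * vread_tick() is a hypothetical stand-in for the %tick/%stick read that
 * vclock_mode selects; the real vDSO patches between the two at runtime.
 */
#include <linux/math64.h>
#include <linux/time.h>
#include <asm/vvar.h>

notrace static __always_inline u64 vread_tick(void)
{
	u64 t;

	__asm__ __volatile__("rd %%tick, %0" : "=r" (t));
	return t;
}

notrace static __always_inline u64 vgetsns(struct vvar_data *vvar)
{
	u64 v;

	/* Cycles elapsed since the last clocksource update, converted to
	 * shifted nanoseconds using the snapshot of mult/mask in vvar_data.
	 */
	v = (vread_tick() - vvar->clock.cycle_last) & vvar->clock.mask;
	return v * vvar->clock.mult;
}

notrace static int do_realtime(struct vvar_data *vvar, struct timespec *ts)
{
	unsigned int seq;
	u64 ns;

	/* Lockless seqcount read loop: retry if the kernel updated
	 * vvar_data (odd seq or changed seq) while we were reading.
	 */
	do {
		seq = vvar_read_begin(vvar);
		ts->tv_sec = vvar->wall_time_sec;
		ns = vvar->wall_time_snsec;
		ns += vgetsns(vvar);
		ns >>= vvar->clock.shift;
	} while (unlikely(vvar_read_retry(vvar, seq)));

	ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
	ts->tv_nsec = ns;

	return 0;
}

This also shows why processor_64.h grows a BUILD_VDSO variant of cpu_relax(): vvar_read_begin() spins via cpu_relax() while a write is in progress, and the vDSO build cannot use the kernel-only pause patching section of the normal definition.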