Diffstat (limited to 'arch/mips')

 -rw-r--r--  arch/mips/alchemy/board-xxs1500.c           |  1
 -rw-r--r--  arch/mips/include/asm/atomic.h              | 55
 -rw-r--r--  arch/mips/include/asm/cmpxchg.h             | 22
 -rw-r--r--  arch/mips/include/asm/kvm_host.h            |  9
 -rw-r--r--  arch/mips/include/asm/mips-boards/launch.h  |  5
 -rw-r--r--  arch/mips/kernel/asm-offsets.c              |  1
 -rw-r--r--  arch/mips/kernel/cmpxchg.c                  |  4
 -rw-r--r--  arch/mips/kernel/kprobes.c                  |  3
 -rw-r--r--  arch/mips/kernel/process.c                  |  2
 -rw-r--r--  arch/mips/kernel/smp.c                      |  1
 -rw-r--r--  arch/mips/kernel/syscalls/syscall_n32.tbl   |  2
 -rw-r--r--  arch/mips/kernel/syscalls/syscall_n64.tbl   |  2
 -rw-r--r--  arch/mips/kernel/syscalls/syscall_o32.tbl   |  2
 -rw-r--r--  arch/mips/kvm/Makefile                      |  2
 -rw-r--r--  arch/mips/kvm/mips.c                        | 90
 -rw-r--r--  arch/mips/lib/mips-atomic.c                 | 12
 -rw-r--r--  arch/mips/mm/cache.c                        | 30
 -rw-r--r--  arch/mips/ralink/of.c                       |  2

18 files changed, 132 insertions, 113 deletions
diff --git a/arch/mips/alchemy/board-xxs1500.c b/arch/mips/alchemy/board-xxs1500.c
index b184baa4e56a..f175bce2987f 100644
--- a/arch/mips/alchemy/board-xxs1500.c
+++ b/arch/mips/alchemy/board-xxs1500.c
@@ -18,6 +18,7 @@
 #include <asm/reboot.h>
 #include <asm/setup.h>
 #include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-au1x00/gpio-au1000.h>
 #include <prom.h>
 
 const char *get_system_type(void)
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 27ad76791539..95e1f7f3597f 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -25,24 +25,25 @@
 #include <asm/war.h>
 
 #define ATOMIC_OPS(pfx, type)						\
-static __always_inline type pfx##_read(const pfx##_t *v)		\
+static __always_inline type arch_##pfx##_read(const pfx##_t *v)	\
 {									\
 	return READ_ONCE(v->counter);					\
 }									\
 									\
-static __always_inline void pfx##_set(pfx##_t *v, type i)		\
+static __always_inline void arch_##pfx##_set(pfx##_t *v, type i)	\
 {									\
 	WRITE_ONCE(v->counter, i);					\
 }									\
 									\
-static __always_inline type pfx##_cmpxchg(pfx##_t *v, type o, type n)	\
+static __always_inline type						\
+arch_##pfx##_cmpxchg(pfx##_t *v, type o, type n)			\
 {									\
-	return cmpxchg(&v->counter, o, n);				\
+	return arch_cmpxchg(&v->counter, o, n);				\
 }									\
 									\
-static __always_inline type pfx##_xchg(pfx##_t *v, type n)		\
+static __always_inline type arch_##pfx##_xchg(pfx##_t *v, type n)	\
 {									\
-	return xchg(&v->counter, n);					\
+	return arch_xchg(&v->counter, n);				\
 }
 
 ATOMIC_OPS(atomic, int)
@@ -53,7 +54,7 @@ ATOMIC_OPS(atomic64, s64)
 #endif
 
 #define ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc)			\
-static __inline__ void pfx##_##op(type i, pfx##_t * v)			\
+static __inline__ void arch_##pfx##_##op(type i, pfx##_t * v)		\
 {									\
 	type temp;							\
 									\
@@ -80,7 +81,8 @@ static __inline__ void pfx##_##op(type i, pfx##_t * v)	\
 }
 
 #define ATOMIC_OP_RETURN(pfx, op, type, c_op, asm_op, ll, sc)		\
-static __inline__ type pfx##_##op##_return_relaxed(type i, pfx##_t * v) \
+static __inline__ type							\
+arch_##pfx##_##op##_return_relaxed(type i, pfx##_t * v)		\
 {									\
 	type temp, result;						\
 									\
@@ -113,7 +115,8 @@ static __inline__ type pfx##_##op##_return_relaxed(type i, pfx##_t * v) \
 }
 
 #define ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc)		\
-static __inline__ type pfx##_fetch_##op##_relaxed(type i, pfx##_t * v)	\
+static __inline__ type							\
+arch_##pfx##_fetch_##op##_relaxed(type i, pfx##_t * v)			\
 {									\
 	int temp, result;						\
 									\
@@ -153,18 +156,18 @@ static __inline__ type pfx##_fetch_##op##_relaxed(type i, pfx##_t * v)	\
 ATOMIC_OPS(atomic, add, int, +=, addu, ll, sc)
 ATOMIC_OPS(atomic, sub, int, -=, subu, ll, sc)
 
-#define atomic_add_return_relaxed	atomic_add_return_relaxed
-#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
-#define atomic_fetch_add_relaxed	atomic_fetch_add_relaxed
-#define atomic_fetch_sub_relaxed	atomic_fetch_sub_relaxed
+#define arch_atomic_add_return_relaxed	arch_atomic_add_return_relaxed
+#define arch_atomic_sub_return_relaxed	arch_atomic_sub_return_relaxed
+#define arch_atomic_fetch_add_relaxed	arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_sub_relaxed	arch_atomic_fetch_sub_relaxed
 
 #ifdef CONFIG_64BIT
 ATOMIC_OPS(atomic64, add, s64, +=, daddu, lld, scd)
 ATOMIC_OPS(atomic64, sub, s64, -=, dsubu, lld, scd)
-# define atomic64_add_return_relaxed	atomic64_add_return_relaxed
-# define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
-# define atomic64_fetch_add_relaxed	atomic64_fetch_add_relaxed
-# define atomic64_fetch_sub_relaxed	atomic64_fetch_sub_relaxed
+# define arch_atomic64_add_return_relaxed	arch_atomic64_add_return_relaxed
+# define arch_atomic64_sub_return_relaxed	arch_atomic64_sub_return_relaxed
+# define arch_atomic64_fetch_add_relaxed	arch_atomic64_fetch_add_relaxed
+# define arch_atomic64_fetch_sub_relaxed	arch_atomic64_fetch_sub_relaxed
 #endif /* CONFIG_64BIT */
 
 #undef ATOMIC_OPS
@@ -176,17 +179,17 @@ ATOMIC_OPS(atomic, and, int, &=, and, ll, sc)
 ATOMIC_OPS(atomic, or, int, |=, or, ll, sc)
 ATOMIC_OPS(atomic, xor, int, ^=, xor, ll, sc)
 
-#define atomic_fetch_and_relaxed	atomic_fetch_and_relaxed
-#define atomic_fetch_or_relaxed		atomic_fetch_or_relaxed
-#define atomic_fetch_xor_relaxed	atomic_fetch_xor_relaxed
+#define arch_atomic_fetch_and_relaxed	arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_or_relaxed	arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_xor_relaxed	arch_atomic_fetch_xor_relaxed
 
 #ifdef CONFIG_64BIT
 ATOMIC_OPS(atomic64, and, s64, &=, and, lld, scd)
 ATOMIC_OPS(atomic64, or, s64, |=, or, lld, scd)
 ATOMIC_OPS(atomic64, xor, s64, ^=, xor, lld, scd)
-# define atomic64_fetch_and_relaxed	atomic64_fetch_and_relaxed
-# define atomic64_fetch_or_relaxed	atomic64_fetch_or_relaxed
-# define atomic64_fetch_xor_relaxed	atomic64_fetch_xor_relaxed
+# define arch_atomic64_fetch_and_relaxed	arch_atomic64_fetch_and_relaxed
+# define arch_atomic64_fetch_or_relaxed		arch_atomic64_fetch_or_relaxed
+# define arch_atomic64_fetch_xor_relaxed	arch_atomic64_fetch_xor_relaxed
 #endif
 
 #undef ATOMIC_OPS
@@ -203,7 +206,7 @@ ATOMIC_OPS(atomic64, xor, s64, ^=, xor, lld, scd)
  * The function returns the old value of @v minus @i.
  */
 #define ATOMIC_SIP_OP(pfx, type, op, ll, sc)				\
-static __inline__ int pfx##_sub_if_positive(type i, pfx##_t * v)	\
+static __inline__ int arch_##pfx##_sub_if_positive(type i, pfx##_t * v)	\
 {									\
 	type temp, result;						\
 									\
@@ -255,11 +258,11 @@ static __inline__ int pfx##_sub_if_positive(type i, pfx##_t * v)	\
 }
 
 ATOMIC_SIP_OP(atomic, int, subu, ll, sc)
-#define atomic_dec_if_positive(v)	atomic_sub_if_positive(1, v)
+#define arch_atomic_dec_if_positive(v)	arch_atomic_sub_if_positive(1, v)
 
 #ifdef CONFIG_64BIT
 ATOMIC_SIP_OP(atomic64, s64, dsubu, lld, scd)
-#define atomic64_dec_if_positive(v)	atomic64_sub_if_positive(1, v)
+#define arch_atomic64_dec_if_positive(v)	arch_atomic64_sub_if_positive(1, v)
 #endif
 
 #undef ATOMIC_SIP_OP
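With the arch_ prefix in place, the generic atomic layer can wrap every
architecture hook with sanitizer instrumentation. A minimal hand-written
sketch of that wrapper pattern follows; the real wrappers in
<asm-generic/atomic-instrumented.h> are script-generated, so this is
illustrative only:

	/*
	 * Sketch: how the generic layer builds the public atomic_add_return()
	 * on top of the arch_atomic_add_return() defined by the diff above.
	 */
	static __always_inline int atomic_add_return(int i, atomic_t *v)
	{
		/* sanitizer hook (KASAN/KCSAN) observes the access first... */
		instrument_atomic_read_write(v, sizeof(*v));
		/* ...then the uninstrumented arch op does the real work */
		return arch_atomic_add_return(i, v);
	}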
diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
index ed8f3f3c4304..0b983800f48b 100644
--- a/arch/mips/include/asm/cmpxchg.h
+++ b/arch/mips/include/asm/cmpxchg.h
@@ -90,7 +90,7 @@ unsigned long __xchg(volatile void *ptr, unsigned long x, int size)
 	}
 }
 
-#define xchg(ptr, x)							\
+#define arch_xchg(ptr, x)						\
 ({									\
 	__typeof__(*(ptr)) __res;					\
 									\
@@ -175,14 +175,14 @@ unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 	}
 }
 
-#define cmpxchg_local(ptr, old, new)					\
+#define arch_cmpxchg_local(ptr, old, new)				\
 	((__typeof__(*(ptr)))						\
 		__cmpxchg((ptr),					\
 			  (unsigned long)(__typeof__(*(ptr)))(old),	\
 			  (unsigned long)(__typeof__(*(ptr)))(new),	\
 			  sizeof(*(ptr))))
 
-#define cmpxchg(ptr, old, new)						\
+#define arch_cmpxchg(ptr, old, new)					\
 ({									\
 	__typeof__(*(ptr)) __res;					\
 									\
@@ -194,7 +194,7 @@ unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 	if (__SYNC_loongson3_war == 0)					\
 		smp_mb__before_llsc();					\
 									\
-	__res = cmpxchg_local((ptr), (old), (new));			\
+	__res = arch_cmpxchg_local((ptr), (old), (new));		\
 									\
 	/*								\
 	 * In the Loongson3 workaround case __cmpxchg_asm() already	\
@@ -208,21 +208,21 @@ unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 })
 
 #ifdef CONFIG_64BIT
-#define cmpxchg64_local(ptr, o, n)					\
+#define arch_cmpxchg64_local(ptr, o, n)					\
 ({									\
 	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
-	cmpxchg_local((ptr), (o), (n));					\
+	arch_cmpxchg_local((ptr), (o), (n));				\
 })
 
-#define cmpxchg64(ptr, o, n)						\
+#define arch_cmpxchg64(ptr, o, n)					\
 ({									\
 	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
-	cmpxchg((ptr), (o), (n));					\
+	arch_cmpxchg((ptr), (o), (n));					\
 })
 #else
 # include <asm-generic/cmpxchg-local.h>
-# define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+# define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
 
 # ifdef CONFIG_SMP
 
@@ -294,7 +294,7 @@ static inline unsigned long __cmpxchg64(volatile void *ptr,
 	return ret;
 }
 
-# define cmpxchg64(ptr, o, n) ({					\
+# define arch_cmpxchg64(ptr, o, n) ({					\
 	unsigned long long __old = (__typeof__(*(ptr)))(o);		\
 	unsigned long long __new = (__typeof__(*(ptr)))(n);		\
 	__typeof__(*(ptr)) __res;					\
@@ -317,7 +317,7 @@ static inline unsigned long __cmpxchg64(volatile void *ptr,
 })
 
 # else /* !CONFIG_SMP */
-# define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
+# define arch_cmpxchg64(ptr, o, n) arch_cmpxchg64_local((ptr), (o), (n))
 # endif /* !CONFIG_SMP */
 #endif /* !CONFIG_64BIT */
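Callers are unaffected by this rename: cmpxchg() and xchg() remain the
public spellings, now supplied by the instrumented generic layer on top of
arch_cmpxchg()/arch_xchg(). For context, a typical compare-and-swap retry
loop looks like the sketch below (the helper name is illustrative, not an
in-tree function):

	/* Sketch: add 'delta' to *v unless the value is negative. */
	static int add_unless_negative(atomic_t *v, int delta)
	{
		int old = atomic_read(v);

		do {
			if (old < 0)
				return 0;	/* value went negative: give up */
			/* atomic_try_cmpxchg() reloads 'old' on failure */
		} while (!atomic_try_cmpxchg(v, &old, old + delta));

		return 1;
	}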
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index fca4547d580f..696f6b009377 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -109,10 +109,11 @@ static inline bool kvm_is_error_hva(unsigned long addr)
 }
 
 struct kvm_vm_stat {
-	ulong remote_tlb_flush;
+	struct kvm_vm_stat_generic generic;
 };
 
 struct kvm_vcpu_stat {
+	struct kvm_vcpu_stat_generic generic;
 	u64 wait_exits;
 	u64 cache_exits;
 	u64 signal_exits;
@@ -142,12 +143,6 @@ struct kvm_vcpu_stat {
 #ifdef CONFIG_CPU_LOONGSON64
 	u64 vz_cpucfg_exits;
 #endif
-	u64 halt_successful_poll;
-	u64 halt_attempted_poll;
-	u64 halt_poll_success_ns;
-	u64 halt_poll_fail_ns;
-	u64 halt_poll_invalid;
-	u64 halt_wakeup;
 };
 
 struct kvm_arch_memory_slot {
diff --git a/arch/mips/include/asm/mips-boards/launch.h b/arch/mips/include/asm/mips-boards/launch.h
index f93aa5ee2e2e..3481ed4c117b 100644
--- a/arch/mips/include/asm/mips-boards/launch.h
+++ b/arch/mips/include/asm/mips-boards/launch.h
@@ -3,6 +3,9 @@
  *
  */
 
+#ifndef _ASM_MIPS_BOARDS_LAUNCH_H
+#define _ASM_MIPS_BOARDS_LAUNCH_H
+
 #ifndef _ASSEMBLER_
 
 struct cpulaunch {
@@ -34,3 +37,5 @@ struct cpulaunch {
 
 /* Polling period in count cycles for secondary CPU's */
 #define LAUNCHPERIOD	10000
+
+#endif /* _ASM_MIPS_BOARDS_LAUNCH_H */
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 5735b2cd6f2a..04ca75278f02 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -78,7 +78,6 @@ void output_ptreg_defines(void)
 void output_task_defines(void)
 {
 	COMMENT("MIPS task_struct offsets.");
-	OFFSET(TASK_STATE, task_struct, state);
 	OFFSET(TASK_THREAD_INFO, task_struct, stack);
 	OFFSET(TASK_FLAGS, task_struct, flags);
 	OFFSET(TASK_MM, task_struct, mm);
diff --git a/arch/mips/kernel/cmpxchg.c b/arch/mips/kernel/cmpxchg.c
index 89107deb03fc..ac9c8cfb2ba9 100644
--- a/arch/mips/kernel/cmpxchg.c
+++ b/arch/mips/kernel/cmpxchg.c
@@ -41,7 +41,7 @@ unsigned long __xchg_small(volatile void *ptr, unsigned long val, unsigned int s
 	do {
 		old32 = load32;
 		new32 = (load32 & ~mask) | (val << shift);
-		load32 = cmpxchg(ptr32, old32, new32);
+		load32 = arch_cmpxchg(ptr32, old32, new32);
 	} while (load32 != old32);
 
 	return (load32 & mask) >> shift;
@@ -97,7 +97,7 @@ unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old,
 		 */
 		old32 = (load32 & ~mask) | (old << shift);
 		new32 = (load32 & ~mask) | (new << shift);
-		load32 = cmpxchg(ptr32, old32, new32);
+		load32 = arch_cmpxchg(ptr32, old32, new32);
 		if (load32 == old32)
 			return old;
 	}
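For context, __cmpxchg_small() above emulates 1- and 2-byte cmpxchg with
the 32-bit primitive by splicing the narrow value into its naturally
aligned word. A hedged sketch of the byte-wide case (little-endian layout
assumed; the function name is illustrative, not the kernel's exact code):

	/* Sketch: byte-wide cmpxchg built from the 32-bit primitive. */
	unsigned long cmpxchg_u8_via_u32(volatile unsigned char *ptr,
					 unsigned char old, unsigned char new)
	{
		volatile unsigned int *ptr32 =
			(volatile unsigned int *)((unsigned long)ptr & ~0x3UL);
		unsigned int shift = ((unsigned long)ptr & 0x3) * 8;	/* LE */
		unsigned int mask = 0xffU << shift;
		unsigned int load32, old32, new32;

		load32 = *ptr32;
		for (;;) {
			/* splice the 8-bit old/new into the surrounding word */
			old32 = (load32 & ~mask) | ((unsigned int)old << shift);
			new32 = (load32 & ~mask) | ((unsigned int)new << shift);
			load32 = arch_cmpxchg(ptr32, old32, new32);
			if (load32 == old32)
				return old;	/* success */
			if (((load32 & mask) >> shift) != old)
				return (load32 & mask) >> shift; /* byte changed */
			/* only neighbouring bytes changed: retry */
		}
	}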
diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c
index 54dfba8fa77c..75bff0f77319 100644
--- a/arch/mips/kernel/kprobes.c
+++ b/arch/mips/kernel/kprobes.c
@@ -403,9 +403,6 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 	struct kprobe *cur = kprobe_running();
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
 
-	if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
-		return 1;
-
 	if (kcb->kprobe_status & KPROBE_HIT_SS) {
 		resume_execution(cur, regs, kcb);
 		regs->cp0_status |= kcb->kprobe_old_SR;
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index bff080db0294..73c8e7990a97 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -662,7 +662,7 @@ unsigned long get_wchan(struct task_struct *task)
 	unsigned long ra = 0;
 #endif
 
-	if (!task || task == current || task->state == TASK_RUNNING)
+	if (!task || task == current || task_is_running(task))
 		goto out;
 	if (!task_stack_page(task))
 		goto out;
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index ef86fbad8546..d542fb7af3ba 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -348,7 +348,6 @@ asmlinkage void start_secondary(void)
 	 */
 	calibrate_delay();
-	preempt_disable();
 	cpu = smp_processor_id();
 	cpu_data[cpu].udelay_val = loops_per_jiffy;
diff --git a/arch/mips/kernel/syscalls/syscall_n32.tbl b/arch/mips/kernel/syscalls/syscall_n32.tbl
index 5e0096657251..9220909526f9 100644
--- a/arch/mips/kernel/syscalls/syscall_n32.tbl
+++ b/arch/mips/kernel/syscalls/syscall_n32.tbl
@@ -381,7 +381,7 @@
 440	n32	process_madvise			sys_process_madvise
 441	n32	epoll_pwait2			compat_sys_epoll_pwait2
 442	n32	mount_setattr			sys_mount_setattr
-443	n32	quotactl_path			sys_quotactl_path
+# 443 reserved for quotactl_path
 444	n32	landlock_create_ruleset		sys_landlock_create_ruleset
 445	n32	landlock_add_rule		sys_landlock_add_rule
 446	n32	landlock_restrict_self		sys_landlock_restrict_self
diff --git a/arch/mips/kernel/syscalls/syscall_n64.tbl b/arch/mips/kernel/syscalls/syscall_n64.tbl
index 9974f5f8e49b..9cd1c34f31b5 100644
--- a/arch/mips/kernel/syscalls/syscall_n64.tbl
+++ b/arch/mips/kernel/syscalls/syscall_n64.tbl
@@ -357,7 +357,7 @@
 440	n64	process_madvise			sys_process_madvise
 441	n64	epoll_pwait2			sys_epoll_pwait2
 442	n64	mount_setattr			sys_mount_setattr
-443	n64	quotactl_path			sys_quotactl_path
+# 443 reserved for quotactl_path
 444	n64	landlock_create_ruleset		sys_landlock_create_ruleset
 445	n64	landlock_add_rule		sys_landlock_add_rule
 446	n64	landlock_restrict_self		sys_landlock_restrict_self
diff --git a/arch/mips/kernel/syscalls/syscall_o32.tbl b/arch/mips/kernel/syscalls/syscall_o32.tbl
index 39d6e71e57b6..d560c467a8c6 100644
--- a/arch/mips/kernel/syscalls/syscall_o32.tbl
+++ b/arch/mips/kernel/syscalls/syscall_o32.tbl
@@ -430,7 +430,7 @@
 440	o32	process_madvise			sys_process_madvise
 441	o32	epoll_pwait2			sys_epoll_pwait2		compat_sys_epoll_pwait2
 442	o32	mount_setattr			sys_mount_setattr
-443	o32	quotactl_path			sys_quotactl_path
+# 443 reserved for quotactl_path
 444	o32	landlock_create_ruleset		sys_landlock_create_ruleset
 445	o32	landlock_add_rule		sys_landlock_add_rule
 446	o32	landlock_restrict_self		sys_landlock_restrict_self
diff --git a/arch/mips/kvm/Makefile b/arch/mips/kvm/Makefile
index 30cc060857c7..c67250a956b8 100644
--- a/arch/mips/kvm/Makefile
+++ b/arch/mips/kvm/Makefile
@@ -2,7 +2,7 @@
 # Makefile for KVM support for MIPS
 #
 
-common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o eventfd.o)
+common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o eventfd.o binary_stats.o)
 
 EXTRA_CFLAGS += -Ivirt/kvm -Iarch/mips/kvm
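A few hunks up, get_wchan() switched from reading task->state directly to
the then-new task_is_running() helper. Its generic definition at the time
was approximately the following (quoted from memory, from <linux/sched.h>;
treat as a sketch rather than the authoritative source):

	/* Sketch: the helper the get_wchan() hunk above relies on. */
	#define task_is_running(task)	\
		(READ_ONCE((task)->state) == TASK_RUNNING)

The point of the helper is to funnel all reads of the scheduler state
through one annotated accessor instead of open-coded field loads.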
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 4d4af97dcc88..af9dd029a4e1 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -38,43 +38,63 @@
 #define VECTORSPACING 0x100	/* for EI/VI mode */
 #endif
 
-struct kvm_stats_debugfs_item debugfs_entries[] = {
-	VCPU_STAT("wait", wait_exits),
-	VCPU_STAT("cache", cache_exits),
-	VCPU_STAT("signal", signal_exits),
-	VCPU_STAT("interrupt", int_exits),
-	VCPU_STAT("cop_unusable", cop_unusable_exits),
-	VCPU_STAT("tlbmod", tlbmod_exits),
-	VCPU_STAT("tlbmiss_ld", tlbmiss_ld_exits),
-	VCPU_STAT("tlbmiss_st", tlbmiss_st_exits),
-	VCPU_STAT("addrerr_st", addrerr_st_exits),
-	VCPU_STAT("addrerr_ld", addrerr_ld_exits),
-	VCPU_STAT("syscall", syscall_exits),
-	VCPU_STAT("resvd_inst", resvd_inst_exits),
-	VCPU_STAT("break_inst", break_inst_exits),
-	VCPU_STAT("trap_inst", trap_inst_exits),
-	VCPU_STAT("msa_fpe", msa_fpe_exits),
-	VCPU_STAT("fpe", fpe_exits),
-	VCPU_STAT("msa_disabled", msa_disabled_exits),
-	VCPU_STAT("flush_dcache", flush_dcache_exits),
-	VCPU_STAT("vz_gpsi", vz_gpsi_exits),
-	VCPU_STAT("vz_gsfc", vz_gsfc_exits),
-	VCPU_STAT("vz_hc", vz_hc_exits),
-	VCPU_STAT("vz_grr", vz_grr_exits),
-	VCPU_STAT("vz_gva", vz_gva_exits),
-	VCPU_STAT("vz_ghfc", vz_ghfc_exits),
-	VCPU_STAT("vz_gpa", vz_gpa_exits),
-	VCPU_STAT("vz_resvd", vz_resvd_exits),
+const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
+	KVM_GENERIC_VM_STATS()
+};
+static_assert(ARRAY_SIZE(kvm_vm_stats_desc) ==
+		sizeof(struct kvm_vm_stat) / sizeof(u64));
+
+const struct kvm_stats_header kvm_vm_stats_header = {
+	.name_size = KVM_STATS_NAME_SIZE,
+	.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
+	.id_offset = sizeof(struct kvm_stats_header),
+	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
+	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
+		       sizeof(kvm_vm_stats_desc),
+};
+
+const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
+	KVM_GENERIC_VCPU_STATS(),
+	STATS_DESC_COUNTER(VCPU, wait_exits),
+	STATS_DESC_COUNTER(VCPU, cache_exits),
+	STATS_DESC_COUNTER(VCPU, signal_exits),
+	STATS_DESC_COUNTER(VCPU, int_exits),
+	STATS_DESC_COUNTER(VCPU, cop_unusable_exits),
+	STATS_DESC_COUNTER(VCPU, tlbmod_exits),
+	STATS_DESC_COUNTER(VCPU, tlbmiss_ld_exits),
+	STATS_DESC_COUNTER(VCPU, tlbmiss_st_exits),
+	STATS_DESC_COUNTER(VCPU, addrerr_st_exits),
+	STATS_DESC_COUNTER(VCPU, addrerr_ld_exits),
+	STATS_DESC_COUNTER(VCPU, syscall_exits),
+	STATS_DESC_COUNTER(VCPU, resvd_inst_exits),
+	STATS_DESC_COUNTER(VCPU, break_inst_exits),
+	STATS_DESC_COUNTER(VCPU, trap_inst_exits),
+	STATS_DESC_COUNTER(VCPU, msa_fpe_exits),
+	STATS_DESC_COUNTER(VCPU, fpe_exits),
+	STATS_DESC_COUNTER(VCPU, msa_disabled_exits),
+	STATS_DESC_COUNTER(VCPU, flush_dcache_exits),
+	STATS_DESC_COUNTER(VCPU, vz_gpsi_exits),
+	STATS_DESC_COUNTER(VCPU, vz_gsfc_exits),
+	STATS_DESC_COUNTER(VCPU, vz_hc_exits),
+	STATS_DESC_COUNTER(VCPU, vz_grr_exits),
+	STATS_DESC_COUNTER(VCPU, vz_gva_exits),
+	STATS_DESC_COUNTER(VCPU, vz_ghfc_exits),
+	STATS_DESC_COUNTER(VCPU, vz_gpa_exits),
+	STATS_DESC_COUNTER(VCPU, vz_resvd_exits),
 #ifdef CONFIG_CPU_LOONGSON64
-	VCPU_STAT("vz_cpucfg", vz_cpucfg_exits),
+	STATS_DESC_COUNTER(VCPU, vz_cpucfg_exits),
 #endif
-	VCPU_STAT("halt_successful_poll", halt_successful_poll),
-	VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
-	VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
-	VCPU_STAT("halt_wakeup", halt_wakeup),
-	VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
-	VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
-	{NULL}
+};
+static_assert(ARRAY_SIZE(kvm_vcpu_stats_desc) ==
+		sizeof(struct kvm_vcpu_stat) / sizeof(u64));
+
+const struct kvm_stats_header kvm_vcpu_stats_header = {
+	.name_size = KVM_STATS_NAME_SIZE,
+	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
+	.id_offset = sizeof(struct kvm_stats_header),
+	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
+	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
+		       sizeof(kvm_vcpu_stats_desc),
 };
 
 bool kvm_trace_guest_mode_change;
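The headers above make the stats layout self-describing: a header block,
then name-tagged descriptors, then the data, at offsets the header itself
advertises. A hedged userspace sketch of reading it (assumes the
KVM_GET_STATS_FD ioctl introduced by the same series; struct and ioctl
names per <linux/kvm.h>, quoted from memory):

	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/kvm.h>

	/* Sketch: dump the stats header a vCPU fd exposes. */
	int dump_stats_header(int vcpu_fd)
	{
		struct kvm_stats_header hdr;
		int stats_fd = ioctl(vcpu_fd, KVM_GET_STATS_FD, NULL);

		if (stats_fd < 0)
			return -1;
		/* header sits at offset 0; descriptors and data follow
		 * at the offsets it advertises */
		if (pread(stats_fd, &hdr, sizeof(hdr), 0) != sizeof(hdr)) {
			close(stats_fd);
			return -1;
		}
		printf("%u descriptors, data at offset %u\n",
		       hdr.num_desc, hdr.data_offset);
		close(stats_fd);
		return 0;
	}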
- VCPU_STAT("halt_attempted_poll", halt_attempted_poll), - VCPU_STAT("halt_poll_invalid", halt_poll_invalid), - VCPU_STAT("halt_wakeup", halt_wakeup), - VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns), - VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns), - {NULL} +}; +static_assert(ARRAY_SIZE(kvm_vcpu_stats_desc) == + sizeof(struct kvm_vcpu_stat) / sizeof(u64)); + +const struct kvm_stats_header kvm_vcpu_stats_header = { + .name_size = KVM_STATS_NAME_SIZE, + .num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc), + .id_offset = sizeof(struct kvm_stats_header), + .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE, + .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE + + sizeof(kvm_vcpu_stats_desc), }; bool kvm_trace_guest_mode_change; diff --git a/arch/mips/lib/mips-atomic.c b/arch/mips/lib/mips-atomic.c index de03838b343b..a9b72eacfc0b 100644 --- a/arch/mips/lib/mips-atomic.c +++ b/arch/mips/lib/mips-atomic.c @@ -37,7 +37,7 @@ */ notrace void arch_local_irq_disable(void) { - preempt_disable(); + preempt_disable_notrace(); __asm__ __volatile__( " .set push \n" @@ -53,7 +53,7 @@ notrace void arch_local_irq_disable(void) : /* no inputs */ : "memory"); - preempt_enable(); + preempt_enable_notrace(); } EXPORT_SYMBOL(arch_local_irq_disable); @@ -61,7 +61,7 @@ notrace unsigned long arch_local_irq_save(void) { unsigned long flags; - preempt_disable(); + preempt_disable_notrace(); __asm__ __volatile__( " .set push \n" @@ -78,7 +78,7 @@ notrace unsigned long arch_local_irq_save(void) : /* no inputs */ : "memory"); - preempt_enable(); + preempt_enable_notrace(); return flags; } @@ -88,7 +88,7 @@ notrace void arch_local_irq_restore(unsigned long flags) { unsigned long __tmp1; - preempt_disable(); + preempt_disable_notrace(); __asm__ __volatile__( " .set push \n" @@ -106,7 +106,7 @@ notrace void arch_local_irq_restore(unsigned long flags) : "0" (flags) : "memory"); - preempt_enable(); + preempt_enable_notrace(); } EXPORT_SYMBOL(arch_local_irq_restore); diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c index a7bf0c80371c..830ab91e574f 100644 --- a/arch/mips/mm/cache.c +++ b/arch/mips/mm/cache.c @@ -158,31 +158,29 @@ unsigned long _page_cachable_default; EXPORT_SYMBOL(_page_cachable_default); #define PM(p) __pgprot(_page_cachable_default | (p)) -#define PVA(p) PM(_PAGE_VALID | _PAGE_ACCESSED | (p)) static inline void setup_protection_map(void) { protection_map[0] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ); - protection_map[1] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC); - protection_map[2] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ); - protection_map[3] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC); - protection_map[4] = PVA(_PAGE_PRESENT); - protection_map[5] = PVA(_PAGE_PRESENT); - protection_map[6] = PVA(_PAGE_PRESENT); - protection_map[7] = PVA(_PAGE_PRESENT); + protection_map[1] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC); + protection_map[2] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ); + protection_map[3] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC); + protection_map[4] = PM(_PAGE_PRESENT); + protection_map[5] = PM(_PAGE_PRESENT); + protection_map[6] = PM(_PAGE_PRESENT); + protection_map[7] = PM(_PAGE_PRESENT); protection_map[8] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ); - protection_map[9] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC); - protection_map[10] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | + protection_map[9] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC); + protection_map[10] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | _PAGE_NO_READ); - 
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index a7bf0c80371c..830ab91e574f 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -158,31 +158,29 @@ unsigned long _page_cachable_default;
 EXPORT_SYMBOL(_page_cachable_default);
 
 #define PM(p)	__pgprot(_page_cachable_default | (p))
-#define PVA(p)	PM(_PAGE_VALID | _PAGE_ACCESSED | (p))
 
 static inline void setup_protection_map(void)
 {
 	protection_map[0]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
-	protection_map[1]  = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC);
-	protection_map[2]  = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
-	protection_map[3]  = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC);
-	protection_map[4]  = PVA(_PAGE_PRESENT);
-	protection_map[5]  = PVA(_PAGE_PRESENT);
-	protection_map[6]  = PVA(_PAGE_PRESENT);
-	protection_map[7]  = PVA(_PAGE_PRESENT);
+	protection_map[1]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
+	protection_map[2]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
+	protection_map[3]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
+	protection_map[4]  = PM(_PAGE_PRESENT);
+	protection_map[5]  = PM(_PAGE_PRESENT);
+	protection_map[6]  = PM(_PAGE_PRESENT);
+	protection_map[7]  = PM(_PAGE_PRESENT);
 	protection_map[8]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
-	protection_map[9]  = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC);
-	protection_map[10] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE |
+	protection_map[9]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
+	protection_map[10] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE |
 				_PAGE_NO_READ);
-	protection_map[11] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
-	protection_map[12] = PVA(_PAGE_PRESENT);
-	protection_map[13] = PVA(_PAGE_PRESENT);
-	protection_map[14] = PVA(_PAGE_PRESENT);
-	protection_map[15] = PVA(_PAGE_PRESENT);
+	protection_map[11] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
+	protection_map[12] = PM(_PAGE_PRESENT);
+	protection_map[13] = PM(_PAGE_PRESENT);
+	protection_map[14] = PM(_PAGE_PRESENT | _PAGE_WRITE);
+	protection_map[15] = PM(_PAGE_PRESENT | _PAGE_WRITE);
 }
 
-#undef _PVA
 #undef PM
 
 void cpu_cache_init(void)
diff --git a/arch/mips/ralink/of.c b/arch/mips/ralink/of.c
index 0c5de07da097..0135376c5de5 100644
--- a/arch/mips/ralink/of.c
+++ b/arch/mips/ralink/of.c
@@ -8,6 +8,7 @@
 
 #include <linux/io.h>
 #include <linux/clk.h>
+#include <linux/export.h>
 #include <linux/init.h>
 #include <linux/sizes.h>
 #include <linux/of_fdt.h>
@@ -25,6 +26,7 @@
 
 __iomem void *rt_sysc_membase;
 __iomem void *rt_memc_membase;
+EXPORT_SYMBOL_GPL(rt_sysc_membase);
 
 __iomem void *plat_of_remap_node(const char *node)
 {
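The new EXPORT_SYMBOL_GPL lets modular code reference the mapped Ralink
system-controller base. A hedged sketch of what that permits (the module
and accessor below are hypothetical, not in-tree code):

	#include <linux/io.h>
	#include <linux/module.h>

	/* provided by arch/mips/ralink/of.c via the export above */
	extern __iomem void *rt_sysc_membase;

	/* Sketch: a modular driver reading a system-controller register;
	 * "example_sysc_readl" is illustrative only. */
	static u32 example_sysc_readl(unsigned int reg)
	{
		return __raw_readl(rt_sysc_membase + reg);
	}

	MODULE_LICENSE("GPL");	/* a GPL-only export requires this */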
