Diffstat (limited to 'arch/loongarch')
-rw-r--r--  arch/loongarch/Kconfig                     |  16
-rw-r--r--  arch/loongarch/Makefile                    |   3
-rw-r--r--  arch/loongarch/include/asm/Kbuild          |  17
-rw-r--r--  arch/loongarch/include/asm/addrspace.h     |   4
-rw-r--r--  arch/loongarch/include/asm/asmmacro.h      |   1
-rw-r--r--  arch/loongarch/include/asm/ftrace.h        |   1
-rw-r--r--  arch/loongarch/include/asm/hardirq.h       |   3
-rw-r--r--  arch/loongarch/include/asm/inst.h          |   3
-rw-r--r--  arch/loongarch/include/asm/io.h            |  10
-rw-r--r--  arch/loongarch/include/asm/irq_work.h      |  10
-rw-r--r--  arch/loongarch/include/asm/kvm_host.h      |  14
-rw-r--r--  arch/loongarch/include/asm/kvm_para.h      |  11
-rw-r--r--  arch/loongarch/include/asm/kvm_vcpu.h      |   5
-rw-r--r--  arch/loongarch/include/asm/loongarch.h     |  14
-rw-r--r--  arch/loongarch/include/asm/paravirt.h      |   5
-rw-r--r--  arch/loongarch/include/asm/pgtable-bits.h  |   6
-rw-r--r--  arch/loongarch/include/asm/pgtable.h       |  23
-rw-r--r--  arch/loongarch/include/asm/setup.h         |   5
-rw-r--r--  arch/loongarch/include/asm/smp.h           |   2
-rw-r--r--  arch/loongarch/include/asm/stackframe.h    |  11
-rw-r--r--  arch/loongarch/include/asm/unistd.h        |   3
-rw-r--r--  arch/loongarch/include/asm/uprobes.h       |   4
-rw-r--r--  arch/loongarch/include/uapi/asm/Kbuild     |   2
-rw-r--r--  arch/loongarch/include/uapi/asm/kvm.h      |   4
-rw-r--r--  arch/loongarch/include/uapi/asm/unistd.h   |   4
-rw-r--r--  arch/loongarch/kernel/Makefile.syscalls    |   3
-rw-r--r--  arch/loongarch/kernel/acpi.c               |  22
-rw-r--r--  arch/loongarch/kernel/head.S               |  11
-rw-r--r--  arch/loongarch/kernel/hw_breakpoint.c      |   2
-rw-r--r--  arch/loongarch/kernel/kprobes.c            |   4
-rw-r--r--  arch/loongarch/kernel/paravirt.c           | 151
-rw-r--r--  arch/loongarch/kernel/ptrace.c             |   3
-rw-r--r--  arch/loongarch/kernel/relocate.c           |  52
-rw-r--r--  arch/loongarch/kernel/setup.c              |   4
-rw-r--r--  arch/loongarch/kernel/smp.c                |  21
-rw-r--r--  arch/loongarch/kernel/syscall.c            |  25
-rw-r--r--  arch/loongarch/kernel/time.c               |   2
-rw-r--r--  arch/loongarch/kernel/vmlinux.lds.S        |   8
-rw-r--r--  arch/loongarch/kvm/Kconfig                 |   1
-rw-r--r--  arch/loongarch/kvm/exit.c                  |  38
-rw-r--r--  arch/loongarch/kvm/main.c                  |   1
-rw-r--r--  arch/loongarch/kvm/mmu.c                   |  72
-rw-r--r--  arch/loongarch/kvm/tlb.c                   |   5
-rw-r--r--  arch/loongarch/kvm/vcpu.c                  | 156
-rw-r--r--  arch/loongarch/power/platform.c            |  37
-rw-r--r--  arch/loongarch/power/suspend_asm.S         |   8
46 files changed, 705 insertions(+), 102 deletions(-)
diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index ddc042895d01..ebdb7156560c 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -16,12 +16,14 @@ config LOONGARCH
select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
select ARCH_HAS_CPU_FINALIZE_INIT
select ARCH_HAS_CURRENT_STACK_POINTER
+ select ARCH_HAS_DEBUG_VM_PGTABLE
select ARCH_HAS_FAST_MULTIPLIER
select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_KCOV
select ARCH_HAS_KERNEL_FPU_SUPPORT if CPU_HAS_FPU
select ARCH_HAS_NMI_SAFE_THIS_CPU_OPS
select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
+ select ARCH_HAS_PTE_DEVMAP
select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_INLINE_READ_LOCK if !PREEMPTION
@@ -106,6 +108,7 @@ config LOONGARCH
select HAVE_ARCH_KFENCE
select HAVE_ARCH_KGDB if PERF_EVENTS
select HAVE_ARCH_MMAP_RND_BITS if MMU
+ select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
select HAVE_ARCH_SECCOMP
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
@@ -607,6 +610,7 @@ config ARCH_HAS_GENERIC_CRASHKERNEL_RESERVATION
config RELOCATABLE
bool "Relocatable kernel"
+ select ARCH_HAS_RELR
help
This builds the kernel as a Position Independent Executable (PIE),
which retains all relocation metadata required, so as to relocate
@@ -649,6 +653,17 @@ config PARAVIRT
over full virtualization. However, when run without a hypervisor
the kernel is theoretically slower and slightly larger.
+config PARAVIRT_TIME_ACCOUNTING
+ bool "Paravirtual steal time accounting"
+ depends on PARAVIRT
+ help
+ Select this option to enable fine granularity task steal time
+ accounting. Time spent executing other tasks in parallel with
+ the current vCPU is discounted from the vCPU power. To account for
+ that, there can be a small performance impact.
+
+ If in doubt, say N here.
+
endmenu
config ARCH_SELECT_MEMORY_MODEL
@@ -699,6 +714,7 @@ config ARCH_HIBERNATION_POSSIBLE
source "kernel/power/Kconfig"
source "drivers/acpi/Kconfig"
+source "drivers/cpufreq/Kconfig"
endmenu
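
For context on what PARAVIRT_TIME_ACCOUNTING enables: the scheduler consumes the steal-clock hook roughly as below, which is why enabling it adds a small per-tick cost. This is a paraphrased sketch of the generic consumer in kernel/sched/cputime.c, not part of this patch:

    /* Paraphrased sketch of steal_account_process_time(): */
    if (static_key_false(&paravirt_steal_enabled)) {
            u64 steal = paravirt_steal_clock(smp_processor_id());

            steal -= this_rq()->prev_steal_time;   /* delta since last tick */
            account_steal_time(steal);             /* credit it as "steal" */
            this_rq()->prev_steal_time += steal;
    }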
diff --git a/arch/loongarch/Makefile b/arch/loongarch/Makefile
index 8674e7e24c4a..ae3f80622f4c 100644
--- a/arch/loongarch/Makefile
+++ b/arch/loongarch/Makefile
@@ -105,7 +105,8 @@ KBUILD_CFLAGS += -fno-jump-tables
endif
KBUILD_RUSTFLAGS += --target=loongarch64-unknown-none-softfloat
-KBUILD_RUSTFLAGS_MODULE += -Crelocation-model=pic
+KBUILD_RUSTFLAGS_KERNEL += -Zdirect-access-external-data=yes
+KBUILD_RUSTFLAGS_MODULE += -Zdirect-access-external-data=no
ifeq ($(CONFIG_RELOCATABLE),y)
KBUILD_CFLAGS_KERNEL += -fPIE
diff --git a/arch/loongarch/include/asm/Kbuild b/arch/loongarch/include/asm/Kbuild
index c862672ed953..2bb3676429c0 100644
--- a/arch/loongarch/include/asm/Kbuild
+++ b/arch/loongarch/include/asm/Kbuild
@@ -1,28 +1,13 @@
# SPDX-License-Identifier: GPL-2.0
+syscall-y += syscall_table_64.h
generated-y += orc_hash.h
-generic-y += dma-contiguous.h
generic-y += mcs_spinlock.h
generic-y += parport.h
generic-y += early_ioremap.h
generic-y += qrwlock.h
generic-y += qspinlock.h
-generic-y += rwsem.h
-generic-y += segment.h
generic-y += user.h
-generic-y += stat.h
-generic-y += fcntl.h
generic-y += ioctl.h
-generic-y += ioctls.h
-generic-y += mman.h
-generic-y += msgbuf.h
-generic-y += sembuf.h
-generic-y += shmbuf.h
generic-y += statfs.h
-generic-y += socket.h
-generic-y += sockios.h
-generic-y += termbits.h
-generic-y += poll.h
generic-y += param.h
-generic-y += posix_types.h
-generic-y += resource.h
diff --git a/arch/loongarch/include/asm/addrspace.h b/arch/loongarch/include/asm/addrspace.h
index 7bd47d65bf7a..fe198b473f84 100644
--- a/arch/loongarch/include/asm/addrspace.h
+++ b/arch/loongarch/include/asm/addrspace.h
@@ -37,6 +37,10 @@ extern unsigned long vm_map_base;
#define UNCACHE_BASE CSR_DMW0_BASE
#endif
+#ifndef WRITECOMBINE_BASE
+#define WRITECOMBINE_BASE CSR_DMW2_BASE
+#endif
+
#define DMW_PABITS 48
#define TO_PHYS_MASK ((1ULL << DMW_PABITS) - 1)
diff --git a/arch/loongarch/include/asm/asmmacro.h b/arch/loongarch/include/asm/asmmacro.h
index 655db7d7a427..8d7f501b0a12 100644
--- a/arch/loongarch/include/asm/asmmacro.h
+++ b/arch/loongarch/include/asm/asmmacro.h
@@ -609,6 +609,7 @@
lu32i.d \reg, 0
lu52i.d \reg, \reg, 0
.pushsection ".la_abs", "aw", %progbits
+ .p2align 3
.dword 766b
.dword \sym
.popsection
diff --git a/arch/loongarch/include/asm/ftrace.h b/arch/loongarch/include/asm/ftrace.h
index de891c2c83d4..c0a682808e07 100644
--- a/arch/loongarch/include/asm/ftrace.h
+++ b/arch/loongarch/include/asm/ftrace.h
@@ -28,7 +28,6 @@ struct dyn_ftrace;
struct dyn_arch_ftrace { };
#define ARCH_SUPPORTS_FTRACE_OPS 1
-#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
#define ftrace_init_nop ftrace_init_nop
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
diff --git a/arch/loongarch/include/asm/hardirq.h b/arch/loongarch/include/asm/hardirq.h
index d41138abcf26..1d7feb719515 100644
--- a/arch/loongarch/include/asm/hardirq.h
+++ b/arch/loongarch/include/asm/hardirq.h
@@ -12,11 +12,12 @@
extern void ack_bad_irq(unsigned int irq);
#define ack_bad_irq ack_bad_irq
-#define NR_IPI 2
+#define NR_IPI 3
enum ipi_msg_type {
IPI_RESCHEDULE,
IPI_CALL_FUNCTION,
+ IPI_IRQ_WORK,
};
typedef struct {
diff --git a/arch/loongarch/include/asm/inst.h b/arch/loongarch/include/asm/inst.h
index c3993fd88aba..944482063f14 100644
--- a/arch/loongarch/include/asm/inst.h
+++ b/arch/loongarch/include/asm/inst.h
@@ -532,6 +532,9 @@ static inline void emit_##NAME(union loongarch_instruction *insn, \
DEF_EMIT_REG0I15_FORMAT(break, break_op)
+/* like emit_break(imm) but returns a constant expression */
+#define __emit_break(imm) ((u32)((imm) | (break_op << 15)))
+
#define DEF_EMIT_REG0I26_FORMAT(NAME, OP) \
static inline void emit_##NAME(union loongarch_instruction *insn, \
int offset) \
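
The point of __emit_break() is that it expands to an integer constant expression (the REG0I15 break format: a 15-bit immediate below the break opcode), so it can seed static initializers where the function-style emit_break()/larch_insn_gen_break() cannot. A minimal sketch of the difference, using the macro exactly as the kprobes/uprobes hunks below do:

    static const u32 bp = __emit_break(BRK_UPROBE_BP);  /* OK: constant expression */
    /* static const u32 bp = larch_insn_gen_break(BRK_UPROBE_BP);
     *   ^ would not compile at file scope: a function call is not constant */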
diff --git a/arch/loongarch/include/asm/io.h b/arch/loongarch/include/asm/io.h
index c2f9979b2979..5e95a60df180 100644
--- a/arch/loongarch/include/asm/io.h
+++ b/arch/loongarch/include/asm/io.h
@@ -25,10 +25,16 @@ extern void __init early_iounmap(void __iomem *addr, unsigned long size);
static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
unsigned long prot_val)
{
- if (prot_val & _CACHE_CC)
+ switch (prot_val & _CACHE_MASK) {
+ case _CACHE_CC:
return (void __iomem *)(unsigned long)(CACHE_BASE + offset);
- else
+ case _CACHE_SUC:
return (void __iomem *)(unsigned long)(UNCACHE_BASE + offset);
+ case _CACHE_WUC:
+ return (void __iomem *)(unsigned long)(WRITECOMBINE_BASE + offset);
+ default:
+ return NULL;
+ }
}
#define ioremap(offset, size) \
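
With DMW2 in place, ioremap_prot() maps each cache attribute onto its own direct-map window instead of collapsing everything non-coherent into UNCACHE_BASE: _CACHE_CC goes to CACHE_BASE (0x9000...), _CACHE_SUC to UNCACHE_BASE (0x8000...), and _CACHE_WUC to the new WRITECOMBINE_BASE (0xa000...). A hedged usage sketch (the device address and size below are hypothetical, for illustration only):

    /* Hypothetical driver use: a write-combined framebuffer mapping now
     * lands in the 0xa000... window instead of falling back to uncached. */
    void __iomem *fb = ioremap_wc(0x1e000000, SZ_16M);
    writel(0, fb);          /* posted, combinable writes */
    iounmap(fb);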
diff --git a/arch/loongarch/include/asm/irq_work.h b/arch/loongarch/include/asm/irq_work.h
new file mode 100644
index 000000000000..d63076e9160d
--- /dev/null
+++ b/arch/loongarch/include/asm/irq_work.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_LOONGARCH_IRQ_WORK_H
+#define _ASM_LOONGARCH_IRQ_WORK_H
+
+static inline bool arch_irq_work_has_interrupt(void)
+{
+ return IS_ENABLED(CONFIG_SMP);
+}
+
+#endif /* _ASM_LOONGARCH_IRQ_WORK_H */
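
Returning true here (on SMP) tells the generic irq_work core that this architecture can raise a dedicated interrupt for queued work, so it runs promptly via the new ACTION_IRQ_WORK IPI (wired up in kernel/smp.c and kernel/paravirt.c below) rather than waiting for the next timer tick. A minimal, hedged sketch of the generic API this speeds up (callback name is hypothetical):

    #include <linux/irq_work.h>

    static void my_work_fn(struct irq_work *work)   /* hypothetical callback */
    {
            /* runs in hard-IRQ context, kicked by the IPI_IRQ_WORK vector */
    }
    static DEFINE_IRQ_WORK(my_work, my_work_fn);

    /* e.g. from NMI/tracing context: */
    irq_work_queue(&my_work);   /* ends up in arch_irq_work_raise() */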
diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h
index c87b6ea0ec47..44b54965f5b4 100644
--- a/arch/loongarch/include/asm/kvm_host.h
+++ b/arch/loongarch/include/asm/kvm_host.h
@@ -30,12 +30,17 @@
#define KVM_PRIVATE_MEM_SLOTS 0
#define KVM_HALT_POLL_NS_DEFAULT 500000
+#define KVM_REQ_TLB_FLUSH_GPA KVM_ARCH_REQ(0)
+#define KVM_REQ_STEAL_UPDATE KVM_ARCH_REQ(1)
#define KVM_GUESTDBG_SW_BP_MASK \
(KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)
#define KVM_GUESTDBG_VALID_MASK \
(KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP | KVM_GUESTDBG_SINGLESTEP)
+#define KVM_DIRTY_LOG_MANUAL_CAPS \
+ (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | KVM_DIRTY_LOG_INITIALLY_SET)
+
struct kvm_vm_stat {
struct kvm_vm_stat_generic generic;
u64 pages;
@@ -190,6 +195,7 @@ struct kvm_vcpu_arch {
/* vcpu's vpid */
u64 vpid;
+ gpa_t flush_gpa;
/* Frequency of stable timer in Hz */
u64 timer_mhz;
@@ -201,6 +207,13 @@ struct kvm_vcpu_arch {
struct kvm_mp_state mp_state;
/* cpucfg */
u32 cpucfg[KVM_MAX_CPUCFG_REGS];
+
+ /* paravirt steal time */
+ struct {
+ u64 guest_addr;
+ u64 last_steal;
+ struct gfn_to_hva_cache cache;
+ } st;
};
static inline unsigned long readl_sw_gcsr(struct loongarch_csrs *csr, int reg)
@@ -261,7 +274,6 @@ static inline bool kvm_is_ifetch_fault(struct kvm_vcpu_arch *arch)
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
-static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
diff --git a/arch/loongarch/include/asm/kvm_para.h b/arch/loongarch/include/asm/kvm_para.h
index 4ba2312e5f8c..335fb86778e2 100644
--- a/arch/loongarch/include/asm/kvm_para.h
+++ b/arch/loongarch/include/asm/kvm_para.h
@@ -14,6 +14,7 @@
#define KVM_HCALL_SERVICE HYPERCALL_ENCODE(HYPERVISOR_KVM, KVM_HCALL_CODE_SERVICE)
#define KVM_HCALL_FUNC_IPI 1
+#define KVM_HCALL_FUNC_NOTIFY 2
#define KVM_HCALL_SWDBG HYPERCALL_ENCODE(HYPERVISOR_KVM, KVM_HCALL_CODE_SWDBG)
@@ -24,6 +25,16 @@
#define KVM_HCALL_INVALID_CODE -1UL
#define KVM_HCALL_INVALID_PARAMETER -2UL
+#define KVM_STEAL_PHYS_VALID BIT_ULL(0)
+#define KVM_STEAL_PHYS_MASK GENMASK_ULL(63, 6)
+
+struct kvm_steal_time {
+ __u64 steal;
+ __u32 version;
+ __u32 flags;
+ __u32 pad[12];
+};
+
/*
* Hypercall interface for KVM hypervisor
*
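
Note the layout contract encoded here: struct kvm_steal_time is exactly 64 bytes (8 + 4 + 4 + 12*4), bit 0 of the registered value is the valid flag, and bits 5:1 are reserved, so the guest physical address must be 64-byte aligned — hence the __aligned(64) per-CPU instance in kernel/paravirt.c below. A hedged sketch of composing the hypercall argument, mirroring pv_enable_steal_time() later in this patch:

    BUILD_BUG_ON(sizeof(struct kvm_steal_time) != 64);

    unsigned long pa = per_cpu_ptr_to_phys(st);   /* st is __aligned(64) */
    pa |= KVM_STEAL_PHYS_VALID;                   /* bits 63:6 = PA, bit 0 = valid */
    kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, KVM_FEATURE_STEAL_TIME, pa);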
diff --git a/arch/loongarch/include/asm/kvm_vcpu.h b/arch/loongarch/include/asm/kvm_vcpu.h
index 590a92cb5416..c416cb7125c0 100644
--- a/arch/loongarch/include/asm/kvm_vcpu.h
+++ b/arch/loongarch/include/asm/kvm_vcpu.h
@@ -120,4 +120,9 @@ static inline void kvm_write_reg(struct kvm_vcpu *vcpu, int num, unsigned long v
vcpu->arch.gprs[num] = val;
}
+static inline bool kvm_pvtime_supported(void)
+{
+ return !!sched_info_on();
+}
+
#endif /* __ASM_LOONGARCH_KVM_VCPU_H__ */
diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h
index eb09adda54b7..04a78010fc72 100644
--- a/arch/loongarch/include/asm/loongarch.h
+++ b/arch/loongarch/include/asm/loongarch.h
@@ -169,6 +169,7 @@
#define KVM_SIGNATURE "KVM\0"
#define CPUCFG_KVM_FEATURE (CPUCFG_KVM_BASE + 4)
#define KVM_FEATURE_IPI BIT(1)
+#define KVM_FEATURE_STEAL_TIME BIT(2)
#ifndef __ASSEMBLY__
@@ -877,7 +878,7 @@
#define LOONGARCH_CSR_DMWIN2 0x182 /* 64 direct map win2: MEM */
#define LOONGARCH_CSR_DMWIN3 0x183 /* 64 direct map win3: MEM */
-/* Direct Map window 0/1 */
+/* Direct Map window 0/1/2/3 */
#define CSR_DMW0_PLV0 _CONST64_(1 << 0)
#define CSR_DMW0_VSEG _CONST64_(0x8000)
#define CSR_DMW0_BASE (CSR_DMW0_VSEG << DMW_PABITS)
@@ -889,6 +890,14 @@
#define CSR_DMW1_BASE (CSR_DMW1_VSEG << DMW_PABITS)
#define CSR_DMW1_INIT (CSR_DMW1_BASE | CSR_DMW1_MAT | CSR_DMW1_PLV0)
+#define CSR_DMW2_PLV0 _CONST64_(1 << 0)
+#define CSR_DMW2_MAT _CONST64_(2 << 4)
+#define CSR_DMW2_VSEG _CONST64_(0xa000)
+#define CSR_DMW2_BASE (CSR_DMW2_VSEG << DMW_PABITS)
+#define CSR_DMW2_INIT (CSR_DMW2_BASE | CSR_DMW2_MAT | CSR_DMW2_PLV0)
+
+#define CSR_DMW3_INIT 0x0
+
/* Performance Counter registers */
#define LOONGARCH_CSR_PERFCTRL0 0x200 /* 32 perf event 0 config */
#define LOONGARCH_CSR_PERFCNTR0 0x201 /* 64 perf event 0 count value */
@@ -1053,11 +1062,14 @@
#define LOONGARCH_IOCSR_NODECNT 0x408
#define LOONGARCH_IOCSR_MISC_FUNC 0x420
+#define IOCSR_MISC_FUNC_SOFT_INT BIT_ULL(10)
#define IOCSR_MISC_FUNC_TIMER_RESET BIT_ULL(21)
#define IOCSR_MISC_FUNC_EXT_IOI_EN BIT_ULL(48)
#define LOONGARCH_IOCSR_CPUTEMP 0x428
+#define LOONGARCH_IOCSR_SMCMBX 0x51c
+
/* PerCore CSR, only accessible by local cores */
#define LOONGARCH_IOCSR_IPI_STATUS 0x1000
#define LOONGARCH_IOCSR_IPI_EN 0x1004
diff --git a/arch/loongarch/include/asm/paravirt.h b/arch/loongarch/include/asm/paravirt.h
index 0965710f47f2..dddec49671ae 100644
--- a/arch/loongarch/include/asm/paravirt.h
+++ b/arch/loongarch/include/asm/paravirt.h
@@ -18,6 +18,7 @@ static inline u64 paravirt_steal_clock(int cpu)
}
int __init pv_ipi_init(void);
+int __init pv_time_init(void);
#else
@@ -26,5 +27,9 @@ static inline int pv_ipi_init(void)
return 0;
}
+static inline int pv_time_init(void)
+{
+ return 0;
+}
#endif // CONFIG_PARAVIRT
#endif
diff --git a/arch/loongarch/include/asm/pgtable-bits.h b/arch/loongarch/include/asm/pgtable-bits.h
index 21319c1e045c..82cd3a9f094b 100644
--- a/arch/loongarch/include/asm/pgtable-bits.h
+++ b/arch/loongarch/include/asm/pgtable-bits.h
@@ -22,6 +22,7 @@
#define _PAGE_PFN_SHIFT 12
#define _PAGE_SWP_EXCLUSIVE_SHIFT 23
#define _PAGE_PFN_END_SHIFT 48
+#define _PAGE_DEVMAP_SHIFT 59
#define _PAGE_PRESENT_INVALID_SHIFT 60
#define _PAGE_NO_READ_SHIFT 61
#define _PAGE_NO_EXEC_SHIFT 62
@@ -35,6 +36,7 @@
#define _PAGE_MODIFIED (_ULCAST_(1) << _PAGE_MODIFIED_SHIFT)
#define _PAGE_PROTNONE (_ULCAST_(1) << _PAGE_PROTNONE_SHIFT)
#define _PAGE_SPECIAL (_ULCAST_(1) << _PAGE_SPECIAL_SHIFT)
+#define _PAGE_DEVMAP (_ULCAST_(1) << _PAGE_DEVMAP_SHIFT)
/* We borrow bit 23 to store the exclusive marker in swap PTEs. */
#define _PAGE_SWP_EXCLUSIVE (_ULCAST_(1) << _PAGE_SWP_EXCLUSIVE_SHIFT)
@@ -74,8 +76,8 @@
#define __READABLE (_PAGE_VALID)
#define __WRITEABLE (_PAGE_DIRTY | _PAGE_WRITE)
-#define _PAGE_CHG_MASK (_PAGE_MODIFIED | _PAGE_SPECIAL | _PFN_MASK | _CACHE_MASK | _PAGE_PLV)
-#define _HPAGE_CHG_MASK (_PAGE_MODIFIED | _PAGE_SPECIAL | _PFN_MASK | _CACHE_MASK | _PAGE_PLV | _PAGE_HUGE)
+#define _PAGE_CHG_MASK (_PAGE_MODIFIED | _PAGE_SPECIAL | _PAGE_DEVMAP | _PFN_MASK | _CACHE_MASK | _PAGE_PLV)
+#define _HPAGE_CHG_MASK (_PAGE_MODIFIED | _PAGE_SPECIAL | _PAGE_DEVMAP | _PFN_MASK | _CACHE_MASK | _PAGE_PLV | _PAGE_HUGE)
#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_NO_READ | \
_PAGE_USER | _CACHE_CC)
diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h
index af3acdf3481a..3fbf1f37c58e 100644
--- a/arch/loongarch/include/asm/pgtable.h
+++ b/arch/loongarch/include/asm/pgtable.h
@@ -424,6 +424,9 @@ static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL;
static inline pte_t pte_mkspecial(pte_t pte) { pte_val(pte) |= _PAGE_SPECIAL; return pte; }
#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
+static inline int pte_devmap(pte_t pte) { return !!(pte_val(pte) & _PAGE_DEVMAP); }
+static inline pte_t pte_mkdevmap(pte_t pte) { pte_val(pte) |= _PAGE_DEVMAP; return pte; }
+
#define pte_accessible pte_accessible
static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
{
@@ -467,8 +470,8 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf,
#define update_mmu_cache(vma, addr, ptep) \
update_mmu_cache_range(NULL, vma, addr, ptep, 1)
-#define __HAVE_ARCH_UPDATE_MMU_TLB
-#define update_mmu_tlb update_mmu_cache
+#define update_mmu_tlb_range(vma, addr, ptep, nr) \
+ update_mmu_cache_range(NULL, vma, addr, ptep, nr)
static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp)
@@ -558,6 +561,17 @@ static inline pmd_t pmd_mkyoung(pmd_t pmd)
return pmd;
}
+static inline int pmd_devmap(pmd_t pmd)
+{
+ return !!(pmd_val(pmd) & _PAGE_DEVMAP);
+}
+
+static inline pmd_t pmd_mkdevmap(pmd_t pmd)
+{
+ pmd_val(pmd) |= _PAGE_DEVMAP;
+ return pmd;
+}
+
static inline struct page *pmd_page(pmd_t pmd)
{
if (pmd_trans_huge(pmd))
@@ -613,6 +627,11 @@ static inline long pmd_protnone(pmd_t pmd)
#define pmd_leaf(pmd) ((pmd_val(pmd) & _PAGE_HUGE) != 0)
#define pud_leaf(pud) ((pud_val(pud) & _PAGE_HUGE) != 0)
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define pud_devmap(pud) (0)
+#define pgd_devmap(pgd) (0)
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
/*
* We provide our own get_unmapped area to cope with the virtual aliasing
* constraints placed on us by the cache architecture.
diff --git a/arch/loongarch/include/asm/setup.h b/arch/loongarch/include/asm/setup.h
index ee52fb1e9963..3c2fb16b11b6 100644
--- a/arch/loongarch/include/asm/setup.h
+++ b/arch/loongarch/include/asm/setup.h
@@ -34,6 +34,11 @@ extern long __la_abs_end;
extern long __rela_dyn_begin;
extern long __rela_dyn_end;
+#ifdef CONFIG_RELR
+extern long __relr_dyn_begin;
+extern long __relr_dyn_end;
+#endif
+
extern unsigned long __init relocate_kernel(void);
#endif
diff --git a/arch/loongarch/include/asm/smp.h b/arch/loongarch/include/asm/smp.h
index 278700cfee88..50db503f44e3 100644
--- a/arch/loongarch/include/asm/smp.h
+++ b/arch/loongarch/include/asm/smp.h
@@ -69,9 +69,11 @@ extern int __cpu_logical_map[NR_CPUS];
#define ACTION_BOOT_CPU 0
#define ACTION_RESCHEDULE 1
#define ACTION_CALL_FUNCTION 2
+#define ACTION_IRQ_WORK 3
#define SMP_BOOT_CPU BIT(ACTION_BOOT_CPU)
#define SMP_RESCHEDULE BIT(ACTION_RESCHEDULE)
#define SMP_CALL_FUNCTION BIT(ACTION_CALL_FUNCTION)
+#define SMP_IRQ_WORK BIT(ACTION_IRQ_WORK)
struct secondary_data {
unsigned long stack;
diff --git a/arch/loongarch/include/asm/stackframe.h b/arch/loongarch/include/asm/stackframe.h
index d9eafd3ee3d1..66736837085b 100644
--- a/arch/loongarch/include/asm/stackframe.h
+++ b/arch/loongarch/include/asm/stackframe.h
@@ -38,6 +38,17 @@
cfi_restore \reg \offset \docfi
.endm
+ .macro SETUP_DMWINS temp
+ li.d \temp, CSR_DMW0_INIT # WUC, PLV0, 0x8000 xxxx xxxx xxxx
+ csrwr \temp, LOONGARCH_CSR_DMWIN0
+ li.d \temp, CSR_DMW1_INIT # CAC, PLV0, 0x9000 xxxx xxxx xxxx
+ csrwr \temp, LOONGARCH_CSR_DMWIN1
+ li.d \temp, CSR_DMW2_INIT # WUC, PLV0, 0xa000 xxxx xxxx xxxx
+ csrwr \temp, LOONGARCH_CSR_DMWIN2
+ li.d \temp, CSR_DMW3_INIT # 0x0, unused
+ csrwr \temp, LOONGARCH_CSR_DMWIN3
+ .endm
+
/* Jump to the runtime virtual address. */
.macro JUMP_VIRT_ADDR temp1 temp2
li.d \temp1, CACHE_BASE
diff --git a/arch/loongarch/include/asm/unistd.h b/arch/loongarch/include/asm/unistd.h
index cfddb0116a8c..e2c0f3d86c7b 100644
--- a/arch/loongarch/include/asm/unistd.h
+++ b/arch/loongarch/include/asm/unistd.h
@@ -8,4 +8,7 @@
#include <uapi/asm/unistd.h>
+#define __ARCH_WANT_NEW_STAT
+#define __ARCH_WANT_SYS_CLONE
+
#define NR_syscalls (__NR_syscalls)
diff --git a/arch/loongarch/include/asm/uprobes.h b/arch/loongarch/include/asm/uprobes.h
index c8f59983f702..99a0d198927f 100644
--- a/arch/loongarch/include/asm/uprobes.h
+++ b/arch/loongarch/include/asm/uprobes.h
@@ -9,10 +9,10 @@ typedef u32 uprobe_opcode_t;
#define MAX_UINSN_BYTES 8
#define UPROBE_XOL_SLOT_BYTES MAX_UINSN_BYTES
-#define UPROBE_SWBP_INSN larch_insn_gen_break(BRK_UPROBE_BP)
+#define UPROBE_SWBP_INSN __emit_break(BRK_UPROBE_BP)
#define UPROBE_SWBP_INSN_SIZE LOONGARCH_INSN_SIZE
-#define UPROBE_XOLBP_INSN larch_insn_gen_break(BRK_UPROBE_XOLBP)
+#define UPROBE_XOLBP_INSN __emit_break(BRK_UPROBE_XOLBP)
struct arch_uprobe {
unsigned long resume_era;
diff --git a/arch/loongarch/include/uapi/asm/Kbuild b/arch/loongarch/include/uapi/asm/Kbuild
index 4aa680ca2e5f..c6d141d7b7d7 100644
--- a/arch/loongarch/include/uapi/asm/Kbuild
+++ b/arch/loongarch/include/uapi/asm/Kbuild
@@ -1,2 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
+syscall-y += unistd_64.h
+
generic-y += kvm_para.h
diff --git a/arch/loongarch/include/uapi/asm/kvm.h b/arch/loongarch/include/uapi/asm/kvm.h
index f9abef382317..ddc5cab0ffd0 100644
--- a/arch/loongarch/include/uapi/asm/kvm.h
+++ b/arch/loongarch/include/uapi/asm/kvm.h
@@ -81,7 +81,11 @@ struct kvm_fpu {
#define LOONGARCH_REG_64(TYPE, REG) (TYPE | KVM_REG_SIZE_U64 | (REG << LOONGARCH_REG_SHIFT))
#define KVM_IOC_CSRID(REG) LOONGARCH_REG_64(KVM_REG_LOONGARCH_CSR, REG)
#define KVM_IOC_CPUCFG(REG) LOONGARCH_REG_64(KVM_REG_LOONGARCH_CPUCFG, REG)
+
+/* Device Control API on vcpu fd */
#define KVM_LOONGARCH_VCPU_CPUCFG 0
+#define KVM_LOONGARCH_VCPU_PVTIME_CTRL 1
+#define KVM_LOONGARCH_VCPU_PVTIME_GPA 0
struct kvm_debug_exit_arch {
};
diff --git a/arch/loongarch/include/uapi/asm/unistd.h b/arch/loongarch/include/uapi/asm/unistd.h
index fcb668984f03..1f01980f9c94 100644
--- a/arch/loongarch/include/uapi/asm/unistd.h
+++ b/arch/loongarch/include/uapi/asm/unistd.h
@@ -1,5 +1,3 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#define __ARCH_WANT_SYS_CLONE
-#define __ARCH_WANT_SYS_CLONE3
-#include <asm-generic/unistd.h>
+#include <asm/unistd_64.h>
diff --git a/arch/loongarch/kernel/Makefile.syscalls b/arch/loongarch/kernel/Makefile.syscalls
new file mode 100644
index 000000000000..523bb411a3bc
--- /dev/null
+++ b/arch/loongarch/kernel/Makefile.syscalls
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+syscall_abis_64 += newstat
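
This three-line Makefile plugs LoongArch into the shared syscall-table machinery: the syscall-y entries in the two Kbuild hunks elsewhere in this patch make kbuild generate asm/syscall_table_64.h and uapi asm/unistd_64.h from the common table, and adding the newstat ABI keeps the classic stat family that __ARCH_WANT_NEW_STAT expects. For illustration, table entries look roughly like this (format as in scripts/syscall.tbl; excerpt paraphrased, not part of this patch):

    # number  abi      name        entry point
    79        newstat  newfstatat  sys_newfstatat
    80        newstat  fstat       sys_newfstat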
diff --git a/arch/loongarch/kernel/acpi.c b/arch/loongarch/kernel/acpi.c
index 5cf59c617126..929a497c987e 100644
--- a/arch/loongarch/kernel/acpi.c
+++ b/arch/loongarch/kernel/acpi.c
@@ -57,15 +57,22 @@ void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
return ioremap_cache(phys, size);
}
+static int cpu_enumerated = 0;
+
#ifdef CONFIG_SMP
static int set_processor_mask(u32 id, u32 flags)
{
-
+ int nr_cpus;
int cpu, cpuid = id;
- if (num_processors >= nr_cpu_ids) {
- pr_warn(PREFIX "nr_cpus/possible_cpus limit of %i reached."
- " processor 0x%x ignored.\n", nr_cpu_ids, cpuid);
+ if (!cpu_enumerated)
+ nr_cpus = NR_CPUS;
+ else
+ nr_cpus = nr_cpu_ids;
+
+ if (num_processors >= nr_cpus) {
+ pr_warn(PREFIX "nr_cpus limit of %i reached."
+ " processor 0x%x ignored.\n", nr_cpus, cpuid);
return -ENODEV;
@@ -73,11 +80,13 @@ static int set_processor_mask(u32 id, u32 flags)
if (cpuid == loongson_sysconf.boot_cpu_id)
cpu = 0;
else
- cpu = cpumask_next_zero(-1, cpu_present_mask);
+ cpu = find_first_zero_bit(cpumask_bits(cpu_present_mask), NR_CPUS);
+
+ if (!cpu_enumerated)
+ set_cpu_possible(cpu, true);
if (flags & ACPI_MADT_ENABLED) {
num_processors++;
- set_cpu_possible(cpu, true);
set_cpu_present(cpu, true);
__cpu_number_map[cpuid] = cpu;
__cpu_logical_map[cpu] = cpuid;
@@ -138,6 +147,7 @@ static void __init acpi_process_madt(void)
acpi_table_parse_madt(ACPI_MADT_TYPE_EIO_PIC,
acpi_parse_eio_master, MAX_IO_PICS);
+ cpu_enumerated = 1;
loongson_sysconf.nr_cpus = num_processors;
}
diff --git a/arch/loongarch/kernel/head.S b/arch/loongarch/kernel/head.S
index 4677ea8fa8e9..506a99a5bbc7 100644
--- a/arch/loongarch/kernel/head.S
+++ b/arch/loongarch/kernel/head.S
@@ -44,11 +44,7 @@ SYM_DATA(kernel_fsize, .long _kernel_fsize);
SYM_CODE_START(kernel_entry) # kernel entry point
/* Config direct window and set PG */
- li.d t0, CSR_DMW0_INIT # UC, PLV0, 0x8000 xxxx xxxx xxxx
- csrwr t0, LOONGARCH_CSR_DMWIN0
- li.d t0, CSR_DMW1_INIT # CA, PLV0, 0x9000 xxxx xxxx xxxx
- csrwr t0, LOONGARCH_CSR_DMWIN1
-
+ SETUP_DMWINS t0
JUMP_VIRT_ADDR t0, t1
/* Enable PG */
@@ -124,11 +120,8 @@ SYM_CODE_END(kernel_entry)
* function after setting up the stack and tp registers.
*/
SYM_CODE_START(smpboot_entry)
- li.d t0, CSR_DMW0_INIT # UC, PLV0
- csrwr t0, LOONGARCH_CSR_DMWIN0
- li.d t0, CSR_DMW1_INIT # CA, PLV0
- csrwr t0, LOONGARCH_CSR_DMWIN1
+ SETUP_DMWINS t0
JUMP_VIRT_ADDR t0, t1
#ifdef CONFIG_PAGE_SIZE_4KB
diff --git a/arch/loongarch/kernel/hw_breakpoint.c b/arch/loongarch/kernel/hw_breakpoint.c
index 621ad7634df7..a6e4b605bfa8 100644
--- a/arch/loongarch/kernel/hw_breakpoint.c
+++ b/arch/loongarch/kernel/hw_breakpoint.c
@@ -221,7 +221,7 @@ static int hw_breakpoint_control(struct perf_event *bp,
}
enable = csr_read64(LOONGARCH_CSR_CRMD);
csr_write64(CSR_CRMD_WE | enable, LOONGARCH_CSR_CRMD);
- if (bp->hw.target)
+ if (bp->hw.target && test_tsk_thread_flag(bp->hw.target, TIF_LOAD_WATCH))
regs->csr_prmd |= CSR_PRMD_PWE;
break;
case HW_BREAKPOINT_UNINSTALL:
diff --git a/arch/loongarch/kernel/kprobes.c b/arch/loongarch/kernel/kprobes.c
index 17b040bd6067..8ba391cfabb0 100644
--- a/arch/loongarch/kernel/kprobes.c
+++ b/arch/loongarch/kernel/kprobes.c
@@ -4,8 +4,8 @@
#include <linux/preempt.h>
#include <asm/break.h>
-#define KPROBE_BP_INSN larch_insn_gen_break(BRK_KPROBE_BP)
-#define KPROBE_SSTEPBP_INSN larch_insn_gen_break(BRK_KPROBE_SSTEPBP)
+#define KPROBE_BP_INSN __emit_break(BRK_KPROBE_BP)
+#define KPROBE_SSTEPBP_INSN __emit_break(BRK_KPROBE_SSTEPBP)
DEFINE_PER_CPU(struct kprobe *, current_kprobe);
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
diff --git a/arch/loongarch/kernel/paravirt.c b/arch/loongarch/kernel/paravirt.c
index 1633ed4f692f..9c9b75b76f62 100644
--- a/arch/loongarch/kernel/paravirt.c
+++ b/arch/loongarch/kernel/paravirt.c
@@ -2,13 +2,17 @@
#include <linux/export.h>
#include <linux/types.h>
#include <linux/interrupt.h>
+#include <linux/irq_work.h>
#include <linux/jump_label.h>
#include <linux/kvm_para.h>
+#include <linux/reboot.h>
#include <linux/static_call.h>
#include <asm/paravirt.h>
+static int has_steal_clock;
struct static_key paravirt_steal_enabled;
struct static_key paravirt_steal_rq_enabled;
+static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
static u64 native_steal_clock(int cpu)
{
@@ -17,6 +21,34 @@ static u64 native_steal_clock(int cpu)
DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock);
+static bool steal_acc = true;
+
+static int __init parse_no_stealacc(char *arg)
+{
+ steal_acc = false;
+ return 0;
+}
+early_param("no-steal-acc", parse_no_stealacc);
+
+static u64 paravt_steal_clock(int cpu)
+{
+ int version;
+ u64 steal;
+ struct kvm_steal_time *src;
+
+ src = &per_cpu(steal_time, cpu);
+ do {
+
+ version = src->version;
+ virt_rmb(); /* Make sure that the version is read before the steal */
+ steal = src->steal;
+ virt_rmb(); /* Make sure that the steal is read before the next version */
+
+ } while ((version & 1) || (version != src->version));
+
+ return steal;
+}
+
#ifdef CONFIG_SMP
static void pv_send_ipi_single(int cpu, unsigned int action)
{
@@ -97,6 +129,11 @@ static irqreturn_t pv_ipi_interrupt(int irq, void *dev)
info->ipi_irqs[IPI_CALL_FUNCTION]++;
}
+ if (action & SMP_IRQ_WORK) {
+ irq_work_run();
+ info->ipi_irqs[IPI_IRQ_WORK]++;
+ }
+
return IRQ_HANDLED;
}
@@ -149,3 +186,117 @@ int __init pv_ipi_init(void)
return 0;
}
+
+static int pv_enable_steal_time(void)
+{
+ int cpu = smp_processor_id();
+ unsigned long addr;
+ struct kvm_steal_time *st;
+
+ if (!has_steal_clock)
+ return -EPERM;
+
+ st = &per_cpu(steal_time, cpu);
+ addr = per_cpu_ptr_to_phys(st);
+
+ /* The whole structure kvm_steal_time should be in one page */
+ if (PFN_DOWN(addr) != PFN_DOWN(addr + sizeof(*st))) {
+ pr_warn("Illegal PV steal time addr %lx\n", addr);
+ return -EFAULT;
+ }
+
+ addr |= KVM_STEAL_PHYS_VALID;
+ kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, KVM_FEATURE_STEAL_TIME, addr);
+
+ return 0;
+}
+
+static void pv_disable_steal_time(void)
+{
+ if (has_steal_clock)
+ kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, KVM_FEATURE_STEAL_TIME, 0);
+}
+
+#ifdef CONFIG_SMP
+static int pv_time_cpu_online(unsigned int cpu)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ pv_enable_steal_time();
+ local_irq_restore(flags);
+
+ return 0;
+}
+
+static int pv_time_cpu_down_prepare(unsigned int cpu)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ pv_disable_steal_time();
+ local_irq_restore(flags);
+
+ return 0;
+}
+#endif
+
+static void pv_cpu_reboot(void *unused)
+{
+ pv_disable_steal_time();
+}
+
+static int pv_reboot_notify(struct notifier_block *nb, unsigned long code, void *unused)
+{
+ on_each_cpu(pv_cpu_reboot, NULL, 1);
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block pv_reboot_nb = {
+ .notifier_call = pv_reboot_notify,
+};
+
+int __init pv_time_init(void)
+{
+ int r, feature;
+
+ if (!cpu_has_hypervisor)
+ return 0;
+ if (!kvm_para_available())
+ return 0;
+
+ feature = read_cpucfg(CPUCFG_KVM_FEATURE);
+ if (!(feature & KVM_FEATURE_STEAL_TIME))
+ return 0;
+
+ has_steal_clock = 1;
+ r = pv_enable_steal_time();
+ if (r < 0) {
+ has_steal_clock = 0;
+ return 0;
+ }
+ register_reboot_notifier(&pv_reboot_nb);
+
+#ifdef CONFIG_SMP
+ r = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+ "loongarch/pv_time:online",
+ pv_time_cpu_online, pv_time_cpu_down_prepare);
+ if (r < 0) {
+ has_steal_clock = 0;
+ pr_err("Failed to install cpu hotplug callbacks\n");
+ return r;
+ }
+#endif
+
+ static_call_update(pv_steal_clock, paravt_steal_clock);
+
+ static_key_slow_inc(&paravirt_steal_enabled);
+#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
+ if (steal_acc)
+ static_key_slow_inc(&paravirt_steal_rq_enabled);
+#endif
+
+ pr_info("Using paravirt steal-time\n");
+
+ return 0;
+}
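
The version field above pairs with the host-side writer (kvm_update_stolen_time() in kvm/vcpu.c below) as a lock-free seqcount: the host makes version odd before updating steal and even again afterwards, and the guest retries while it sees an odd or changed version. Schematically (a hedged summary of the two sides, not new code):

    /*  host writer (vcpu.c)           guest reader (paravt_steal_clock)
     *  version++;     // now odd      do {
     *  smp_wmb();                             v = src->version;
     *  st->steal += delta;                    virt_rmb();
     *  smp_wmb();                             s = src->steal;
     *  version++;     // even again           virt_rmb();
     *                                 } while ((v & 1) || v != src->version);
     */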
diff --git a/arch/loongarch/kernel/ptrace.c b/arch/loongarch/kernel/ptrace.c
index 200109de1971..19dc6eff45cc 100644
--- a/arch/loongarch/kernel/ptrace.c
+++ b/arch/loongarch/kernel/ptrace.c
@@ -589,6 +589,7 @@ static int ptrace_hbp_set_ctrl(unsigned int note_type,
struct perf_event *bp;
struct perf_event_attr attr;
struct arch_hw_breakpoint_ctrl ctrl;
+ struct thread_info *ti = task_thread_info(tsk);
bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
if (IS_ERR(bp))
@@ -613,8 +614,10 @@ static int ptrace_hbp_set_ctrl(unsigned int note_type,
if (err)
return err;
attr.disabled = 0;
+ set_ti_thread_flag(ti, TIF_LOAD_WATCH);
} else {
attr.disabled = 1;
+ clear_ti_thread_flag(ti, TIF_LOAD_WATCH);
}
return modify_user_hw_breakpoint(bp, &attr);
diff --git a/arch/loongarch/kernel/relocate.c b/arch/loongarch/kernel/relocate.c
index 1acfa704c8d0..50c469067f3a 100644
--- a/arch/loongarch/kernel/relocate.c
+++ b/arch/loongarch/kernel/relocate.c
@@ -13,6 +13,7 @@
#include <asm/bootinfo.h>
#include <asm/early_ioremap.h>
#include <asm/inst.h>
+#include <asm/io.h>
#include <asm/sections.h>
#include <asm/setup.h>
@@ -34,11 +35,27 @@ static inline void __init relocate_relative(void)
if (rela->r_info != R_LARCH_RELATIVE)
continue;
- if (relocated_addr >= VMLINUX_LOAD_ADDRESS)
- relocated_addr = (Elf64_Addr)RELOCATED(relocated_addr);
-
+ relocated_addr = (Elf64_Addr)RELOCATED(relocated_addr);
*(Elf64_Addr *)RELOCATED(addr) = relocated_addr;
}
+
+#ifdef CONFIG_RELR
+ u64 *addr = NULL;
+ u64 *relr = (u64 *)&__relr_dyn_begin;
+ u64 *relr_end = (u64 *)&__relr_dyn_end;
+
+ for ( ; relr < relr_end; relr++) {
+ if ((*relr & 1) == 0) {
+ addr = (u64 *)(*relr + reloc_offset);
+ *addr++ += reloc_offset;
+ } else {
+ for (u64 *p = addr, r = *relr >> 1; r; p++, r >>= 1)
+ if (r & 1)
+ *p += reloc_offset;
+ addr += 63;
+ }
+ }
+#endif
}
static inline void __init relocate_absolute(long random_offset)
@@ -123,6 +140,32 @@ static inline __init bool kaslr_disabled(void)
if (str == boot_command_line || (str > boot_command_line && *(str - 1) == ' '))
return true;
+#ifdef CONFIG_HIBERNATION
+ str = strstr(builtin_cmdline, "nohibernate");
+ if (str == builtin_cmdline || (str > builtin_cmdline && *(str - 1) == ' '))
+ return false;
+
+ str = strstr(boot_command_line, "nohibernate");
+ if (str == boot_command_line || (str > boot_command_line && *(str - 1) == ' '))
+ return false;
+
+ str = strstr(builtin_cmdline, "noresume");
+ if (str == builtin_cmdline || (str > builtin_cmdline && *(str - 1) == ' '))
+ return false;
+
+ str = strstr(boot_command_line, "noresume");
+ if (str == boot_command_line || (str > boot_command_line && *(str - 1) == ' '))
+ return false;
+
+ str = strstr(builtin_cmdline, "resume=");
+ if (str == builtin_cmdline || (str > builtin_cmdline && *(str - 1) == ' '))
+ return true;
+
+ str = strstr(boot_command_line, "resume=");
+ if (str == boot_command_line || (str > boot_command_line && *(str - 1) == ' '))
+ return true;
+#endif
+
return false;
}
@@ -170,7 +213,7 @@ unsigned long __init relocate_kernel(void)
unsigned long kernel_length;
unsigned long random_offset = 0;
void *location_new = _text; /* Default to original kernel start */
- char *cmdline = early_ioremap(fw_arg1, COMMAND_LINE_SIZE); /* Boot command line is passed in fw_arg1 */
+ char *cmdline = early_memremap_ro(fw_arg1, COMMAND_LINE_SIZE); /* Boot command line is passed in fw_arg1 */
strscpy(boot_command_line, cmdline, COMMAND_LINE_SIZE);
@@ -182,6 +225,7 @@ unsigned long __init relocate_kernel(void)
random_offset = (unsigned long)location_new - (unsigned long)(_text);
#endif
reloc_offset = (unsigned long)_text - VMLINUX_LOAD_ADDRESS;
+ early_memunmap(cmdline, COMMAND_LINE_SIZE);
if (random_offset) {
kernel_length = (long)(_end) - (long)(_text);
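
On the new CONFIG_RELR loop in relocate_relative() above: RELR is a compact relocation encoding where an even entry is the address of the next relocation slot (patched immediately), and an odd entry is a bitmap whose bits 1..63 mark which of the following 63 word-sized slots also need reloc_offset added. The same decoder as a standalone sketch, equivalent to the hunk above:

    /* Sketch: add 'offset' to every slot encoded in a RELR range. */
    static void apply_relr(u64 *relr, u64 *end, long offset)
    {
            u64 *where = NULL;

            for (; relr < end; relr++) {
                    if (!(*relr & 1)) {             /* address entry */
                            where = (u64 *)(*relr + offset);
                            *where++ += offset;     /* patch first slot */
                    } else {                        /* bitmap entry */
                            for (u64 *p = where, r = *relr >> 1; r; p++, r >>= 1)
                                    if (r & 1)
                                            *p += offset;
                            where += 63;            /* bitmap covers 63 slots */
                    }
            }
    }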
diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c
index 3d048f1be143..0f0740f0be27 100644
--- a/arch/loongarch/kernel/setup.c
+++ b/arch/loongarch/kernel/setup.c
@@ -576,8 +576,10 @@ static void __init prefill_possible_map(void)
for (i = 0; i < possible; i++)
set_cpu_possible(i, true);
- for (; i < NR_CPUS; i++)
+ for (; i < NR_CPUS; i++) {
+ set_cpu_present(i, false);
set_cpu_possible(i, false);
+ }
set_nr_cpu_ids(possible);
}
diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c
index 1436d2465939..ca405ab86aae 100644
--- a/arch/loongarch/kernel/smp.c
+++ b/arch/loongarch/kernel/smp.c
@@ -13,6 +13,7 @@
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/irq_work.h>
#include <linux/profile.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
@@ -70,6 +71,7 @@ static DEFINE_PER_CPU(int, cpu_state);
static const char *ipi_types[NR_IPI] __tracepoint_string = {
[IPI_RESCHEDULE] = "Rescheduling interrupts",
[IPI_CALL_FUNCTION] = "Function call interrupts",
+ [IPI_IRQ_WORK] = "IRQ work interrupts",
};
void show_ipi_list(struct seq_file *p, int prec)
@@ -217,6 +219,13 @@ void arch_smp_send_reschedule(int cpu)
}
EXPORT_SYMBOL_GPL(arch_smp_send_reschedule);
+#ifdef CONFIG_IRQ_WORK
+void arch_irq_work_raise(void)
+{
+ mp_ops.send_ipi_single(smp_processor_id(), ACTION_IRQ_WORK);
+}
+#endif
+
static irqreturn_t loongson_ipi_interrupt(int irq, void *dev)
{
unsigned int action;
@@ -234,6 +243,11 @@ static irqreturn_t loongson_ipi_interrupt(int irq, void *dev)
per_cpu(irq_stat, cpu).ipi_irqs[IPI_CALL_FUNCTION]++;
}
+ if (action & SMP_IRQ_WORK) {
+ irq_work_run();
+ per_cpu(irq_stat, cpu).ipi_irqs[IPI_IRQ_WORK]++;
+ }
+
return IRQ_HANDLED;
}
@@ -271,11 +285,10 @@ static void __init fdt_smp_setup(void)
if (cpuid >= nr_cpu_ids)
continue;
- if (cpuid == loongson_sysconf.boot_cpu_id) {
+ if (cpuid == loongson_sysconf.boot_cpu_id)
cpu = 0;
- } else {
- cpu = cpumask_next_zero(-1, cpu_present_mask);
- }
+ else
+ cpu = find_first_zero_bit(cpumask_bits(cpu_present_mask), NR_CPUS);
num_processors++;
set_cpu_possible(cpu, true);
diff --git a/arch/loongarch/kernel/syscall.c b/arch/loongarch/kernel/syscall.c
index 8801611143ab..ba5d0930a74f 100644
--- a/arch/loongarch/kernel/syscall.c
+++ b/arch/loongarch/kernel/syscall.c
@@ -9,17 +9,21 @@
#include <linux/entry-common.h>
#include <linux/errno.h>
#include <linux/linkage.h>
+#include <linux/objtool.h>
+#include <linux/randomize_kstack.h>
#include <linux/syscalls.h>
#include <linux/unistd.h>
#include <asm/asm.h>
#include <asm/exception.h>
+#include <asm/loongarch.h>
#include <asm/signal.h>
#include <asm/switch_to.h>
#include <asm-generic/syscalls.h>
#undef __SYSCALL
#define __SYSCALL(nr, call) [nr] = (call),
+#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native)
SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len, unsigned long,
prot, unsigned long, flags, unsigned long, fd, unsigned long, offset)
@@ -32,13 +36,13 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len, unsigned long,
void *sys_call_table[__NR_syscalls] = {
[0 ... __NR_syscalls - 1] = sys_ni_syscall,
-#include <asm/unistd.h>
+#include <asm/syscall_table_64.h>
};
typedef long (*sys_call_fn)(unsigned long, unsigned long,
unsigned long, unsigned long, unsigned long, unsigned long);
-void noinstr do_syscall(struct pt_regs *regs)
+void noinstr __no_stack_protector do_syscall(struct pt_regs *regs)
{
unsigned long nr;
sys_call_fn syscall_fn;
@@ -54,11 +58,28 @@ void noinstr do_syscall(struct pt_regs *regs)
nr = syscall_enter_from_user_mode(regs, nr);
+ add_random_kstack_offset();
+
if (nr < NR_syscalls) {
syscall_fn = sys_call_table[nr];
regs->regs[4] = syscall_fn(regs->orig_a0, regs->regs[5], regs->regs[6],
regs->regs[7], regs->regs[8], regs->regs[9]);
}
+ /*
+ * This value will get limited by KSTACK_OFFSET_MAX(), which is 10
+ * bits. The actual entropy will be further reduced by the compiler
+ * when applying stack alignment constraints: 16-bytes (i.e. 4-bits)
+ * aligned, which will remove the 4 low bits from any entropy chosen
+ * here.
+ *
+ * The resulting 6 bits of entropy is seen in SP[9:4].
+ */
+ choose_random_kstack_offset(drdtime());
+
syscall_exit_to_user_mode(regs);
}
+
+#ifdef CONFIG_RANDOMIZE_KSTACK_OFFSET
+STACK_FRAME_NON_STANDARD(do_syscall);
+#endif
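
For reference, the two helpers come from <linux/randomize_kstack.h>: add_random_kstack_offset() consumes the per-CPU offset banked on the previous syscall by alloca()-ing that many bytes (masked by KSTACK_OFFSET_MAX() to 10 bits), while choose_random_kstack_offset(drdtime()) mixes cycle-counter entropy in for the next one. Roughly (a paraphrased sketch of the generic macros, hedged):

    /* Paraphrased from include/linux/randomize_kstack.h: */
    u32 offset = raw_cpu_read(kstack_offset);
    u8 *ptr = __builtin_alloca(KSTACK_OFFSET_MAX(offset));   /* offset & 0x3FF */
    asm volatile("" :: "r"(ptr) : "memory");                 /* keep the alloca */
    /* ... dispatch the syscall on the shifted stack ... */
    raw_cpu_write(kstack_offset, ror32(offset, 5) ^ drdtime());

The STACK_FRAME_NON_STANDARD(do_syscall) annotation is then needed because the dynamic alloca() gives the function a frame layout objtool cannot validate, and __no_stack_protector keeps the stack protector from interfering with the randomized frame.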
diff --git a/arch/loongarch/kernel/time.c b/arch/loongarch/kernel/time.c
index fd5354f9be7c..46d7d40c87e3 100644
--- a/arch/loongarch/kernel/time.c
+++ b/arch/loongarch/kernel/time.c
@@ -15,6 +15,7 @@
#include <asm/cpu-features.h>
#include <asm/loongarch.h>
+#include <asm/paravirt.h>
#include <asm/time.h>
u64 cpu_clock_freq;
@@ -214,4 +215,5 @@ void __init time_init(void)
constant_clockevent_init();
constant_clocksource_init();
+ pv_time_init();
}
diff --git a/arch/loongarch/kernel/vmlinux.lds.S b/arch/loongarch/kernel/vmlinux.lds.S
index 3c7595342730..08ea921cdec1 100644
--- a/arch/loongarch/kernel/vmlinux.lds.S
+++ b/arch/loongarch/kernel/vmlinux.lds.S
@@ -113,6 +113,14 @@ SECTIONS
__rela_dyn_end = .;
}
+#ifdef CONFIG_RELR
+ .relr.dyn : ALIGN(8) {
+ __relr_dyn_begin = .;
+ *(.relr.dyn)
+ __relr_dyn_end = .;
+ }
+#endif
+
.data.rel : { *(.data.rel*) }
#ifdef CONFIG_RELOCATABLE
diff --git a/arch/loongarch/kvm/Kconfig b/arch/loongarch/kvm/Kconfig
index c4ef2b4d9797..248744b4d086 100644
--- a/arch/loongarch/kvm/Kconfig
+++ b/arch/loongarch/kvm/Kconfig
@@ -29,6 +29,7 @@ config KVM
select KVM_MMIO
select HAVE_KVM_READONLY_MEM
select KVM_XFER_TO_GUEST_WORK
+ select SCHED_INFO
help
Support hosting virtualized guest machines using
hardware virtualization extensions. You will need
diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c
index a68573e091c0..ea73f9dc2cc6 100644
--- a/arch/loongarch/kvm/exit.c
+++ b/arch/loongarch/kvm/exit.c
@@ -24,7 +24,7 @@
static int kvm_emu_cpucfg(struct kvm_vcpu *vcpu, larch_inst inst)
{
int rd, rj;
- unsigned int index;
+ unsigned int index, ret;
if (inst.reg2_format.opcode != cpucfg_op)
return EMULATE_FAIL;
@@ -50,7 +50,10 @@ static int kvm_emu_cpucfg(struct kvm_vcpu *vcpu, larch_inst inst)
vcpu->arch.gprs[rd] = *(unsigned int *)KVM_SIGNATURE;
break;
case CPUCFG_KVM_FEATURE:
- vcpu->arch.gprs[rd] = KVM_FEATURE_IPI;
+ ret = KVM_FEATURE_IPI;
+ if (kvm_pvtime_supported())
+ ret |= KVM_FEATURE_STEAL_TIME;
+ vcpu->arch.gprs[rd] = ret;
break;
default:
vcpu->arch.gprs[rd] = 0;
@@ -687,6 +690,34 @@ static int kvm_handle_fpu_disabled(struct kvm_vcpu *vcpu)
return RESUME_GUEST;
}
+static long kvm_save_notify(struct kvm_vcpu *vcpu)
+{
+ unsigned long id, data;
+
+ id = kvm_read_reg(vcpu, LOONGARCH_GPR_A1);
+ data = kvm_read_reg(vcpu, LOONGARCH_GPR_A2);
+ switch (id) {
+ case KVM_FEATURE_STEAL_TIME:
+ if (!kvm_pvtime_supported())
+ return KVM_HCALL_INVALID_CODE;
+
+ if (data & ~(KVM_STEAL_PHYS_MASK | KVM_STEAL_PHYS_VALID))
+ return KVM_HCALL_INVALID_PARAMETER;
+
+ vcpu->arch.st.guest_addr = data;
+ if (!(data & KVM_STEAL_PHYS_VALID))
+ break;
+
+ vcpu->arch.st.last_steal = current->sched_info.run_delay;
+ kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
+ break;
+ default:
+ break;
+ };
+
+ return 0;
+};
+
/*
* kvm_handle_lsx_disabled() - Guest used LSX while disabled in root.
* @vcpu: Virtual CPU context.
@@ -758,6 +789,9 @@ static void kvm_handle_service(struct kvm_vcpu *vcpu)
kvm_send_pv_ipi(vcpu);
ret = KVM_HCALL_SUCCESS;
break;
+ case KVM_HCALL_FUNC_NOTIFY:
+ ret = kvm_save_notify(vcpu);
+ break;
default:
ret = KVM_HCALL_INVALID_CODE;
break;
diff --git a/arch/loongarch/kvm/main.c b/arch/loongarch/kvm/main.c
index 86a2f2d0cb27..844736b99d38 100644
--- a/arch/loongarch/kvm/main.c
+++ b/arch/loongarch/kvm/main.c
@@ -242,6 +242,7 @@ void kvm_check_vpid(struct kvm_vcpu *vcpu)
kvm_update_vpid(vcpu, cpu);
trace_kvm_vpid_change(vcpu, vcpu->arch.vpid);
vcpu->cpu = cpu;
+ kvm_clear_request(KVM_REQ_TLB_FLUSH_GPA, vcpu);
}
/* Restore GSTAT(0x50).vpid */
diff --git a/arch/loongarch/kvm/mmu.c b/arch/loongarch/kvm/mmu.c
index 98883aa23ab8..2634a9e8d82c 100644
--- a/arch/loongarch/kvm/mmu.c
+++ b/arch/loongarch/kvm/mmu.c
@@ -163,6 +163,7 @@ static kvm_pte_t *kvm_populate_gpa(struct kvm *kvm,
child = kvm_mmu_memory_cache_alloc(cache);
_kvm_pte_init(child, ctx.invalid_ptes[ctx.level - 1]);
+ smp_wmb(); /* Make pte visible before pmd */
kvm_set_pte(entry, __pa(child));
} else if (kvm_pte_huge(*entry)) {
return entry;
@@ -444,6 +445,17 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
enum kvm_mr_change change)
{
int needs_flush;
+ u32 old_flags = old ? old->flags : 0;
+ u32 new_flags = new ? new->flags : 0;
+ bool log_dirty_pages = new_flags & KVM_MEM_LOG_DIRTY_PAGES;
+
+ /* Only track memslot flags changed */
+ if (change != KVM_MR_FLAGS_ONLY)
+ return;
+
+ /* Discard dirty page tracking on readonly memslot */
+ if ((old_flags & new_flags) & KVM_MEM_READONLY)
+ return;
/*
* If dirty page logging is enabled, write protect all pages in the slot
@@ -454,9 +466,14 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
* MOVE/DELETE: The old mappings will already have been cleaned up by
* kvm_arch_flush_shadow_memslot()
*/
- if (change == KVM_MR_FLAGS_ONLY &&
- (!(old->flags & KVM_MEM_LOG_DIRTY_PAGES) &&
- new->flags & KVM_MEM_LOG_DIRTY_PAGES)) {
+ if (!(old_flags & KVM_MEM_LOG_DIRTY_PAGES) && log_dirty_pages) {
+ /*
+ * Initially-all-set does not require write protecting any page
+ * because they're all assumed to be dirty.
+ */
+ if (kvm_dirty_log_manual_protect_and_init_set(kvm))
+ return;
+
spin_lock(&kvm->mmu_lock);
/* Write protect GPA page table entries */
needs_flush = kvm_mkclean_gpa_pt(kvm, new->base_gfn,
@@ -540,6 +557,7 @@ static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool writ
gfn_t gfn = gpa >> PAGE_SHIFT;
struct kvm *kvm = vcpu->kvm;
struct kvm_memory_slot *slot;
+ struct page *page;
spin_lock(&kvm->mmu_lock);
@@ -551,10 +569,8 @@ static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool writ
}
/* Track access to pages marked old */
- new = *ptep;
- if (!kvm_pte_young(new))
- new = kvm_pte_mkyoung(new);
- /* call kvm_set_pfn_accessed() after unlock */
+ new = kvm_pte_mkyoung(*ptep);
+ /* call kvm_set_pfn_accessed() after unlock */
if (write && !kvm_pte_dirty(new)) {
if (!kvm_pte_write(new)) {
@@ -582,19 +598,22 @@ static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool writ
if (changed) {
kvm_set_pte(ptep, new);
pfn = kvm_pte_pfn(new);
+ page = kvm_pfn_to_refcounted_page(pfn);
+ if (page)
+ get_page(page);
}
spin_unlock(&kvm->mmu_lock);
- /*
- * Fixme: pfn may be freed after mmu_lock
- * kvm_try_get_pfn(pfn)/kvm_release_pfn pair to prevent this?
- */
- if (kvm_pte_young(changed))
- kvm_set_pfn_accessed(pfn);
+ if (changed) {
+ if (kvm_pte_young(changed))
+ kvm_set_pfn_accessed(pfn);
- if (kvm_pte_dirty(changed)) {
- mark_page_dirty(kvm, gfn);
- kvm_set_pfn_dirty(pfn);
+ if (kvm_pte_dirty(changed)) {
+ mark_page_dirty(kvm, gfn);
+ kvm_set_pfn_dirty(pfn);
+ }
+ if (page)
+ put_page(page);
}
return ret;
out:
@@ -737,6 +756,7 @@ static kvm_pte_t *kvm_split_huge(struct kvm_vcpu *vcpu, kvm_pte_t *ptep, gfn_t g
val += PAGE_SIZE;
}
+ smp_wmb(); /* Make pte visible before pmd */
/* The later kvm_flush_tlb_gpa() will flush hugepage tlb */
kvm_set_pte(ptep, __pa(child));
@@ -858,11 +878,21 @@ retry:
/* Disable dirty logging on HugePages */
level = 0;
- if (!fault_supports_huge_mapping(memslot, hva, write)) {
- level = 0;
- } else {
+ if (fault_supports_huge_mapping(memslot, hva, write)) {
+ /* Check page level about host mmu*/
level = host_pfn_mapping_level(kvm, gfn, memslot);
if (level == 1) {
+ /*
+ * Check page level about secondary mmu
+ * Disable hugepage if it is normal page on
+ * secondary mmu already
+ */
+ ptep = kvm_populate_gpa(kvm, NULL, gpa, 0);
+ if (ptep && !kvm_pte_huge(*ptep))
+ level = 0;
+ }
+
+ if (level == 1) {
gfn = gfn & ~(PTRS_PER_PTE - 1);
pfn = pfn & ~(PTRS_PER_PTE - 1);
}
@@ -892,7 +922,6 @@ retry:
kvm_set_pfn_dirty(pfn);
}
- kvm_set_pfn_accessed(pfn);
kvm_release_pfn_clean(pfn);
out:
srcu_read_unlock(&kvm->srcu, srcu_idx);
@@ -908,7 +937,8 @@ int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
return ret;
/* Invalidate this entry in the TLB */
- kvm_flush_tlb_gpa(vcpu, gpa);
+ vcpu->arch.flush_gpa = gpa;
+ kvm_make_request(KVM_REQ_TLB_FLUSH_GPA, vcpu);
return 0;
}
diff --git a/arch/loongarch/kvm/tlb.c b/arch/loongarch/kvm/tlb.c
index 02535df6b51f..ebdbe9264e9c 100644
--- a/arch/loongarch/kvm/tlb.c
+++ b/arch/loongarch/kvm/tlb.c
@@ -23,10 +23,7 @@ void kvm_flush_tlb_all(void)
void kvm_flush_tlb_gpa(struct kvm_vcpu *vcpu, unsigned long gpa)
{
- unsigned long flags;
-
- local_irq_save(flags);
+ lockdep_assert_irqs_disabled();
gpa &= (PAGE_MASK << 1);
invtlb(INVTLB_GID_ADDR, read_csr_gstat() & CSR_GSTAT_GID, gpa);
- local_irq_restore(flags);
}
diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c
index 9e8030d45129..16756ffb55e8 100644
--- a/arch/loongarch/kvm/vcpu.c
+++ b/arch/loongarch/kvm/vcpu.c
@@ -31,6 +31,50 @@ const struct kvm_stats_header kvm_vcpu_stats_header = {
sizeof(kvm_vcpu_stats_desc),
};
+static void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
+{
+ u32 version;
+ u64 steal;
+ gpa_t gpa;
+ struct kvm_memslots *slots;
+ struct kvm_steal_time __user *st;
+ struct gfn_to_hva_cache *ghc;
+
+ ghc = &vcpu->arch.st.cache;
+ gpa = vcpu->arch.st.guest_addr;
+ if (!(gpa & KVM_STEAL_PHYS_VALID))
+ return;
+
+ gpa &= KVM_STEAL_PHYS_MASK;
+ slots = kvm_memslots(vcpu->kvm);
+ if (slots->generation != ghc->generation || gpa != ghc->gpa) {
+ if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st))) {
+ ghc->gpa = INVALID_GPA;
+ return;
+ }
+ }
+
+ st = (struct kvm_steal_time __user *)ghc->hva;
+ unsafe_get_user(version, &st->version, out);
+ if (version & 1)
+ version += 1; /* first time write, random junk */
+
+ version += 1;
+ unsafe_put_user(version, &st->version, out);
+ smp_wmb();
+
+ unsafe_get_user(steal, &st->steal, out);
+ steal += current->sched_info.run_delay - vcpu->arch.st.last_steal;
+ vcpu->arch.st.last_steal = current->sched_info.run_delay;
+ unsafe_put_user(steal, &st->steal, out);
+
+ smp_wmb();
+ version += 1;
+ unsafe_put_user(version, &st->version, out);
+out:
+ mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa));
+}
+
/*
* kvm_check_requests - check and handle pending vCPU requests
*
@@ -48,9 +92,22 @@ static int kvm_check_requests(struct kvm_vcpu *vcpu)
if (kvm_dirty_ring_check_request(vcpu))
return RESUME_HOST;
+ if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
+ kvm_update_stolen_time(vcpu);
+
return RESUME_GUEST;
}
+static void kvm_late_check_requests(struct kvm_vcpu *vcpu)
+{
+ lockdep_assert_irqs_disabled();
+ if (kvm_check_request(KVM_REQ_TLB_FLUSH_GPA, vcpu))
+ if (vcpu->arch.flush_gpa != INVALID_GPA) {
+ kvm_flush_tlb_gpa(vcpu, vcpu->arch.flush_gpa);
+ vcpu->arch.flush_gpa = INVALID_GPA;
+ }
+}
+
/*
* Check and handle pending signal and vCPU requests etc
* Run with irq enabled and preempt enabled
@@ -101,6 +158,13 @@ static int kvm_pre_enter_guest(struct kvm_vcpu *vcpu)
/* Make sure the vcpu mode has been written */
smp_store_mb(vcpu->mode, IN_GUEST_MODE);
kvm_check_vpid(vcpu);
+
+ /*
+ * Called after function kvm_check_vpid()
+ * Since it updates CSR.GSTAT used by kvm_flush_tlb_gpa(),
+ * and it may also clear KVM_REQ_TLB_FLUSH_GPA pending bit
+ */
+ kvm_late_check_requests(vcpu);
vcpu->arch.host_eentry = csr_read64(LOONGARCH_CSR_EENTRY);
/* Clear KVM_LARCH_SWCSR_LATEST as CSR will change when enter guest */
vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;
@@ -354,6 +418,17 @@ static int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *val)
return -EINVAL;
if (id == LOONGARCH_CSR_ESTAT) {
+ preempt_disable();
+ vcpu_load(vcpu);
+ /*
+ * Sync pending interrupts into ESTAT so that interrupt
+ * remains during VM migration stage
+ */
+ kvm_deliver_intr(vcpu);
+ vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;
+ vcpu_put(vcpu);
+ preempt_enable();
+
/* ESTAT IP0~IP7 get from GINTC */
gintc = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_GINTC) & 0xff;
*val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT) | (gintc << 2);
@@ -662,6 +737,16 @@ static int kvm_loongarch_cpucfg_has_attr(struct kvm_vcpu *vcpu,
return -ENXIO;
}
+static int kvm_loongarch_pvtime_has_attr(struct kvm_vcpu *vcpu,
+ struct kvm_device_attr *attr)
+{
+ if (!kvm_pvtime_supported() ||
+ attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
+ return -ENXIO;
+
+ return 0;
+}
+
static int kvm_loongarch_vcpu_has_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr)
{
@@ -671,6 +756,9 @@ static int kvm_loongarch_vcpu_has_attr(struct kvm_vcpu *vcpu,
case KVM_LOONGARCH_VCPU_CPUCFG:
ret = kvm_loongarch_cpucfg_has_attr(vcpu, attr);
break;
+ case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
+ ret = kvm_loongarch_pvtime_has_attr(vcpu, attr);
+ break;
default:
break;
}
@@ -678,7 +766,7 @@ static int kvm_loongarch_vcpu_has_attr(struct kvm_vcpu *vcpu,
return ret;
}
-static int kvm_loongarch_get_cpucfg_attr(struct kvm_vcpu *vcpu,
+static int kvm_loongarch_cpucfg_get_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr)
{
int ret = 0;
@@ -694,6 +782,23 @@ static int kvm_loongarch_get_cpucfg_attr(struct kvm_vcpu *vcpu,
return ret;
}
+static int kvm_loongarch_pvtime_get_attr(struct kvm_vcpu *vcpu,
+ struct kvm_device_attr *attr)
+{
+ u64 gpa;
+ u64 __user *user = (u64 __user *)attr->addr;
+
+ if (!kvm_pvtime_supported() ||
+ attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
+ return -ENXIO;
+
+ gpa = vcpu->arch.st.guest_addr;
+ if (put_user(gpa, user))
+ return -EFAULT;
+
+ return 0;
+}
+
static int kvm_loongarch_vcpu_get_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr)
{
@@ -701,7 +806,10 @@ static int kvm_loongarch_vcpu_get_attr(struct kvm_vcpu *vcpu,
switch (attr->group) {
case KVM_LOONGARCH_VCPU_CPUCFG:
- ret = kvm_loongarch_get_cpucfg_attr(vcpu, attr);
+ ret = kvm_loongarch_cpucfg_get_attr(vcpu, attr);
+ break;
+ case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
+ ret = kvm_loongarch_pvtime_get_attr(vcpu, attr);
break;
default:
break;
@@ -716,6 +824,43 @@ static int kvm_loongarch_cpucfg_set_attr(struct kvm_vcpu *vcpu,
return -ENXIO;
}
+static int kvm_loongarch_pvtime_set_attr(struct kvm_vcpu *vcpu,
+ struct kvm_device_attr *attr)
+{
+ int idx, ret = 0;
+ u64 gpa, __user *user = (u64 __user *)attr->addr;
+ struct kvm *kvm = vcpu->kvm;
+
+ if (!kvm_pvtime_supported() ||
+ attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
+ return -ENXIO;
+
+ if (get_user(gpa, user))
+ return -EFAULT;
+
+ if (gpa & ~(KVM_STEAL_PHYS_MASK | KVM_STEAL_PHYS_VALID))
+ return -EINVAL;
+
+ if (!(gpa & KVM_STEAL_PHYS_VALID)) {
+ vcpu->arch.st.guest_addr = gpa;
+ return 0;
+ }
+
+ /* Check the address is in a valid memslot */
+ idx = srcu_read_lock(&kvm->srcu);
+ if (kvm_is_error_hva(gfn_to_hva(kvm, gpa >> PAGE_SHIFT)))
+ ret = -EINVAL;
+ srcu_read_unlock(&kvm->srcu, idx);
+
+ if (!ret) {
+ vcpu->arch.st.guest_addr = gpa;
+ vcpu->arch.st.last_steal = current->sched_info.run_delay;
+ kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
+ }
+
+ return ret;
+}
+
static int kvm_loongarch_vcpu_set_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr)
{
@@ -725,6 +870,9 @@ static int kvm_loongarch_vcpu_set_attr(struct kvm_vcpu *vcpu,
case KVM_LOONGARCH_VCPU_CPUCFG:
ret = kvm_loongarch_cpucfg_set_attr(vcpu, attr);
break;
+ case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
+ ret = kvm_loongarch_pvtime_set_attr(vcpu, attr);
+ break;
default:
break;
}
@@ -994,6 +1142,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
struct loongarch_csrs *csr;
vcpu->arch.vpid = 0;
+ vcpu->arch.flush_gpa = INVALID_GPA;
hrtimer_init(&vcpu->arch.swtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
vcpu->arch.swtimer.function = kvm_swtimer_wakeup;
@@ -1084,6 +1233,7 @@ static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
/* Control guest page CCA attribute */
change_csr_gcfg(CSR_GCFG_MATC_MASK, CSR_GCFG_MATC_ROOT);
+ kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
/* Don't bother restoring registers multiple times unless necessary */
if (vcpu->arch.aux_inuse & KVM_LARCH_HWCSR_USABLE)
@@ -1266,7 +1416,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
kvm_complete_iocsr_read(vcpu, run);
}
- if (run->immediate_exit)
+ if (!vcpu->wants_to_run)
return r;
/* Clear exit_reason */
diff --git a/arch/loongarch/power/platform.c b/arch/loongarch/power/platform.c
index 3ea8e07aa225..0909729dc2e1 100644
--- a/arch/loongarch/power/platform.c
+++ b/arch/loongarch/power/platform.c
@@ -34,22 +34,49 @@ void enable_pci_wakeup(void)
acpi_write_bit_register(ACPI_BITREG_PCIEXP_WAKE_DISABLE, 0);
}
+static struct platform_device loongson3_cpufreq_device = {
+ .name = "loongson3_cpufreq",
+ .id = -1,
+};
+
+static int __init loongson_cpufreq_init(void)
+{
+ if (!cpu_has_scalefreq)
+ return -ENODEV;
+
+ return platform_device_register(&loongson3_cpufreq_device);
+}
+
+arch_initcall(loongson_cpufreq_init);
+
+static void default_suspend_addr(void)
+{
+ acpi_enter_sleep_state(ACPI_STATE_S3);
+}
+
static int __init loongson3_acpi_suspend_init(void)
{
#ifdef CONFIG_ACPI
acpi_status status;
uint64_t suspend_addr = 0;
- if (acpi_disabled || acpi_gbl_reduced_hardware)
+ if (acpi_disabled)
+ return 0;
+
+ if (!acpi_gbl_reduced_hardware)
+ acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);
+
+ if (!acpi_sleep_state_supported(ACPI_STATE_S3))
return 0;
- acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);
status = acpi_evaluate_integer(NULL, "\\SADR", NULL, &suspend_addr);
if (ACPI_FAILURE(status) || !suspend_addr) {
- pr_err("ACPI S3 is not support!\n");
- return -1;
+ pr_info("ACPI S3 supported with hardware register default\n");
+ loongson_sysconf.suspend_addr = (u64)default_suspend_addr;
+ } else {
+ pr_info("ACPI S3 supported with Loongson ACPI SADR extension\n");
+ loongson_sysconf.suspend_addr = (u64)phys_to_virt(PHYSADDR(suspend_addr));
}
- loongson_sysconf.suspend_addr = (u64)phys_to_virt(PHYSADDR(suspend_addr));
#endif
return 0;
}
diff --git a/arch/loongarch/power/suspend_asm.S b/arch/loongarch/power/suspend_asm.S
index e2fc3b4e31f0..9fe28d5a0270 100644
--- a/arch/loongarch/power/suspend_asm.S
+++ b/arch/loongarch/power/suspend_asm.S
@@ -66,18 +66,14 @@ SYM_FUNC_START(loongarch_suspend_enter)
la.pcrel a0, loongarch_wakeup_start
la.pcrel t0, loongarch_suspend_addr
ld.d t0, t0, 0
- jirl a0, t0, 0 /* Call BIOS's STR sleep routine */
+ jirl ra, t0, 0 /* Call BIOS's STR sleep routine */
/*
* This is where we return upon wakeup.
* Reload all of the registers and return.
*/
SYM_INNER_LABEL(loongarch_wakeup_start, SYM_L_GLOBAL)
- li.d t0, CSR_DMW0_INIT # UC, PLV0
- csrwr t0, LOONGARCH_CSR_DMWIN0
- li.d t0, CSR_DMW1_INIT # CA, PLV0
- csrwr t0, LOONGARCH_CSR_DMWIN1
-
+ SETUP_DMWINS t0
JUMP_VIRT_ADDR t0, t1
/* Enable PG */
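
A note on the suspend_asm.S fix above: jirl rd, rj, offs jumps to rj plus the scaled immediate and writes the return address into rd. The old code linked through a0, so the BIOS routine's first argument (loongarch_wakeup_start, loaded into a0 just before) was immediately clobbered by the return address; linking through ra is the conventional call sequence and leaves a0 intact:

    jirl    a0, t0, 0   # old: return address lands in a0, destroying argument 0
    jirl    ra, t0, 0   # new: proper call, return address in ra, a0 preserved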