summaryrefslogtreecommitdiff
path: root/arch/riscv/include
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2026-04-24 10:00:37 -0700
committerLinus Torvalds <torvalds@linux-foundation.org>2026-04-24 10:00:37 -0700
commitfeff82eb5f4075d541990d0ba60dad14ea83ea9b (patch)
treeb9f16bbd5c8a44552fe94dd2462b296acc03dea8 /arch/riscv/include
parentff57d59200baadfdb41f94a49fed7d161a9a8124 (diff)
parent9b3a2be84803cf18c4b4d1efc695991f0daa153c (diff)
Merge tag 'riscv-for-linus-7.1-mw1' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux
Pull RISC-V updates from Paul Walmsley: "There is one significant change outside arch/riscv in this pull request: the addition of a set of KUnit tests for strlen(), strnlen(), and strrchr(). Otherwise, the most notable changes are to add some RISC-V-specific string function implementations, to remove XIP kernel support, to add hardware error exception handling, and to optimize our runtime unaligned access speed testing. A few comments on the motivation for removing XIP support. It's been broken in the RISC-V kernel for months. The code is not easy to maintain. Furthermore, for XIP support to truly be useful for RISC-V, we think that compile-time feature switches would need to be added for many of the RISC-V ISA features and microarchitectural properties that are currently implemented with runtime patching. No one has stepped forward to take responsibility for that work, so many of us think it's best to remove it until clear use cases and champions emerge. Summary: - Add KUnit correctness testing and microbenchmarks for strlen(), strnlen(), and strrchr() - Add RISC-V-specific strnlen(), strchr(), strrchr() implementations - Add hardware error exception handling - Clean up and optimize our unaligned access probe code - Enable HAVE_IOREMAP_PROT to be able to use generic_access_phys() - Remove XIP kernel support - Warn when addresses outside the vmemmap range are passed to vmemmap_populate() - Update the ACPI FADT revision check to warn if it's not at least ACPI v6.6, which is when key RISC-V-specific tables were added to the specification - Increase COMMAND_LINE_SIZE to 2048 to match ARM64, x86, PowerPC, etc. 
- Make kaslr_offset() a static inline function, since there's no need for it to show up in the symbol table - Add KASLR offset and SATP to the VMCOREINFO ELF notes to improve kdump support - Add Makefile cleanup rule for vdso_cfi copied source files, and add a .gitignore for the build artifacts in that directory - Remove some redundant ifdefs that check Kconfig macros - Add missing SPDX license tag to the CFI selftest - Simplify UTS_MACHINE assignment in the RISC-V Makefile - Clarify some unclear comments and remove some superfluous comments - Fix various English typos across the RISC-V codebase" * tag 'riscv-for-linus-7.1-mw1' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux: (31 commits) riscv: Remove support for XIP kernel riscv: Reuse compare_unaligned_access() in check_vector_unaligned_access() riscv: Split out compare_unaligned_access() riscv: Reuse measure_cycles() in check_vector_unaligned_access() riscv: Split out measure_cycles() for reuse riscv: Clean up & optimize unaligned scalar access probe riscv: lib: add strrchr() implementation riscv: lib: add strchr() implementation riscv: lib: add strnlen() implementation lib/string_kunit: extend benchmarks to strnlen() and chr searches lib/string_kunit: add performance benchmark for strlen() lib/string_kunit: add correctness test for strrchr() lib/string_kunit: add correctness test for strnlen() lib/string_kunit: add correctness test for strlen() riscv: vdso_cfi: Add .gitignore for build artifacts riscv: vdso_cfi: Add clean rule for copied sources riscv: enable HAVE_IOREMAP_PROT riscv: mm: WARN_ON() for bad addresses in vmemmap_populate() riscv: acpi: update FADT revision check to 6.6 riscv: add hardware error trap handler support ...
Diffstat (limited to 'arch/riscv/include')
-rw-r--r--arch/riscv/include/asm/asm-prototypes.h1
-rw-r--r--arch/riscv/include/asm/atomic.h4
-rw-r--r--arch/riscv/include/asm/elf.h4
-rw-r--r--arch/riscv/include/asm/page.h34
-rw-r--r--arch/riscv/include/asm/pgtable.h20
-rw-r--r--arch/riscv/include/asm/processor.h2
-rw-r--r--arch/riscv/include/asm/scs.h1
-rw-r--r--arch/riscv/include/asm/set_memory.h2
-rw-r--r--arch/riscv/include/asm/smp.h2
-rw-r--r--arch/riscv/include/asm/string.h9
-rw-r--r--arch/riscv/include/asm/thread_info.h2
-rw-r--r--arch/riscv/include/asm/xip_fixup.h49
-rw-r--r--arch/riscv/include/uapi/asm/setup.h2
13 files changed, 23 insertions, 109 deletions
diff --git a/arch/riscv/include/asm/asm-prototypes.h b/arch/riscv/include/asm/asm-prototypes.h
index 41ec5cdec367..5b90ba5314ee 100644
--- a/arch/riscv/include/asm/asm-prototypes.h
+++ b/arch/riscv/include/asm/asm-prototypes.h
@@ -40,6 +40,7 @@ asmlinkage void riscv_v_context_nesting_end(struct pt_regs *regs);
#define DECLARE_DO_ERROR_INFO(name) asmlinkage void name(struct pt_regs *regs)
DECLARE_DO_ERROR_INFO(do_trap_unknown);
+DECLARE_DO_ERROR_INFO(do_trap_hardware_error);
DECLARE_DO_ERROR_INFO(do_trap_insn_misaligned);
DECLARE_DO_ERROR_INFO(do_trap_insn_fault);
DECLARE_DO_ERROR_INFO(do_trap_insn_illegal);
diff --git a/arch/riscv/include/asm/atomic.h b/arch/riscv/include/asm/atomic.h
index 3f33dc54f94b..616b8b332ac5 100644
--- a/arch/riscv/include/asm/atomic.h
+++ b/arch/riscv/include/asm/atomic.h
@@ -46,7 +46,7 @@ static __always_inline void arch_atomic64_set(atomic64_t *v, s64 i)
#endif
/*
- * First, the atomic ops that have no ordering constraints and therefor don't
+ * First, the atomic ops that have no ordering constraints and therefore don't
* have the AQ or RL bits set. These don't return anything, so there's only
* one version to worry about.
*/
@@ -81,7 +81,7 @@ ATOMIC_OPS(xor, xor, i)
/*
* Atomic ops that have ordered, relaxed, acquire, and release variants.
- * There's two flavors of these: the arithmatic ops have both fetch and return
+ * There's two flavors of these: the arithmetic ops have both fetch and return
* versions, while the logical ops only have fetch versions.
*/
#define ATOMIC_FETCH_OP(op, asm_op, I, asm_type, c_type, prefix) \
diff --git a/arch/riscv/include/asm/elf.h b/arch/riscv/include/asm/elf.h
index c7aea7886d22..aa4961b0e208 100644
--- a/arch/riscv/include/asm/elf.h
+++ b/arch/riscv/include/asm/elf.h
@@ -59,8 +59,8 @@ extern bool compat_elf_check_arch(Elf32_Ehdr *hdr);
#endif
/*
- * Provides information on the availiable set of ISA extensions to userspace,
- * via a bitmap that coorespends to each single-letter ISA extension. This is
+ * Provides information on the available set of ISA extensions to userspace,
+ * via a bitmap that corresponds to each single-letter ISA extension. This is
* essentially defunct, but will remain for compatibility with userspace.
*/
#define ELF_HWCAP riscv_get_elf_hwcap()
diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
index c78017061b17..709a36fb4323 100644
--- a/arch/riscv/include/asm/page.h
+++ b/arch/riscv/include/asm/page.h
@@ -29,11 +29,7 @@
#define PAGE_OFFSET_L5 _AC(0xff60000000000000, UL)
#define PAGE_OFFSET_L4 _AC(0xffffaf8000000000, UL)
#define PAGE_OFFSET_L3 _AC(0xffffffd600000000, UL)
-#ifdef CONFIG_XIP_KERNEL
-#define PAGE_OFFSET PAGE_OFFSET_L3
-#else
#define PAGE_OFFSET kernel_map.page_offset
-#endif /* CONFIG_XIP_KERNEL */
#else
#define PAGE_OFFSET _AC(0xc0000000, UL)
#endif /* CONFIG_64BIT */
@@ -104,15 +100,8 @@ struct kernel_mapping {
/* Offset between linear mapping virtual address and kernel load address */
unsigned long va_pa_offset;
/* Offset between kernel mapping virtual address and kernel load address */
-#ifdef CONFIG_XIP_KERNEL
- unsigned long va_kernel_xip_text_pa_offset;
- unsigned long va_kernel_xip_data_pa_offset;
- uintptr_t xiprom;
- uintptr_t xiprom_sz;
-#else
unsigned long page_offset;
unsigned long va_kernel_pa_offset;
-#endif
};
extern struct kernel_mapping kernel_map;
@@ -131,16 +120,7 @@ extern unsigned long vmemmap_start_pfn;
void *linear_mapping_pa_to_va(unsigned long x);
#endif
-#ifdef CONFIG_XIP_KERNEL
-#define kernel_mapping_pa_to_va(y) ({ \
- unsigned long _y = (unsigned long)(y); \
- (_y < phys_ram_base) ? \
- (void *)(_y + kernel_map.va_kernel_xip_text_pa_offset) : \
- (void *)(_y + kernel_map.va_kernel_xip_data_pa_offset); \
- })
-#else
#define kernel_mapping_pa_to_va(y) ((void *)((unsigned long)(y) + kernel_map.va_kernel_pa_offset))
-#endif
#define __pa_to_va_nodebug(x) linear_mapping_pa_to_va(x)
@@ -150,16 +130,7 @@ void *linear_mapping_pa_to_va(unsigned long x);
phys_addr_t linear_mapping_va_to_pa(unsigned long x);
#endif
-#ifdef CONFIG_XIP_KERNEL
-#define kernel_mapping_va_to_pa(y) ({ \
- unsigned long _y = (unsigned long)(y); \
- (_y < kernel_map.virt_addr + kernel_map.xiprom_sz) ? \
- (_y - kernel_map.va_kernel_xip_text_pa_offset) : \
- (_y - kernel_map.va_kernel_xip_data_pa_offset); \
- })
-#else
#define kernel_mapping_va_to_pa(y) ((unsigned long)(y) - kernel_map.va_kernel_pa_offset)
-#endif
#define __va_to_pa_nodebug(x) ({ \
unsigned long _x = x; \
@@ -190,7 +161,10 @@ extern phys_addr_t __phys_addr_symbol(unsigned long x);
#define sym_to_pfn(x) __phys_to_pfn(__pa_symbol(x))
-unsigned long kaslr_offset(void);
+static inline unsigned long kaslr_offset(void)
+{
+ return kernel_map.virt_offset;
+}
static __always_inline void *pfn_to_kaddr(unsigned long pfn)
{
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index a66d49bb26a8..a1a7c6520a09 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -134,21 +134,6 @@
#include <linux/page_table_check.h>
-#ifdef CONFIG_XIP_KERNEL
-#define XIP_FIXUP(addr) ({ \
- extern char _sdata[], _start[], _end[]; \
- uintptr_t __rom_start_data = CONFIG_XIP_PHYS_ADDR \
- + (uintptr_t)&_sdata - (uintptr_t)&_start; \
- uintptr_t __rom_end_data = CONFIG_XIP_PHYS_ADDR \
- + (uintptr_t)&_end - (uintptr_t)&_start; \
- uintptr_t __a = (uintptr_t)(addr); \
- (__a >= __rom_start_data && __a < __rom_end_data) ? \
- __a - __rom_start_data + CONFIG_PHYS_RAM_BASE : __a; \
- })
-#else
-#define XIP_FIXUP(addr) (addr)
-#endif /* CONFIG_XIP_KERNEL */
-
struct pt_alloc_ops {
pte_t *(*get_pte_virt)(phys_addr_t pa);
phys_addr_t (*alloc_pte)(uintptr_t va);
@@ -1272,13 +1257,8 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
extern char _start[];
extern void *_dtb_early_va;
extern uintptr_t _dtb_early_pa;
-#if defined(CONFIG_XIP_KERNEL) && defined(CONFIG_MMU)
-#define dtb_early_va (*(void **)XIP_FIXUP(&_dtb_early_va))
-#define dtb_early_pa (*(uintptr_t *)XIP_FIXUP(&_dtb_early_pa))
-#else
#define dtb_early_va _dtb_early_va
#define dtb_early_pa _dtb_early_pa
-#endif /* CONFIG_XIP_KERNEL */
extern u64 satp_mode;
void paging_init(void);
diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h
index 4c3dd94d0f63..812517b2cec1 100644
--- a/arch/riscv/include/asm/processor.h
+++ b/arch/riscv/include/asm/processor.h
@@ -86,7 +86,7 @@ struct pt_regs;
* preempt_v. All preempt_v context should be dropped in such case because
* V-regs are caller-saved. Only sstatus.VS=ON is persisted across a
* schedule() call.
- * - bit 30: The in-kernel preempt_v context is saved, and requries to be
+ * - bit 30: The in-kernel preempt_v context is saved, and is required to be
* restored when returning to the context that owns the preempt_v.
* - bit 31: The in-kernel preempt_v context is dirty, as signaled by the
* trap entry code. Any context switches out-of current task need to save
diff --git a/arch/riscv/include/asm/scs.h b/arch/riscv/include/asm/scs.h
index ab7714aa93bd..023a412fe38d 100644
--- a/arch/riscv/include/asm/scs.h
+++ b/arch/riscv/include/asm/scs.h
@@ -10,7 +10,6 @@
/* Load init_shadow_call_stack to gp. */
.macro scs_load_init_stack
la gp, init_shadow_call_stack
- XIP_FIXUP_OFFSET gp
.endm
/* Load the per-CPU IRQ shadow call stack to gp. */
diff --git a/arch/riscv/include/asm/set_memory.h b/arch/riscv/include/asm/set_memory.h
index 87389e93325a..ef59e1716a2c 100644
--- a/arch/riscv/include/asm/set_memory.h
+++ b/arch/riscv/include/asm/set_memory.h
@@ -47,7 +47,7 @@ bool kernel_page_present(struct page *page);
#endif /* __ASSEMBLER__ */
-#if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_XIP_KERNEL)
+#if defined(CONFIG_STRICT_KERNEL_RWX)
#ifdef CONFIG_64BIT
#define SECTION_ALIGN (1 << 21)
#else
diff --git a/arch/riscv/include/asm/smp.h b/arch/riscv/include/asm/smp.h
index 7ac80e9f2288..0ecc67641b09 100644
--- a/arch/riscv/include/asm/smp.h
+++ b/arch/riscv/include/asm/smp.h
@@ -105,7 +105,7 @@ static inline void riscv_ipi_set_virq_range(int virq, int nr)
#endif /* CONFIG_SMP */
-#if defined(CONFIG_HOTPLUG_CPU) && (CONFIG_SMP)
+#if defined(CONFIG_HOTPLUG_CPU)
bool cpu_has_hotplug(unsigned int cpu);
#else
static inline bool cpu_has_hotplug(unsigned int cpu)
diff --git a/arch/riscv/include/asm/string.h b/arch/riscv/include/asm/string.h
index 5ba77f60bf0b..764ffe8f6479 100644
--- a/arch/riscv/include/asm/string.h
+++ b/arch/riscv/include/asm/string.h
@@ -28,6 +28,15 @@ extern asmlinkage __kernel_size_t strlen(const char *);
#define __HAVE_ARCH_STRNCMP
extern asmlinkage int strncmp(const char *cs, const char *ct, size_t count);
+
+#define __HAVE_ARCH_STRNLEN
+extern asmlinkage __kernel_size_t strnlen(const char *, size_t);
+
+#define __HAVE_ARCH_STRCHR
+extern asmlinkage char *strchr(const char *, int);
+
+#define __HAVE_ARCH_STRRCHR
+extern asmlinkage char *strrchr(const char *, int);
#endif
/* For those files which don't want to check by kasan. */
diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h
index 36918c9200c9..55019fdfa9ec 100644
--- a/arch/riscv/include/asm/thread_info.h
+++ b/arch/riscv/include/asm/thread_info.h
@@ -120,7 +120,7 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
#include <asm-generic/thread_info_tif.h>
#define TIF_32BIT 16 /* compat-mode 32bit process */
-#define TIF_RISCV_V_DEFER_RESTORE 17 /* restore Vector before returing to user */
+#define TIF_RISCV_V_DEFER_RESTORE 17 /* restore Vector before returning to user */
#define _TIF_RISCV_V_DEFER_RESTORE BIT(TIF_RISCV_V_DEFER_RESTORE)
diff --git a/arch/riscv/include/asm/xip_fixup.h b/arch/riscv/include/asm/xip_fixup.h
deleted file mode 100644
index f3d56299bc22..000000000000
--- a/arch/riscv/include/asm/xip_fixup.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * XIP fixup macros, only useful in assembly.
- */
-#ifndef _ASM_RISCV_XIP_FIXUP_H
-#define _ASM_RISCV_XIP_FIXUP_H
-
-#include <linux/pgtable.h>
-
-#ifdef CONFIG_XIP_KERNEL
-.macro XIP_FIXUP_OFFSET reg
- /* Fix-up address in Flash into address in RAM early during boot before
- * MMU is up. Because generated code "thinks" data is in Flash, but it
- * is actually in RAM (actually data is also in Flash, but Flash is
- * read-only, thus we need to use the data residing in RAM).
- *
- * The start of data in Flash is _sdata and the start of data in RAM is
- * CONFIG_PHYS_RAM_BASE. So this fix-up essentially does this:
- * reg += CONFIG_PHYS_RAM_BASE - _start
- */
- li t0, CONFIG_PHYS_RAM_BASE
- add \reg, \reg, t0
- la t0, _sdata
- sub \reg, \reg, t0
-.endm
-.macro XIP_FIXUP_FLASH_OFFSET reg
- /* In linker script, at the transition from read-only section to
- * writable section, the VMA is increased while LMA remains the same.
- * (See in linker script how _sdata, __data_loc and LOAD_OFFSET is
- * changed)
- *
- * Consequently, early during boot before MMU is up, the generated code
- * reads the "writable" section at wrong addresses, because VMA is used
- * by compiler to generate code, but the data is located in Flash using
- * LMA.
- */
- la t0, _sdata
- sub \reg, \reg, t0
- la t0, __data_loc
- add \reg, \reg, t0
-.endm
-#else
-.macro XIP_FIXUP_OFFSET reg
-.endm
-.macro XIP_FIXUP_FLASH_OFFSET reg
-.endm
-#endif /* CONFIG_XIP_KERNEL */
-
-#endif
diff --git a/arch/riscv/include/uapi/asm/setup.h b/arch/riscv/include/uapi/asm/setup.h
index 66b13a522880..eb4f0209c696 100644
--- a/arch/riscv/include/uapi/asm/setup.h
+++ b/arch/riscv/include/uapi/asm/setup.h
@@ -3,6 +3,6 @@
#ifndef _UAPI_ASM_RISCV_SETUP_H
#define _UAPI_ASM_RISCV_SETUP_H
-#define COMMAND_LINE_SIZE 1024
+#define COMMAND_LINE_SIZE 2048
#endif /* _UAPI_ASM_RISCV_SETUP_H */