Diffstat (limited to 'arch/arm64')

 -rw-r--r--  arch/arm64/Kconfig                    | 21
 -rw-r--r--  arch/arm64/crypto/aes-ce-cipher.c     |  2
 -rw-r--r--  arch/arm64/include/asm/barrier.h      | 16
 -rw-r--r--  arch/arm64/include/asm/compat.h       |  3
 -rw-r--r--  arch/arm64/include/asm/cpufeature.h   |  3
 -rw-r--r--  arch/arm64/include/asm/dma-mapping.h  | 13
 -rw-r--r--  arch/arm64/include/asm/kvm_emulate.h  |  8
 -rw-r--r--  arch/arm64/include/asm/mmu_context.h  |  2
 -rw-r--r--  arch/arm64/include/asm/pgtable.h      |  1
 -rw-r--r--  arch/arm64/kernel/cpu_errata.c        |  9
 -rw-r--r--  arch/arm64/kernel/cpuinfo.c           |  5
 -rw-r--r--  arch/arm64/kernel/efi.c               | 14
 -rw-r--r--  arch/arm64/kernel/suspend.c           | 10
 -rw-r--r--  arch/arm64/kvm/hyp.S                  | 14
 -rw-r--r--  arch/arm64/kvm/inject_fault.c         |  2
 -rw-r--r--  arch/arm64/mm/dma-mapping.c           | 35
 -rw-r--r--  arch/arm64/mm/mmu.c                   | 14
 -rw-r--r--  arch/arm64/net/bpf_jit_comp.c         | 48

 18 files changed, 152 insertions, 68 deletions
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 9ac16a482ff1..e55848c1edf4 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -316,6 +316,27 @@ config ARM64_ERRATUM_832075
 
 	  If unsure, say Y.
 
+config ARM64_ERRATUM_834220
+	bool "Cortex-A57: 834220: Stage 2 translation fault might be incorrectly reported in presence of a Stage 1 fault"
+	depends on KVM
+	default y
+	help
+	  This option adds an alternative code sequence to work around ARM
+	  erratum 834220 on Cortex-A57 parts up to r1p2.
+
+	  Affected Cortex-A57 parts might report a Stage 2 translation
+	  fault as the result of a Stage 1 fault for load crossing a
+	  page boundary when there is a permission or device memory
+	  alignment fault at Stage 1 and a translation fault at Stage 2.
+
+	  The workaround is to verify that the Stage 1 translation
+	  doesn't generate a fault before handling the Stage 2 fault.
+	  Please note that this does not necessarily enable the workaround,
+	  as it depends on the alternative framework, which will only patch
+	  the kernel if an affected CPU is detected.
+
+	  If unsure, say Y.
+
 config ARM64_ERRATUM_845719
 	bool "Cortex-A53: 845719: a load might read incorrect data"
 	depends on COMPAT
diff --git a/arch/arm64/crypto/aes-ce-cipher.c b/arch/arm64/crypto/aes-ce-cipher.c
index ce47792a983d..f7bd9bf0bbb3 100644
--- a/arch/arm64/crypto/aes-ce-cipher.c
+++ b/arch/arm64/crypto/aes-ce-cipher.c
@@ -237,7 +237,7 @@ EXPORT_SYMBOL(ce_aes_setkey);
 static struct crypto_alg aes_alg = {
 	.cra_name		= "aes",
 	.cra_driver_name	= "aes-ce",
-	.cra_priority		= 300,
+	.cra_priority		= 250,
 	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
 	.cra_blocksize		= AES_BLOCK_SIZE,
 	.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 624f9679f4b0..9622eb48f894 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -64,27 +64,31 @@ do {								\
 
 #define smp_load_acquire(p)					\
 ({								\
-	typeof(*p) ___p1;					\
+	union { typeof(*p) __val; char __c[1]; } __u;		\
 	compiletime_assert_atomic_type(*p);			\
 	switch (sizeof(*p)) {					\
 	case 1:							\
 		asm volatile ("ldarb %w0, %1"			\
-			: "=r" (___p1) : "Q" (*p) : "memory");	\
+			: "=r" (*(__u8 *)__u.__c)		\
+			: "Q" (*p) : "memory");			\
 		break;						\
 	case 2:							\
 		asm volatile ("ldarh %w0, %1"			\
-			: "=r" (___p1) : "Q" (*p) : "memory");	\
+			: "=r" (*(__u16 *)__u.__c)		\
+			: "Q" (*p) : "memory");			\
 		break;						\
 	case 4:							\
 		asm volatile ("ldar %w0, %1"			\
-			: "=r" (___p1) : "Q" (*p) : "memory");	\
+			: "=r" (*(__u32 *)__u.__c)		\
+			: "Q" (*p) : "memory");			\
 		break;						\
 	case 8:							\
 		asm volatile ("ldar %0, %1"			\
-			: "=r" (___p1) : "Q" (*p) : "memory");	\
+			: "=r" (*(__u64 *)__u.__c)		\
+			: "Q" (*p) : "memory");			\
 		break;						\
 	}							\
-	___p1;							\
+	__u.__val;						\
 })
 
 #define read_barrier_depends()		do { } while(0)
diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
index 7fbed6919b54..eb8432bb82b8 100644
--- a/arch/arm64/include/asm/compat.h
+++ b/arch/arm64/include/asm/compat.h
@@ -23,7 +23,6 @@
  */
 #include <linux/types.h>
 #include <linux/sched.h>
-#include <linux/ptrace.h>
 
 #define COMPAT_USER_HZ		100
 #ifdef __AARCH64EB__
@@ -234,7 +233,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
 	return (u32)(unsigned long)uptr;
 }
 
-#define compat_user_stack_pointer() (user_stack_pointer(current_pt_regs()))
+#define compat_user_stack_pointer() (user_stack_pointer(task_pt_regs(current)))
 
 static inline void __user *arch_compat_alloc_user_space(long len)
 {
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 11d5bb0fdd54..52722ee73dba 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -29,8 +29,9 @@
 #define ARM64_HAS_PAN				4
 #define ARM64_HAS_LSE_ATOMICS			5
 #define ARM64_WORKAROUND_CAVIUM_23154		6
+#define ARM64_WORKAROUND_834220			7
 
-#define ARM64_NCAPS				7
+#define ARM64_NCAPS				8
 
 #ifndef __ASSEMBLY__
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index 54d0ead41afc..61e08f360e31 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -18,7 +18,6 @@
 
 #ifdef __KERNEL__
 
-#include <linux/acpi.h>
 #include <linux/types.h>
 #include <linux/vmalloc.h>
 
@@ -26,22 +25,16 @@
 #include <asm/xen/hypervisor.h>
 
 #define DMA_ERROR_CODE	(~(dma_addr_t)0)
-extern struct dma_map_ops *dma_ops;
 extern struct dma_map_ops dummy_dma_ops;
 
 static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
 {
-	if (unlikely(!dev))
-		return dma_ops;
-	else if (dev->archdata.dma_ops)
+	if (dev && dev->archdata.dma_ops)
 		return dev->archdata.dma_ops;
-	else if (acpi_disabled)
-		return dma_ops;
 
 	/*
-	 * When ACPI is enabled, if arch_set_dma_ops is not called,
-	 * we will disable device DMA capability by setting it
-	 * to dummy_dma_ops.
+	 * We expect no ISA devices, and all other DMA masters are expected to
+	 * have someone call arch_setup_dma_ops at device creation time.
 	 */
 	return &dummy_dma_ops;
 }
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 17e92f05b1fe..3ca894ecf699 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -99,11 +99,13 @@ static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
 	*vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT;
 }
 
+/*
+ * vcpu_reg should always be passed a register number coming from a
+ * read of ESR_EL2. Otherwise, it may give the wrong result on AArch32
+ * with banked registers.
+ */
 static inline unsigned long *vcpu_reg(const struct kvm_vcpu *vcpu, u8 reg_num)
 {
-	if (vcpu_mode_is_32bit(vcpu))
-		return vcpu_reg32(vcpu, reg_num);
-
 	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.regs[reg_num];
 }
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index c0e87898ba96..24165784b803 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -101,7 +101,7 @@ static inline void cpu_set_default_tcr_t0sz(void)
 #define destroy_context(mm)		do { } while(0)
 void check_and_switch_context(struct mm_struct *mm, unsigned int cpu);
 
-#define init_new_context(tsk,mm)	({ atomic64_set(&mm->context.id, 0); 0; })
+#define init_new_context(tsk,mm)	({ atomic64_set(&(mm)->context.id, 0); 0; })
 
 /*
  * This is called when "tsk" is about to enter lazy TLB mode.
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 9819a9426b69..7e074f93f383 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -81,6 +81,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
 
 #define PAGE_KERNEL		__pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
 #define PAGE_KERNEL_RO		__pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
+#define PAGE_KERNEL_ROX		__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
 #define PAGE_KERNEL_EXEC	__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)
 #define PAGE_KERNEL_EXEC_CONT	__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT)
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 24926f2504f7..feb6b4efa641 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -75,6 +75,15 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 			   (1 << MIDR_VARIANT_SHIFT) | 2),
 	},
 #endif
+#ifdef CONFIG_ARM64_ERRATUM_834220
+	{
+	/* Cortex-A57 r0p0 - r1p2 */
+		.desc = "ARM erratum 834220",
+		.capability = ARM64_WORKAROUND_834220,
+		MIDR_RANGE(MIDR_CORTEX_A57, 0x00,
+			   (1 << MIDR_VARIANT_SHIFT) | 2),
+	},
+#endif
 #ifdef CONFIG_ARM64_ERRATUM_845719
 	{
 	/* Cortex-A53 r0p[01234] */
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 706679d0a0b4..212ae6361d8b 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -30,6 +30,7 @@
 #include <linux/seq_file.h>
 #include <linux/sched.h>
 #include <linux/smp.h>
+#include <linux/delay.h>
 
 /*
  * In case the boot CPU is hotpluggable, we record its initial state and
@@ -112,6 +113,10 @@ static int c_show(struct seq_file *m, void *v)
 		 */
 		seq_printf(m, "processor\t: %d\n", i);
 
+		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
+			   loops_per_jiffy / (500000UL/HZ),
+			   loops_per_jiffy / (5000UL/HZ) % 100);
+
 		/*
 		 * Dump out the common processor features in a single line.
 		 * Userspace should read the hwcaps with getauxval(AT_HWCAP)
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index de46b50f4cdf..fc5508e0df57 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -224,6 +224,8 @@ static bool __init efi_virtmap_init(void)
 {
 	efi_memory_desc_t *md;
 
+	init_new_context(NULL, &efi_mm);
+
 	for_each_efi_memory_desc(&memmap, md) {
 		u64 paddr, npages, size;
 		pgprot_t prot;
@@ -254,7 +256,8 @@ static bool __init efi_virtmap_init(void)
 		else
 			prot = PAGE_KERNEL;
 
-		create_pgd_mapping(&efi_mm, paddr, md->virt_addr, size, prot);
+		create_pgd_mapping(&efi_mm, paddr, md->virt_addr, size,
+				   __pgprot(pgprot_val(prot) | PTE_NG));
 	}
 	return true;
 }
@@ -329,14 +332,7 @@ core_initcall(arm64_dmi_init);
 
 static void efi_set_pgd(struct mm_struct *mm)
 {
-	if (mm == &init_mm)
-		cpu_set_reserved_ttbr0();
-	else
-		cpu_switch_mm(mm->pgd, mm);
-
-	local_flush_tlb_all();
-	if (icache_is_aivivt())
-		__local_flush_icache_all();
+	switch_mm(NULL, mm, NULL);
 }
 
 void efi_virtmap_load(void)
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index fce95e17cf7f..1095aa483a1c 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -1,3 +1,4 @@
+#include <linux/ftrace.h>
 #include <linux/percpu.h>
 #include <linux/slab.h>
 #include <asm/cacheflush.h>
@@ -71,6 +72,13 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 	local_dbg_save(flags);
 
 	/*
+	 * Function graph tracer state gets incosistent when the kernel
+	 * calls functions that never return (aka suspend finishers) hence
+	 * disable graph tracing during their execution.
+	 */
+	pause_graph_tracing();
+
+	/*
 	 * mm context saved on the stack, it will be restored when
 	 * the cpu comes out of reset through the identity mapped
 	 * page tables, so that the thread address space is properly
@@ -111,6 +119,8 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 			hw_breakpoint_restore(NULL);
 	}
 
+	unpause_graph_tracing();
+
 	/*
 	 * Restore pstate flags. OS lock and mdscr have been already
 	 * restored, so from this point onwards, debugging is fully
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index 1599701ef044..86c289832272 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -864,6 +864,10 @@ ENTRY(__kvm_flush_vm_context)
 ENDPROC(__kvm_flush_vm_context)
 
 __kvm_hyp_panic:
+	// Stash PAR_EL1 before corrupting it in __restore_sysregs
+	mrs	x0, par_el1
+	push	x0, xzr
+
 	// Guess the context by looking at VTTBR:
 	// If zero, then we're already a host.
 	// Otherwise restore a minimal host context before panicing.
@@ -898,7 +902,7 @@ __kvm_hyp_panic:
 	mrs	x3, esr_el2
 	mrs	x4, far_el2
 	mrs	x5, hpfar_el2
-	mrs	x6, par_el1
+	pop	x6, xzr		// active context PAR_EL1
 	mrs	x7, tpidr_el2
 
 	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
@@ -914,7 +918,7 @@ __kvm_hyp_panic:
 ENDPROC(__kvm_hyp_panic)
 
 __hyp_panic_str:
-	.ascii	"HYP panic:\nPS:%08x PC:%p ESR:%p\nFAR:%p HPFAR:%p PAR:%p\nVCPU:%p\n\0"
+	.ascii	"HYP panic:\nPS:%08x PC:%016x ESR:%08x\nFAR:%016x HPFAR:%016x PAR:%016x\nVCPU:%p\n\0"
 
 	.align	2
@@ -1015,9 +1019,15 @@ el1_trap:
 	b.ne	1f		// Not an abort we care about
 
 	/* This is an abort. Check for permission fault */
+alternative_if_not ARM64_WORKAROUND_834220
 	and	x2, x1, #ESR_ELx_FSC_TYPE
 	cmp	x2, #FSC_PERM
 	b.ne	1f		// Not a permission fault
+alternative_else
+	nop			// Use the permission fault path to
+	nop			// check for a valid S1 translation,
+	nop			// regardless of the ESR value.
+alternative_endif
 
 	/*
	 * Check for Stage-1 page table walk, which is guaranteed
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index 85c57158dcd9..648112e90ed5 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -48,7 +48,7 @@ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
 
 	/* Note: These now point to the banked copies */
 	*vcpu_spsr(vcpu) = new_spsr_value;
-	*vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
+	*vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
 
 	/* Branch to exception vector */
 	if (sctlr & (1 << 13))
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 131a199114b4..7963aa4b5d28 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -18,6 +18,7 @@
  */
 
 #include <linux/gfp.h>
+#include <linux/acpi.h>
 #include <linux/export.h>
 #include <linux/slab.h>
 #include <linux/genalloc.h>
@@ -28,9 +29,6 @@
 
 #include <asm/cacheflush.h>
 
-struct dma_map_ops *dma_ops;
-EXPORT_SYMBOL(dma_ops);
-
 static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
				 bool coherent)
 {
@@ -515,13 +513,7 @@ EXPORT_SYMBOL(dummy_dma_ops);
 
 static int __init arm64_dma_init(void)
 {
-	int ret;
-
-	dma_ops = &swiotlb_dma_ops;
-
-	ret = atomic_pool_init();
-
-	return ret;
+	return atomic_pool_init();
 }
 arch_initcall(arm64_dma_init);
 
@@ -552,10 +544,14 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
 {
 	bool coherent = is_device_dma_coherent(dev);
 	int ioprot = dma_direction_to_prot(DMA_BIDIRECTIONAL, coherent);
+	size_t iosize = size;
 	void *addr;
 
 	if (WARN(!dev, "cannot create IOMMU mapping for unknown device\n"))
 		return NULL;
+
+	size = PAGE_ALIGN(size);
+
 	/*
	 * Some drivers rely on this, and we probably don't want the
	 * possibility of stale kernel data being read by devices anyway.
@@ -566,7 +562,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
 		struct page **pages;
 		pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
 
-		pages = iommu_dma_alloc(dev, size, gfp, ioprot, handle,
+		pages = iommu_dma_alloc(dev, iosize, gfp, ioprot, handle,
					flush_page);
 		if (!pages)
 			return NULL;
@@ -574,7 +570,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
 		addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
					      __builtin_return_address(0));
 		if (!addr)
-			iommu_dma_free(dev, pages, size, handle);
+			iommu_dma_free(dev, pages, iosize, handle);
 	} else {
 		struct page *page;
 		/*
@@ -591,7 +587,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
 		if (!addr)
 			return NULL;
 
-		*handle = iommu_dma_map_page(dev, page, 0, size, ioprot);
+		*handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
 		if (iommu_dma_mapping_error(dev, *handle)) {
 			if (coherent)
 				__free_pages(page, get_order(size));
@@ -606,6 +602,9 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
 static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
			       dma_addr_t handle, struct dma_attrs *attrs)
 {
+	size_t iosize = size;
+
+	size = PAGE_ALIGN(size);
 	/*
	 * @cpu_addr will be one of 3 things depending on how it was allocated:
	 * - A remapped array of pages from iommu_dma_alloc(), for all
@@ -617,17 +616,17 @@ static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
	 * Hence how dodgy the below logic looks...
	 */
 	if (__in_atomic_pool(cpu_addr, size)) {
-		iommu_dma_unmap_page(dev, handle, size, 0, NULL);
+		iommu_dma_unmap_page(dev, handle, iosize, 0, NULL);
 		__free_from_pool(cpu_addr, size);
 	} else if (is_vmalloc_addr(cpu_addr)){
 		struct vm_struct *area = find_vm_area(cpu_addr);
 
 		if (WARN_ON(!area || !area->pages))
 			return;
-		iommu_dma_free(dev, area->pages, size, &handle);
+		iommu_dma_free(dev, area->pages, iosize, &handle);
 		dma_common_free_remap(cpu_addr, size, VM_USERMAP);
 	} else {
-		iommu_dma_unmap_page(dev, handle, size, 0, NULL);
+		iommu_dma_unmap_page(dev, handle, iosize, 0, NULL);
 		__free_pages(virt_to_page(cpu_addr), get_order(size));
 	}
 }
@@ -984,8 +983,8 @@ static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			struct iommu_ops *iommu, bool coherent)
 {
-	if (!acpi_disabled && !dev->archdata.dma_ops)
-		dev->archdata.dma_ops = dma_ops;
+	if (!dev->archdata.dma_ops)
+		dev->archdata.dma_ops = &swiotlb_dma_ops;
 
 	dev->archdata.dma_coherent = coherent;
 	__iommu_setup_dma_ops(dev, dma_base, size, iommu);
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index e3f563c81c48..abb66f84d4ac 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -362,8 +362,8 @@ static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
	 * for now. This will get more fine grained later once all memory
	 * is mapped
	 */
-	unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
-	unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
+	unsigned long kernel_x_start = round_down(__pa(_stext), SWAPPER_BLOCK_SIZE);
+	unsigned long kernel_x_end = round_up(__pa(__init_end), SWAPPER_BLOCK_SIZE);
 
 	if (end < kernel_x_start) {
 		create_mapping(start, __phys_to_virt(start),
@@ -451,18 +451,18 @@ static void __init fixup_executable(void)
 {
 #ifdef CONFIG_DEBUG_RODATA
 	/* now that we are actually fully mapped, make the start/end more fine grained */
-	if (!IS_ALIGNED((unsigned long)_stext, SECTION_SIZE)) {
+	if (!IS_ALIGNED((unsigned long)_stext, SWAPPER_BLOCK_SIZE)) {
 		unsigned long aligned_start = round_down(__pa(_stext),
-							 SECTION_SIZE);
+							 SWAPPER_BLOCK_SIZE);
 
 		create_mapping(aligned_start, __phys_to_virt(aligned_start),
				__pa(_stext) - aligned_start,
				PAGE_KERNEL);
 	}
 
-	if (!IS_ALIGNED((unsigned long)__init_end, SECTION_SIZE)) {
+	if (!IS_ALIGNED((unsigned long)__init_end, SWAPPER_BLOCK_SIZE)) {
 		unsigned long aligned_end = round_up(__pa(__init_end),
-							  SECTION_SIZE);
+							  SWAPPER_BLOCK_SIZE);
 		create_mapping(__pa(__init_end), (unsigned long)__init_end,
				aligned_end - __pa(__init_end),
				PAGE_KERNEL);
@@ -475,7 +475,7 @@ void mark_rodata_ro(void)
 {
 	create_mapping_late(__pa(_stext), (unsigned long)_stext,
				(unsigned long)_etext - (unsigned long)_stext,
-				PAGE_KERNEL_EXEC | PTE_RDONLY);
+				PAGE_KERNEL_ROX);
 }
 #endif
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index cf3c7d4a1b58..d6a53ef2350b 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -50,7 +50,7 @@ static const int bpf2a64[] = {
 	[BPF_REG_8] = A64_R(21),
 	[BPF_REG_9] = A64_R(22),
 	/* read-only frame pointer to access stack */
-	[BPF_REG_FP] = A64_FP,
+	[BPF_REG_FP] = A64_R(25),
 	/* temporary register for internal BPF JIT */
 	[TMP_REG_1] = A64_R(23),
 	[TMP_REG_2] = A64_R(24),
@@ -155,18 +155,49 @@ static void build_prologue(struct jit_ctx *ctx)
 		stack_size += 4; /* extra for skb_copy_bits buffer */
 	stack_size = STACK_ALIGN(stack_size);
 
+	/*
+	 * BPF prog stack layout
+	 *
+	 *                         high
+	 * original A64_SP =>   0:+-----+ BPF prologue
+	 *                        |FP/LR|
+	 * current A64_FP =>  -16:+-----+
+	 *                        | ... | callee saved registers
+	 *                        +-----+
+	 *                        |     | x25/x26
+	 * BPF fp register => -80:+-----+
+	 *                        |     |
+	 *                        | ... | BPF prog stack
+	 *                        |     |
+	 *                        |     |
+	 * current A64_SP =>      +-----+
+	 *                        |     |
+	 *                        | ... | Function call stack
+	 *                        |     |
+	 *                        +-----+
+	 *                          low
+	 *
+	 */
+
+	/* Save FP and LR registers to stay align with ARM64 AAPCS */
+	emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
+	emit(A64_MOV(1, A64_FP, A64_SP), ctx);
+
 	/* Save callee-saved register */
 	emit(A64_PUSH(r6, r7, A64_SP), ctx);
 	emit(A64_PUSH(r8, r9, A64_SP), ctx);
 	if (ctx->tmp_used)
 		emit(A64_PUSH(tmp1, tmp2, A64_SP), ctx);
 
-	/* Set up BPF stack */
-	emit(A64_SUB_I(1, A64_SP, A64_SP, stack_size), ctx);
+	/* Save fp (x25) and x26. SP requires 16 bytes alignment */
+	emit(A64_PUSH(fp, A64_R(26), A64_SP), ctx);
 
-	/* Set up frame pointer */
+	/* Set up BPF prog stack base register (x25) */
 	emit(A64_MOV(1, fp, A64_SP), ctx);
 
+	/* Set up function call stack */
+	emit(A64_SUB_I(1, A64_SP, A64_SP, stack_size), ctx);
+
 	/* Clear registers A and X */
 	emit_a64_mov_i64(ra, 0, ctx);
 	emit_a64_mov_i64(rx, 0, ctx);
@@ -190,14 +221,17 @@ static void build_epilogue(struct jit_ctx *ctx)
 	/* We're done with BPF stack */
 	emit(A64_ADD_I(1, A64_SP, A64_SP, stack_size), ctx);
 
+	/* Restore fs (x25) and x26 */
+	emit(A64_POP(fp, A64_R(26), A64_SP), ctx);
+
 	/* Restore callee-saved register */
 	if (ctx->tmp_used)
 		emit(A64_POP(tmp1, tmp2, A64_SP), ctx);
 	emit(A64_POP(r8, r9, A64_SP), ctx);
 	emit(A64_POP(r6, r7, A64_SP), ctx);
 
-	/* Restore frame pointer */
-	emit(A64_MOV(1, fp, A64_SP), ctx);
+	/* Restore FP/LR registers */
+	emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);
 
 	/* Set return value */
 	emit(A64_MOV(1, A64_R(0), r0), ctx);
@@ -758,7 +792,7 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
 	if (bpf_jit_enable > 1)
 		bpf_jit_dump(prog->len, image_size, 2, ctx.image);
 
-	bpf_flush_icache(ctx.image, ctx.image + ctx.idx);
+	bpf_flush_icache(header, ctx.image + ctx.idx);
 
 	set_memory_ro((unsigned long)header, header->pages);
 	prog->bpf_func = (void *)ctx.image;
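
Note on the barrier.h hunk above: smp_load_acquire() now loads through a small union rather than a plain typeof(*p) temporary, which gives the inline asm a writable, correctly-sized destination independent of any qualifiers on *p (the usual motivation for this union idiom). The following stand-alone sketch shows the same pattern for a fixed 32-bit type; it is illustrative only, assumes an AArch64 target with GCC/Clang inline asm, and load_acquire_u32 is a made-up name, not a kernel API.

	/* Illustration of the union-based acquire-load idiom (not kernel code). */
	#include <stdint.h>
	#include <stdio.h>

	#define load_acquire_u32(p) ({                                  \
	        union { uint32_t __val; char __c[1]; } __u;             \
	        /* LDAR provides load-acquire ordering; the result is   \
	         * written into the union through a plain u32 lvalue. */\
	        asm volatile ("ldar %w0, %1"                            \
	                      : "=r" (*(uint32_t *)__u.__c)             \
	                      : "Q" (*(const uint32_t *)(p))            \
	                      : "memory");                              \
	        __u.__val;                                              \
	})

	int main(void)
	{
	        static const uint32_t flag = 42; /* works even for a const object */
	        printf("%u\n", load_acquire_u32(&flag));
	        return 0;
	}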