 arch/riscv/include/asm/cacheflush.h | 63 +++++++++++++++++++++++++++++++++----
 1 file changed, 59 insertions(+), 4 deletions(-)
diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h
index ad8678f1b54a..555b20b11dc3 100644
--- a/arch/riscv/include/asm/cacheflush.h
+++ b/arch/riscv/include/asm/cacheflush.h
@@ -6,11 +6,66 @@
 #ifndef _ASM_RISCV_CACHEFLUSH_H
 #define _ASM_RISCV_CACHEFLUSH_H
 
-#include <asm-generic/cacheflush.h>
+#include <linux/mm.h>
 
-#undef flush_icache_range
-#undef flush_icache_user_range
-#undef flush_dcache_page
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
+
+/*
+ * The cache doesn't need to be flushed when TLB entries change when
+ * the cache is mapped to physical memory, not virtual memory
+ */
+static inline void flush_cache_all(void)
+{
+}
+
+static inline void flush_cache_mm(struct mm_struct *mm)
+{
+}
+
+static inline void flush_cache_dup_mm(struct mm_struct *mm)
+{
+}
+
+static inline void flush_cache_range(struct vm_area_struct *vma,
+				     unsigned long start,
+				     unsigned long end)
+{
+}
+
+static inline void flush_cache_page(struct vm_area_struct *vma,
+				    unsigned long vmaddr,
+				    unsigned long pfn)
+{
+}
+
+static inline void flush_dcache_mmap_lock(struct address_space *mapping)
+{
+}
+
+static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
+{
+}
+
+static inline void flush_icache_page(struct vm_area_struct *vma,
+				     struct page *page)
+{
+}
+
+static inline void flush_cache_vmap(unsigned long start, unsigned long end)
+{
+}
+
+static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
+{
+}
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+	do { \
+		memcpy(dst, src, len); \
+		flush_icache_user_range(vma, page, vaddr, len); \
+	} while (0)
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+	memcpy(dst, src, len)
 
 static inline void local_flush_icache_all(void)
 {
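
All of the new hooks are empty because, as the comment added by the patch notes, the cache is indexed by physical rather than virtual addresses: every mapping of a page, kernel or user, hits the same cache lines, so there is no aliasing to maintain when TLB entries change. The one place real work remains is copy_to_user_page(), which still calls flush_icache_user_range() after the memcpy(), since stores do not automatically become visible to the instruction cache. Below is a minimal, hypothetical sketch of a caller that relies on that behaviour; patch_user_insns() is an invented name for illustration and is not part of this patch, while copy_to_user_page() is the macro defined in the hunk above.

#include <linux/mm.h>
#include <asm/cacheflush.h>

/*
 * Hypothetical helper (not in this patch): write new instruction bytes
 * into a page that user space may execute, e.g. when planting a
 * breakpoint on another task's text.
 */
static void patch_user_insns(struct vm_area_struct *vma, struct page *page,
			     unsigned long vaddr, void *kmapped_dst,
			     const void *insns, size_t len)
{
	/*
	 * The memcpy() inside the macro is already coherent with every
	 * other mapping of the page (physically indexed cache); the
	 * following flush_icache_user_range() call is what forces the
	 * instruction cache to refetch the modified range before user
	 * space can execute it.
	 */
	copy_to_user_page(vma, page, vaddr, kmapped_dst, insns, len);
}

copy_from_user_page() reduces to a plain memcpy() because reading the page back never involves the instruction cache, so no maintenance is needed in that direction.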