/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 Regents of the University of California
 */
5
6 #ifndef _ASM_RISCV_CACHEFLUSH_H
7 #define _ASM_RISCV_CACHEFLUSH_H
8
9 #include <linux/mm.h>
10
11 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
12
/*
 * The cache doesn't need to be flushed when TLB entries change when
 * the cache is mapped to physical memory, not virtual memory.
 */
static inline void flush_cache_all(void)
{
	/* No-op: see the comment above. */
}
20
/* No-op: the data cache tracks physical memory, so a change of address
 * space requires no cache maintenance. */
static inline void flush_cache_mm(struct mm_struct *mm)
{
}
24
/* No-op: duplicating an address space (fork) needs no cache flush here. */
static inline void flush_cache_dup_mm(struct mm_struct *mm)
{
}
28
/* No-op: unmapping/remapping a user virtual range needs no cache flush. */
static inline void flush_cache_range(struct vm_area_struct *vma,
				     unsigned long start,
				     unsigned long end)
{
}
34
/* No-op: a single user page mapping change needs no cache flush. */
static inline void flush_cache_page(struct vm_area_struct *vma,
				    unsigned long vmaddr,
				    unsigned long pfn)
{
}
40
/* No-op: no extra locking is required around mapping updates. */
static inline void flush_dcache_mmap_lock(struct address_space *mapping)
{
}
44
/* No-op counterpart of flush_dcache_mmap_lock(). */
static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
{
}
48
/* No-op: icache maintenance is handled elsewhere (see flush_icache_mm
 * and the flush_icache_* macros below). */
static inline void flush_icache_page(struct vm_area_struct *vma,
				     struct page *page)
{
}
53
/* No-op: creating a new kernel virtual mapping needs no cache flush. */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
}
57
/* No-op: tearing down a kernel virtual mapping needs no cache flush. */
static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
}
61
/*
 * The kernel writing into a user page (e.g. ptrace, uprobes) must copy
 * the data and then flush the icache, in case the page holds code.
 * Reading from a user page needs only the copy.
 */
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
	do { \
		memcpy(dst, src, len); \
		flush_icache_user_range(vma, page, vaddr, len); \
	} while (0)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)
69
local_flush_icache_all(void)70 static inline void local_flush_icache_all(void)
71 {
72 asm volatile ("fence.i" ::: "memory");
73 }
74
75 #define PG_dcache_clean PG_arch_1
76
flush_dcache_page(struct page * page)77 static inline void flush_dcache_page(struct page *page)
78 {
79 if (test_bit(PG_dcache_clean, &page->flags))
80 clear_bit(PG_dcache_clean, &page->flags);
81 }
82
/*
 * RISC-V doesn't have an instruction to flush parts of the instruction cache,
 * so instead we just flush the whole thing.
 */
#define flush_icache_range(start, end) flush_icache_all()
#define flush_icache_user_range(vma, pg, addr, len) flush_icache_all()
89
#ifndef CONFIG_SMP

/* UP: flushing the local hart's icache is flushing "all" of them. */
#define flush_icache_all() local_flush_icache_all()
#define flush_icache_mm(mm, local) flush_icache_all()

#else /* CONFIG_SMP */

/* SMP: remote harts must be reached too; implemented out of line. */
void flush_icache_all(void);
void flush_icache_mm(struct mm_struct *mm, bool local);

#endif /* CONFIG_SMP */
101
102 /*
103 * Bits in sys_riscv_flush_icache()'s flags argument.
104 */
105 #define SYS_RISCV_FLUSH_ICACHE_LOCAL 1UL
106 #define SYS_RISCV_FLUSH_ICACHE_ALL (SYS_RISCV_FLUSH_ICACHE_LOCAL)
107
108 #endif /* _ASM_RISCV_CACHEFLUSH_H */
109