xref: /linux/include/asm-generic/cacheflush.h (revision 6c8c1406)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_GENERIC_CACHEFLUSH_H
#define _ASM_GENERIC_CACHEFLUSH_H

#include <linux/instrumented.h>

struct mm_struct;
struct vm_area_struct;
struct page;
struct address_space;

/*
 * The cache doesn't need to be flushed when TLB entries change if the
 * cache is mapped to physical memory rather than virtual memory.
 */
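/*
 * The hooks below are no-op (or minimal) defaults for such architectures.
 * An architecture that does need explicit cache maintenance overrides a
 * hook by providing its own implementation in its <asm/cacheflush.h> and
 * defining the matching macro (e.g. "#define flush_cache_vmap
 * flush_cache_vmap") so that the generic version here is compiled out.
 */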
#ifndef flush_cache_all
static inline void flush_cache_all(void)
{
}
#endif

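/*
 * Flush all cache lines associated with the user address space @mm,
 * typically before its page tables are torn down.
 */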
#ifndef flush_cache_mm
static inline void flush_cache_mm(struct mm_struct *mm)
{
}
#endif

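/*
 * Same as flush_cache_mm(), but called at fork() time (dup_mmap()); kept
 * separate so that VIPT caches can optimize the two cases differently.
 */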
#ifndef flush_cache_dup_mm
static inline void flush_cache_dup_mm(struct mm_struct *mm)
{
}
#endif

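/*
 * Flush cache lines covering the user virtual address range [start, end)
 * in @vma, before the page tables for that range are changed or removed.
 */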
#ifndef flush_cache_range
static inline void flush_cache_range(struct vm_area_struct *vma,
				     unsigned long start,
				     unsigned long end)
{
}
#endif

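/*
 * Flush the cache lines of a single user page, identified by its user
 * virtual address @vmaddr in @vma and the page frame @pfn it maps, before
 * its page table entry is changed or removed.
 */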
#ifndef flush_cache_page
static inline void flush_cache_page(struct vm_area_struct *vma,
				    unsigned long vmaddr,
				    unsigned long pfn)
{
}
#endif

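/*
 * flush_dcache_page() is called when the kernel reads from or writes to a
 * page cache page that may also be mapped into user space, so that
 * architectures with aliasing D-caches can keep the kernel and user views
 * coherent.  An architecture that implements it also defines
 * ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE to 1 in its <asm/cacheflush.h>.
 */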
#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
static inline void flush_dcache_page(struct page *page)
{
}

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
#endif

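/*
 * flush_dcache_mmap_lock()/unlock() are taken by the core VM around
 * updates to a file mapping's VMA tree, so that an architecture's
 * flush_dcache_page() can walk the user mappings of a page safely.
 */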
#ifndef flush_dcache_mmap_lock
static inline void flush_dcache_mmap_lock(struct address_space *mapping)
{
}
#endif

#ifndef flush_dcache_mmap_unlock
static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
{
}
#endif

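/*
 * Make instructions the kernel has written to the kernel virtual address
 * range [start, end) visible to instruction fetch, e.g. after loading a
 * module or patching kernel text.
 */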
#ifndef flush_icache_range
static inline void flush_icache_range(unsigned long start, unsigned long end)
{
}
#endif

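/*
 * Same as flush_icache_range(), but for instructions written to a
 * user-space mapping; defaults to flush_icache_range().
 */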
#ifndef flush_icache_user_range
#define flush_icache_user_range flush_icache_range
#endif

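/*
 * Legacy per-page I-cache hook; its work can be done in
 * flush_dcache_page() and update_mmu_cache() instead.
 */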
#ifndef flush_icache_page
static inline void flush_icache_page(struct vm_area_struct *vma,
				     struct page *page)
{
}
#endif

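/*
 * Make @len bytes the kernel has just written into a user page (mapped at
 * user virtual address @addr in @vma) visible to instruction fetch; used
 * by copy_to_user_page() below.
 */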
#ifndef flush_icache_user_page
static inline void flush_icache_user_page(struct vm_area_struct *vma,
					   struct page *page,
					   unsigned long addr, int len)
{
}
#endif

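/*
 * Called after a new kernel virtual mapping of [start, end) has been set
 * up by vmalloc()/vmap(), before the mapping is used.
 */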
#ifndef flush_cache_vmap
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
}
#endif

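/* Called before a vmalloc()/vmap() mapping of [start, end) is torn down. */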
#ifndef flush_cache_vunmap
static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
}
#endif

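/*
 * Copy data into a page that is also mapped into user space: @dst is the
 * kernel mapping of the page and @vaddr its user virtual address in @vma.
 * The instrumentation hook lets the sanitizers treat this as a copy to
 * user space, and the I-cache flush keeps instruction fetch coherent
 * (ptrace uses this path to write breakpoints, for example).
 */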
#ifndef copy_to_user_page
#define copy_to_user_page(vma, page, vaddr, dst, src, len)	\
	do { \
		instrument_copy_to_user((void __user *)dst, src, len); \
		memcpy(dst, src, len); \
		flush_icache_user_page(vma, page, vaddr, len); \
	} while (0)
#endif

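/*
 * The reverse of copy_to_user_page(): copy data out of a user-mapped page
 * through its kernel mapping @src into the kernel buffer @dst, with the
 * instrumentation hooks bracketing the copy for the sanitizers.
 */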
#ifndef copy_from_user_page
#define copy_from_user_page(vma, page, vaddr, dst, src, len)		  \
	do {								  \
		instrument_copy_from_user_before(dst, (void __user *)src, \
						 len);			  \
		memcpy(dst, src, len);					  \
		instrument_copy_from_user_after(dst, (void __user *)src, len, \
						0);			  \
	} while (0)
#endif

#endif /* _ASM_GENERIC_CACHEFLUSH_H */