/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_GENERIC_CACHEFLUSH_H
#define _ASM_GENERIC_CACHEFLUSH_H

#include <linux/instrumented.h>

struct mm_struct;
struct vm_area_struct;
struct page;
struct address_space;

/*
 * The cache doesn't need to be flushed when TLB entries change, because
 * the cache is mapped to physical memory, not virtual memory.
 */
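/*
 * Each helper below is a no-op fallback. An architecture that does need
 * cache maintenance overrides a helper by defining it (as a function or
 * macro) in its own <asm/cacheflush.h> before including this header, so
 * the matching #ifndef guard skips the generic stub. A minimal sketch,
 * assuming a hypothetical architecture that only needs to implement
 * flush_icache_range():
 *
 *	// arch/foo/include/asm/cacheflush.h (illustrative only)
 *	extern void flush_icache_range(unsigned long start, unsigned long end);
 *	#define flush_icache_range flush_icache_range
 *	#include <asm-generic/cacheflush.h>
 */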
#ifndef flush_cache_all
static inline void flush_cache_all(void)
{
}
#endif

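/*
 * flush_cache_mm(), flush_cache_dup_mm(), flush_cache_range() and
 * flush_cache_page() flush user-space mappings at mm, range and
 * single-page granularity before the corresponding page tables are
 * changed or torn down; flush_cache_dup_mm() is the fork-time variant
 * of flush_cache_mm(). With a physically mapped cache there is nothing
 * to do, hence the empty stubs.
 */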
#ifndef flush_cache_mm
static inline void flush_cache_mm(struct mm_struct *mm)
{
}
#endif

#ifndef flush_cache_dup_mm
static inline void flush_cache_dup_mm(struct mm_struct *mm)
{
}
#endif

#ifndef flush_cache_range
static inline void flush_cache_range(struct vm_area_struct *vma,
				     unsigned long start,
				     unsigned long end)
{
}
#endif

#ifndef flush_cache_page
static inline void flush_cache_page(struct vm_area_struct *vma,
				    unsigned long vmaddr,
				    unsigned long pfn)
{
}
#endif

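/*
 * flush_dcache_page() is called when the kernel writes to, or reads a
 * user-modified, page cache page so that architectures with aliasing
 * D-caches can bring the kernel and user views back in sync.
 * ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 0 tells generic code that this
 * architecture does not need those calls.
 */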
#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
static inline void flush_dcache_page(struct page *page)
{
}

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
#endif

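/*
 * flush_dcache_mmap_lock()/unlock() bracket updates to a file mapping's
 * list of user mappings (mapping->i_mmap), so that an architecture whose
 * flush_dcache_page() walks those mappings sees a consistent view; the
 * generic no-op stubs need no locking.
 */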
#ifndef flush_dcache_mmap_lock
static inline void flush_dcache_mmap_lock(struct address_space *mapping)
{
}
#endif

#ifndef flush_dcache_mmap_unlock
static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
{
}
#endif

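/*
 * flush_icache_range() is called after the kernel writes instructions
 * into memory (module loading, kprobes, ...) so that the instruction
 * cache sees the new code. flush_icache_user_range() is the same
 * operation for user addresses and defaults to flush_icache_range();
 * flush_icache_user_page() covers a user page the kernel has just
 * written, e.g. from copy_to_user_page().
 */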
#ifndef flush_icache_range
static inline void flush_icache_range(unsigned long start, unsigned long end)
{
}
#endif

#ifndef flush_icache_user_range
#define flush_icache_user_range flush_icache_range
#endif

#ifndef flush_icache_user_page
static inline void flush_icache_user_page(struct vm_area_struct *vma,
					   struct page *page,
					   unsigned long addr, int len)
{
}
#endif

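/*
 * flush_cache_vmap() runs after a kernel virtual mapping has been
 * installed and flush_cache_vunmap() runs before one is torn down.
 * flush_cache_vmap_early() is meant for mappings created very early in
 * boot (e.g. the per-cpu areas), when the regular flush path may not be
 * usable yet.
 */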
#ifndef flush_cache_vmap
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
}
#endif

#ifndef flush_cache_vmap_early
static inline void flush_cache_vmap_early(unsigned long start, unsigned long end)
{
}
#endif

#ifndef flush_cache_vunmap
static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
}
#endif

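/*
 * copy_to_user_page() and copy_from_user_page() copy data between a
 * kernel buffer and a user page through the kernel mapping of that page
 * (e.g. for ptrace access via access_process_vm()). The "to user"
 * direction flushes the icache because the copied bytes may be
 * instructions, such as a debugger inserting a breakpoint; the
 * instrument_copy_*() hooks let sanitizers such as KMSAN treat these
 * copies like ordinary user accesses.
 */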
#ifndef copy_to_user_page
#define copy_to_user_page(vma, page, vaddr, dst, src, len)	\
	do { \
		instrument_copy_to_user((void __user *)dst, src, len); \
		memcpy(dst, src, len); \
		flush_icache_user_page(vma, page, vaddr, len); \
	} while (0)
#endif

#ifndef copy_from_user_page
#define copy_from_user_page(vma, page, vaddr, dst, src, len)		  \
	do {								  \
		instrument_copy_from_user_before(dst, (void __user *)src, \
						 len);			  \
		memcpy(dst, src, len);					  \
		instrument_copy_from_user_after(dst, (void __user *)src, len, \
						0);			  \
	} while (0)
#endif

#endif /* _ASM_GENERIC_CACHEFLUSH_H */