xref: /linux/arch/powerpc/include/asm/cacheflush.h (revision 52338415)
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /*
3  */
4 #ifndef _ASM_POWERPC_CACHEFLUSH_H
5 #define _ASM_POWERPC_CACHEFLUSH_H
6 
7 #ifdef __KERNEL__
8 
9 #include <linux/mm.h>
10 #include <asm/cputable.h>
11 
12 /*
13  * No cache flushing is required when address mappings are changed,
14  * because the caches on PowerPCs are physically addressed.
15  */
16 #define flush_cache_all()			do { } while (0)
17 #define flush_cache_mm(mm)			do { } while (0)
18 #define flush_cache_dup_mm(mm)			do { } while (0)
19 #define flush_cache_range(vma, start, end)	do { } while (0)
20 #define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
21 #define flush_icache_page(vma, page)		do { } while (0)
22 #define flush_cache_vunmap(start, end)		do { } while (0)
23 
24 #ifdef CONFIG_PPC_BOOK3S_64
25 /*
26  * Book3s has no ptesync after setting a pte, so without this ptesync it's
27  * possible for a kernel virtual mapping access to return a spurious fault
28  * if it's accessed right after the pte is set. The page fault handler does
29  * not expect this type of fault. flush_cache_vmap is not exactly the right
30  * place to put this, but it seems to work well enough.
31  */
32 static inline void flush_cache_vmap(unsigned long start, unsigned long end)
33 {
34 	asm volatile("ptesync" ::: "memory");
35 }
36 #else
37 static inline void flush_cache_vmap(unsigned long start, unsigned long end) { }
38 #endif
39 
40 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
41 extern void flush_dcache_page(struct page *page);
42 #define flush_dcache_mmap_lock(mapping)		do { } while (0)
43 #define flush_dcache_mmap_unlock(mapping)	do { } while (0)
44 
45 extern void flush_icache_range(unsigned long, unsigned long);
46 extern void flush_icache_user_range(struct vm_area_struct *vma,
47 				    struct page *page, unsigned long addr,
48 				    int len);
49 extern void __flush_dcache_icache(void *page_va);
50 extern void flush_dcache_icache_page(struct page *page);
51 #if defined(CONFIG_PPC32) && !defined(CONFIG_BOOKE)
52 extern void __flush_dcache_icache_phys(unsigned long physaddr);
53 #else
54 static inline void __flush_dcache_icache_phys(unsigned long physaddr)
55 {
56 	BUG();
57 }
58 #endif
59 
60 /*
61  * Write any modified data cache blocks out to memory and invalidate them.
62  * Does not invalidate the corresponding instruction cache blocks.
63  */
64 static inline void flush_dcache_range(unsigned long start, unsigned long stop)
65 {
66 	unsigned long shift = l1_cache_shift();
67 	unsigned long bytes = l1_cache_bytes();
68 	void *addr = (void *)(start & ~(bytes - 1));
69 	unsigned long size = stop - (unsigned long)addr + (bytes - 1);
70 	unsigned long i;
71 
72 	if (IS_ENABLED(CONFIG_PPC64)) {
73 		mb();	/* sync */
74 		isync();
75 	}
76 
77 	for (i = 0; i < size >> shift; i++, addr += bytes)
78 		dcbf(addr);
79 	mb();	/* sync */
80 
81 	if (IS_ENABLED(CONFIG_PPC64))
82 		isync();
83 }
84 
85 /*
86  * Write any modified data cache blocks out to memory.
87  * Does not invalidate the corresponding cache lines (especially for
88  * any corresponding instruction cache).
89  */
90 static inline void clean_dcache_range(unsigned long start, unsigned long stop)
91 {
92 	unsigned long shift = l1_cache_shift();
93 	unsigned long bytes = l1_cache_bytes();
94 	void *addr = (void *)(start & ~(bytes - 1));
95 	unsigned long size = stop - (unsigned long)addr + (bytes - 1);
96 	unsigned long i;
97 
98 	for (i = 0; i < size >> shift; i++, addr += bytes)
99 		dcbst(addr);
100 	mb();	/* sync */
101 }
102 
103 /*
104  * Like above, but invalidate the D-cache.  This is used by the 8xx
105  * to invalidate the cache so the PPC core doesn't get stale data
106  * from the CPM (no cache snooping here :-).
107  */
108 static inline void invalidate_dcache_range(unsigned long start,
109 					   unsigned long stop)
110 {
111 	unsigned long shift = l1_cache_shift();
112 	unsigned long bytes = l1_cache_bytes();
113 	void *addr = (void *)(start & ~(bytes - 1));
114 	unsigned long size = stop - (unsigned long)addr + (bytes - 1);
115 	unsigned long i;
116 
117 	for (i = 0; i < size >> shift; i++, addr += bytes)
118 		dcbi(addr);
119 	mb();	/* sync */
120 }
121 
122 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
123 	do { \
124 		memcpy(dst, src, len); \
125 		flush_icache_user_range(vma, page, vaddr, len); \
126 	} while (0)
127 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
128 	memcpy(dst, src, len)
129 
130 #endif /* __KERNEL__ */
131 
132 #endif /* _ASM_POWERPC_CACHEFLUSH_H */
133