/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/cacheflush.h
 *
 * Copyright (C) 1999-2002 Russell King.
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_CACHEFLUSH_H
#define __ASM_CACHEFLUSH_H

#include <linux/kgdb.h>
#include <linux/mm.h>

/*
 * This flag is used to indicate that the page pointed to by a pte is clean
 * and does not require cleaning before returning it to the user.
 */
#define PG_dcache_clean PG_arch_1

/*
 *	MM Cache Management
 *	===================
 *
 *	These methods are implemented in arch/arm64/mm/cache.S.
 *
 *	Start addresses are inclusive and end addresses are exclusive; start
 *	addresses should be rounded down, end addresses up.
 *
 *	See Documentation/core-api/cachetlb.rst for more information. Please
 *	note that the implementation assumes a non-aliasing VIPT D-cache and
 *	an (aliasing) VIPT I-cache.
 *
 *	flush_icache_range(start, end)
 *
 *		Ensure coherency between the I-cache and the D-cache in the
 *		region described by start, end.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	invalidate_icache_range(start, end)
 *
 *		Invalidate the I-cache in the region described by start, end.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	__flush_cache_user_range(start, end)
 *
 *		Ensure coherency between the I-cache and the D-cache in the
 *		region described by start, end.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	__flush_dcache_area(kaddr, size)
 *
 *		Ensure that the data held at kaddr is written back.
 *		- kaddr  - kernel virtual start address
 *		- size   - region size
 */
extern void __flush_icache_range(unsigned long start, unsigned long end);
extern int invalidate_icache_range(unsigned long start, unsigned long end);
extern void __flush_dcache_area(void *addr, size_t len);
extern void __inval_dcache_area(void *addr, size_t len);
extern void __clean_dcache_area_poc(void *addr, size_t len);
extern void __clean_dcache_area_pop(void *addr, size_t len);
extern void __clean_dcache_area_pou(void *addr, size_t len);
extern long __flush_cache_user_range(unsigned long start, unsigned long end);
extern void sync_icache_aliases(void *kaddr, unsigned long len);

static inline void flush_icache_range(unsigned long start, unsigned long end)
{
	__flush_icache_range(start, end);

	/*
	 * IPI all online CPUs so that they undergo a context synchronization
	 * event and are forced to refetch the new instructions.
	 */

	/*
	 * KGDB performs cache maintenance with interrupts disabled, so we
	 * will deadlock trying to IPI the secondary CPUs. In theory, we can
	 * set CACHE_FLUSH_IS_SAFE to 0 to avoid this known issue, but that
	 * just means that KGDB will elide the maintenance altogether! As it
	 * turns out, KGDB uses IPIs to round up the secondary CPUs during
	 * the patching operation, so we don't need extra IPIs here anyway.
	 * In which case, add a KGDB-specific bodge and return early.
	 */
	if (in_dbg_master())
		return;

	kick_all_cpus_sync();
}
#define flush_icache_range flush_icache_range
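
/*
 * Illustrative sketch (not part of this header's API): the typical caller
 * pattern after writing new instructions to memory. Real kernel code goes
 * through helpers such as aarch64_insn_patch_text(); this only shows the
 * cache-maintenance step in isolation. The 'example_' name is hypothetical.
 */
static inline void example_sync_new_insns(void *addr, size_t size)
{
	unsigned long start = (unsigned long)addr;

	/* Clean D-cache to PoU, invalidate I-cache, then IPI other CPUs. */
	flush_icache_range(start, start + size);
}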

/*
 * Cache maintenance functions used by the DMA API. Not to be used directly.
 */
extern void __dma_map_area(const void *, size_t, int);
extern void __dma_unmap_area(const void *, size_t, int);
extern void __dma_flush_area(const void *, size_t);
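
/*
 * Illustrative sketch: the arm64 DMA mapping code invokes these hooks
 * before and after a non-coherent device touches a buffer. The direction
 * argument takes the DMA_*_DEVICE values from <linux/dma-direction.h>
 * (not included by this header; a plain int is passed below only to make
 * the calling convention concrete). Drivers must use the DMA API instead.
 * The 'example_' name is hypothetical.
 */
static inline void example_sync_buf_for_device(const void *buf, size_t size,
					       int dir)
{
	/* Clean/invalidate the buffer so the device sees current data. */
	__dma_map_area(buf, size, dir);
}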

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space. Really, we want to allow our "user
 * space" model to handle this.
 */
extern void copy_to_user_page(struct vm_area_struct *, struct page *,
			      unsigned long, void *, const void *,
			      unsigned long);
#define copy_to_user_page copy_to_user_page
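
/*
 * Illustrative sketch of the ptrace/access_process_vm()-style usage
 * pattern: write instructions into another task's text page via the
 * kernel mapping, then let copy_to_user_page() perform the cache upkeep
 * needed for the user-visible alias. Helper name and argument handling
 * are hypothetical, shown only to make the six-argument call concrete.
 */
static inline void example_poke_user_text(struct vm_area_struct *vma,
					  struct page *page,
					  unsigned long uaddr,
					  const void *insns,
					  unsigned long len)
{
	void *kaddr = page_address(page) + (uaddr & ~PAGE_MASK);

	/* Copies via the kernel alias and fixes up the I-/D-caches. */
	copy_to_user_page(vma, page, uaddr, kaddr, insns, len);
}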

/*
 * flush_dcache_page is used when the kernel has written to the page
 * cache page at virtual address page->virtual.
 *
 * If this page isn't mapped (ie, page_mapping == NULL), or it might
 * have userspace mappings, then we _must_ always clean + invalidate
 * the dcache entries associated with the kernel mapping.
 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space. This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);
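
/*
 * Illustrative sketch: code that fills a page-cache page through its
 * kernel mapping calls flush_dcache_page() afterwards, so PG_dcache_clean
 * is cleared and user mappings / the I-cache are brought up to date
 * lazily. The helper name is hypothetical; memcpy() is assumed to be
 * available via <linux/string.h>.
 */
static inline void example_fill_pagecache_page(struct page *page,
					       const void *src, size_t len)
{
	memcpy(page_address(page), src, len);
	flush_dcache_page(page);
}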

static __always_inline void __flush_icache_all(void)
{
	if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC))
		return;

	asm("ic	ialluis");
	dsb(ish);
}

int set_memory_valid(unsigned long addr, int numpages, int enable);

int set_direct_map_invalid_noflush(struct page *page);
int set_direct_map_default_noflush(struct page *page);
bool kernel_page_present(struct page *page);
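
/*
 * Illustrative sketch: DEBUG_PAGEALLOC-style use of set_memory_valid() to
 * toggle the valid bit on a linear-map page so that stray accesses fault.
 * The helper name is hypothetical; pass enable = 0 to unmap, 1 to remap.
 */
static inline void example_guard_linear_page(struct page *page, int enable)
{
	set_memory_valid((unsigned long)page_address(page), 1, enable);
}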

#include <asm-generic/cacheflush.h>

#endif /* __ASM_CACHEFLUSH_H */