/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Cache flush operations for the Hexagon architecture
 *
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 */

#ifndef _ASM_CACHEFLUSH_H
#define _ASM_CACHEFLUSH_H

#include <linux/mm_types.h>

/* Cache flushing:
 *
 *  - flush_cache_all() flushes the entire cache
 *  - flush_cache_mm(mm) flushes the specified mm context's cache lines
 *  - flush_cache_page(mm, vmaddr, pfn) flushes a single page
 *  - flush_cache_range(vma, start, end) flushes a range of pages
 *  - flush_icache_range(start, end) flushes a range of instructions
 *  - flush_dcache_page(pg) flushes (writes back & invalidates) a page for dcache
 *  - flush_icache_page(vma, pg) flushes (invalidates) a page for icache
 *
 *  Need to double-check which one is really needed for ptrace stuff to work.
 */
#define LINESIZE	32
#define LINEBITS	5

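/*
 * Illustrative sketch only (not part of this header): a range-based
 * maintenance routine typically aligns down to a line boundary and
 * steps one cache line at a time.  LINESIZE is the line size in bytes
 * and LINEBITS its log2, so a line count can be derived by shifting.
 * do_one_line() below is a hypothetical per-line operation, named
 * purely for illustration:
 *
 *	unsigned long addr, lines;
 *
 *	addr = start & ~(LINESIZE - 1UL);
 *	lines = (end - addr + LINESIZE - 1) >> LINEBITS;
 *	for ( ; lines--; addr += LINESIZE)
 *		do_one_line(addr);
 */
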
/*
 * Flush Dcache range through current map.
 */
extern void flush_dcache_range(unsigned long start, unsigned long end);
#define flush_dcache_range flush_dcache_range

/*
 * Flush Icache range through current map.
 */
extern void flush_icache_range(unsigned long start, unsigned long end);
#define flush_icache_range flush_icache_range

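/*
 * Usage sketch (illustrative only): the classic reason to call these is
 * that the kernel has just written instructions through the data side,
 * e.g. when loading a module or planting a ptrace breakpoint, and the
 * instruction side must observe them.  With a hypothetical destination
 * dst and length len:
 *
 *	memcpy(dst, new_insns, len);
 *	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
 *
 * Whether a separate flush_dcache_range() over the same span is needed
 * first depends on how the port implements flush_icache_range(); that
 * is left as an assumption here, not a statement about this port.
 */
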
/*
 * Memory-management related flushes are there to ensure that, on
 * non-physically-indexed cache schemes, stale lines belonging to a given
 * ASID aren't left in the cache to confuse things.  The prototype Hexagon
 * Virtual Machine only uses a single ASID for all user-mode maps, which
 * should mean that they aren't necessary.  A brute-force, flush-everything
 * implementation, with the name xxxxx_hexagon(), is present in
 * arch/hexagon/mm/cache.c, but let's not wire it up until we know
 * it is needed.
 */
extern void flush_cache_all_hexagon(void);

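/*
 * For illustration only: if stale lines under the single ASID ever did
 * become a problem, wiring up the brute-force implementation would
 * presumably amount to overriding the generic no-ops with something
 * like the following (a sketch, deliberately not enabled here):
 *
 *	#define flush_cache_all()			flush_cache_all_hexagon()
 *	#define flush_cache_mm(mm)			flush_cache_all_hexagon()
 *	#define flush_cache_range(vma, start, end)	flush_cache_all_hexagon()
 */
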
/*
 * This may or may not ever have to be non-null, depending on the
 * virtual machine MMU.  For a native kernel, it's definitely a no-op.
 *
 * This is also the place where deferred cache coherency stuff seems
 * to happen, classically...  but instead we do it like ia64 and
 * clean the cache when the PTE is set.
 *
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
					unsigned long address, pte_t *ptep)
{
	/*  generic_ptrace_pokedata doesn't wind up here, does it?  */
}

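/*
 * A rough sketch of the "clean when the PTE is set" idea referred to
 * above (hypothetical pseudo-code, not this port's actual set_pte
 * path): when a present, executable mapping is established, push the
 * page's dirty dcache lines out so instruction fetch sees the right
 * bytes, along the lines of
 *
 *	if (mapping_is_present_and_executable(pte))
 *		flush_dcache_range(kaddr, kaddr + PAGE_SIZE);
 *
 * where kaddr stands for the kernel virtual address of the page being
 * mapped and the predicate is named purely for illustration.
 */
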
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, void *src, int len);
#define copy_to_user_page copy_to_user_page

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)

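/*
 * Why copy_to_user_page() is out of line while copy_from_user_page()
 * is a plain memcpy: the "to" direction can plant instructions in
 * another process (e.g. ptrace inserting a breakpoint), so the icache
 * may need attention afterwards.  A plausible shape for the out-of-line
 * definition, sketched here only to illustrate the idea:
 *
 *	memcpy(dst, src, len);
 *	if (vma->vm_flags & VM_EXEC)
 *		flush_icache_range((unsigned long)dst,
 *				   (unsigned long)dst + len);
 */
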
extern void hexagon_inv_dcache_range(unsigned long start, unsigned long end);
extern void hexagon_clean_dcache_range(unsigned long start, unsigned long end);

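/*
 * The two low-level primitives differ in direction: "clean" writes
 * dirty lines back to memory (the cached copy survives), while "inv"
 * discards lines so the next access refetches from memory.  A typical
 * non-coherent-DMA-style pattern, shown purely as an illustrative
 * sketch with a hypothetical buffer spanning [buf, buf + len):
 *
 *	hexagon_clean_dcache_range(buf, buf + len);
 *		(before a device reads the buffer from memory)
 *
 *	hexagon_inv_dcache_range(buf, buf + len);
 *		(before the CPU reads data the device has written)
 */
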
#include <asm-generic/cacheflush.h>

#endif