/*
 * Copyright (c) 2015-2019 François Tigeot <ftigeot@wolfpond.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _ASM_CACHEFLUSH_H_
#define _ASM_CACHEFLUSH_H_

#include <asm/special_insns.h>

#include <vm/pmap.h>
#include <vm/vm_page.h>

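/*
 * Linux-compatible set_memory_*() helpers: change the caching attributes
 * of a range of kernel virtual memory via pmap_change_attr().  The first
 * argument is a kernel virtual address and 'numpages' the number of pages
 * to convert; the wrappers unconditionally report success (0).
 */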
static inline int
set_memory_uc(unsigned long addr, int numpages)
{
	pmap_change_attr(addr, numpages, PAT_UNCACHED);
	return 0;
}

static inline int set_memory_wc(unsigned long vaddr, int numpages)
{
	pmap_change_attr(vaddr, numpages, PAT_WRITE_COMBINING);
	return 0;
}

static inline int set_memory_wb(unsigned long vaddr, int numpages)
{
	pmap_change_attr(vaddr, numpages, PAT_WRITE_BACK);
	return 0;
}

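/*
 * set_pages_uc()/set_pages_wb(): change the caching attributes of
 * 'num_pages' pages starting at 'page'.  The Linux struct page pointer is
 * really a DragonFly vm_page; the attribute change is applied through the
 * direct map (DMAP) address of the page's physical address.
 */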
static inline int set_pages_uc(struct page *page, int num_pages)
{
	struct vm_page *p = (struct vm_page *)page;

	pmap_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(p)),
			 num_pages, PAT_UNCACHED);

	return 0;
}

static inline int set_pages_wb(struct page *page, int num_pages)
{
	struct vm_page *p = (struct vm_page *)page;

	pmap_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(p)),
			 num_pages, PAT_WRITE_BACK);

	return 0;
}

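/*
 * set_pages_array_*(): set the memory attribute of each page in the
 * 'pages' array individually via pmap_page_set_memattr().  'addrinarray'
 * is the number of array entries, mirroring the Linux prototype.
 */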
static inline int
set_pages_array_uc(struct page **pages, int addrinarray)
{
	for (int i = 0; i < addrinarray; i++)
		pmap_page_set_memattr((struct vm_page *)pages[i], VM_MEMATTR_UNCACHEABLE);

	return 0;
}

static inline int
set_pages_array_wb(struct page **pages, int addrinarray)
{
	for (int i = 0; i < addrinarray; i++)
		pmap_page_set_memattr((struct vm_page *)pages[i], VM_MEMATTR_WRITE_BACK);

	return 0;
}

static inline int
set_pages_array_wc(struct page **pages, int addrinarray)
{
	for (int i = 0; i < addrinarray; i++)
		pmap_page_set_memattr((struct vm_page *)pages[i], VM_MEMATTR_WRITE_COMBINING);

	return 0;
}

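/*
 * Example usage (illustrative sketch only; 'vaddr' is assumed to be a
 * kernel virtual address and 'npages' its size in pages, both obtained
 * elsewhere by the caller):
 *
 *	set_memory_wc((unsigned long)vaddr, npages);
 *	...
 *	set_memory_wb((unsigned long)vaddr, npages);
 */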
#endif	/* _ASM_CACHEFLUSH_H_ */