xref: /dragonfly/sys/dev/drm/drm_cache.c (revision 3d33658b)
/**************************************************************************
 *
 * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <linux/export.h>
#include <drm/drmP.h>
#include <asm/cpufeature.h>

/*
 * clflushopt is an unordered instruction which needs fencing with mfence or
 * sfence to avoid ordering issues.  For drm_clflush_page this fencing happens
 * in the caller.
 */
static void
drm_clflush_page(struct page *page)
{
	uint8_t *page_virtual;
	unsigned int i;
	const int size = cpu_clflush_line_size;

	if (unlikely(page == NULL))
		return;

	/* Flush the page one cache line at a time; the caller fences. */
	page_virtual = kmap_atomic(page);
	for (i = 0; i < PAGE_SIZE; i += size)
		clflushopt(page_virtual + i);
	kunmap_atomic(page_virtual);
}

/*
 * The mb() pair provides the fencing that the unordered clflushopt
 * stream in drm_clflush_page() relies on.
 */
static void drm_cache_flush_clflush(struct page *pages[],
				    unsigned long num_pages)
{
	unsigned long i;

	mb();
	for (i = 0; i < num_pages; i++)
		drm_clflush_page(*pages++);
	mb();
}

void
drm_clflush_pages(struct page *pages[], unsigned long num_pages)
{
	/* Invalidate through the DragonFly pmap layer first. */
	pmap_invalidate_cache_pages((struct vm_page **)pages, num_pages);

	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
		drm_cache_flush_clflush(pages, num_pages);
		return;
	}

	/* No CLFLUSH support: write back and invalidate all caches. */
	cpu_wbinvd_on_all_cpus();
}
EXPORT_SYMBOL(drm_clflush_pages);
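
/*
 * Illustrative sketch, not part of the original file: how a caller might
 * flush an object's backing pages after writing them with the CPU so a
 * device that bypasses the cache sees the data. "example_bo" and its
 * fields are hypothetical.
 */
#if 0
struct example_bo {
	struct page **pages;		/* backing pages of the object */
	unsigned long num_pages;
};

static void
example_bo_flush_for_device(struct example_bo *bo)
{
	drm_clflush_pages(bo->pages, bo->num_pages);
}
#endif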

void
drm_clflush_sg(struct sg_table *st)
{
	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
		struct sg_page_iter sg_iter;

		mb();
		for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
			drm_clflush_page(sg_page_iter_page(&sg_iter));
		mb();

		return;
	}

	cpu_wbinvd_on_all_cpus();
}
EXPORT_SYMBOL(drm_clflush_sg);
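
/*
 * Illustrative sketch, not part of the original file: the same flush for a
 * buffer described by a scatter/gather table, e.g. one the CPU has just
 * written through a cached kernel mapping. The function name is
 * hypothetical.
 */
#if 0
static void
example_flush_sg_buffer(struct sg_table *st)
{
	drm_clflush_sg(st);	/* walks and flushes every page in st */
}
#endif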

void
drm_clflush_virt_range(void *in_addr, unsigned long length)
{
	char *addr = in_addr;

	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
		const int size = cpu_clflush_line_size;
		char *end = addr + length;

		/* Round the start address down to a cache-line boundary. */
		addr = (void *)(((unsigned long)addr) & -size);
		mb();
		for (; addr < end; addr += size)
			clflushopt(addr);
		clflushopt(end - 1); /* force serialisation */
		mb();
		return;
	}

	cpu_wbinvd_on_all_cpus();
}
EXPORT_SYMBOL(drm_clflush_virt_range);
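
/*
 * Illustrative sketch, not part of the original file: flushing a small
 * CPU-written range, e.g. one patched word in a mapped buffer. The range
 * need not be cache-line aligned; the helper aligns it internally.
 * "vaddr" and the write are hypothetical.
 */
#if 0
static void
example_patch_and_flush(uint32_t *vaddr)
{
	vaddr[0] = 0xdeadbeef;	/* CPU write through a cached mapping */
	drm_clflush_virt_range(vaddr, sizeof(*vaddr));
}
#endif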