/*-
 * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Raytheon BBN Technologies Corp and Defense Advanced Research Projects
 * Agency and which was developed by Matt Thomas of 3am Software Foundry.
 *
 * This material is based upon work supported by the Defense Advanced Research
 * Projects Agency and Space and Naval Warfare Systems Center, Pacific, under
 * Contract No. N66001-09-C-2073.
 * Approved for Public Release, Distribution Unlimited
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 *
 */
#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: booke_cache.c,v 1.2 2011/01/18 01:02:52 matt Exp $");

#include <sys/param.h>
#include <sys/cpu.h>

#include <uvm/uvm_extern.h>

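/*
 * Inline wrappers around the Book E cache block instructions.  Each one
 * operates on the single cache block containing va + off:
 *
 *	dcbf	write a modified data cache block back and invalidate it
 *	dcbst	write a modified data cache block back, leaving it valid
 *	dcbi	invalidate a data cache block without writing it back
 *	dcbz	zero a data cache block
 *	dcba	allocate a data cache block without fetching it from memory
 *	icbi	invalidate an instruction cache block
 *
 * The "b" constraint keeps the base operand out of r0, which the
 * effective address calculation would treat as 0.
 */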
static inline void
dcbf(vaddr_t va, vsize_t off)
{
	__asm volatile("dcbf\t%0,%1" : : "b" (va), "r" (off));
}

static inline void
dcbst(vaddr_t va, vsize_t off)
{
	__asm volatile("dcbst\t%0,%1" : : "b" (va), "r" (off));
}

static inline void
dcbi(vaddr_t va, vsize_t off)
{
	__asm volatile("dcbi\t%0,%1" : : "b" (va), "r" (off));
}

static inline void
dcbz(vaddr_t va, vsize_t off)
{
	__asm volatile("dcbz\t%0,%1" : : "b" (va), "r" (off));
}

static inline void
dcba(vaddr_t va, vsize_t off)
{
	__asm volatile("dcba\t%0,%1" : : "b" (va), "r" (off));
}

static inline void
icbi(vaddr_t va, vsize_t off)
{
	__asm volatile("icbi\t%0,%1" : : "b" (va), "r" (off));
}

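/*
 * Apply a cache block operation to every cache line touched by
 * [va, va + len): round va down to a line boundary, extend len to
 * cover the partial first line, issue the operation once per line,
 * and finish with an mbar memory barrier.
 */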
static inline void
cache_op(vaddr_t va, vsize_t len, vsize_t line_size,
	void (*op)(vaddr_t, vsize_t))
{
	KASSERT(line_size > 0);

	if (len == 0)
		return;

	/* Make sure we flush all cache lines */
	len += va & (line_size - 1);
	va &= -line_size;

	for (vsize_t i = 0; i < len; i += line_size)
		(*op)(va, i);
	__asm volatile("mbar 0");
}

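/*
 * Whole-page operations: act on the PAGE_SIZE bytes starting at va,
 * using the current CPU's cache line size.
 */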
void
dcache_wb_page(vaddr_t va)
{
	cache_op(va, PAGE_SIZE, curcpu()->ci_ci.dcache_line_size, dcbst);
}

void
dcache_wbinv_page(vaddr_t va)
{
	cache_op(va, PAGE_SIZE, curcpu()->ci_ci.dcache_line_size, dcbf);
}

void
dcache_inv_page(vaddr_t va)
{
	cache_op(va, PAGE_SIZE, curcpu()->ci_ci.dcache_line_size, dcbi);
}

void
dcache_zero_page(vaddr_t va)
{
	cache_op(va, PAGE_SIZE, curcpu()->ci_ci.dcache_line_size, dcbz);
}

void
icache_inv_page(vaddr_t va)
{
	__asm volatile("msync");
	cache_op(va, PAGE_SIZE, curcpu()->ci_ci.icache_line_size, icbi);
	__asm volatile("msync");
	/* synchronizing instruction will be the rfi to user mode */
}

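/*
 * Arbitrary-range operations: the same primitives applied to
 * [va, va + len).
 */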
void
dcache_wb(vaddr_t va, vsize_t len)
{
	cache_op(va, len, curcpu()->ci_ci.dcache_line_size, dcbst);
}

void
dcache_wbinv(vaddr_t va, vsize_t len)
{
	cache_op(va, len, curcpu()->ci_ci.dcache_line_size, dcbf);
}

void
dcache_inv(vaddr_t va, vsize_t len)
{
	cache_op(va, len, curcpu()->ci_ci.dcache_line_size, dcbi);
}

void
icache_inv(vaddr_t va, vsize_t len)
{
	__asm volatile("msync");
	cache_op(va, len, curcpu()->ci_ci.icache_line_size, icbi);
	__asm volatile("msync");
}