/*	$NetBSD: mmu_sh4.c,v 1.6 2002/11/04 01:31:43 itohy Exp $	*/

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by UCHIYAMA Yasushi.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>

#include <sh3/pte.h>	/* NetBSD/sh3 specific PTE */
#include <sh3/mmu.h>
#include <sh3/mmu_sh4.h>

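/*
 * SH-4 TLB maintenance.  The UTLB and ITLB are manipulated through
 * their memory-mapped address/data arrays; those accesses are made
 * while running from the uncached P2 segment (RUN_P2/RUN_P1).
 * SH4_MMU_HAZARD inserts the nops needed after an MMU register
 * update before translation resumes.
 */
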
#define	SH4_MMU_HAZARD	__asm__ __volatile__("nop;nop;nop;nop;nop;nop;nop;nop;")

static __inline__ void __sh4_itlb_invalidate_all(void);

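/*
 * Invalidate all four ITLB entries by zeroing their address-array
 * words, clearing each entry's valid bit.  Callers must already be
 * running in P2.
 */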
static __inline__ void
__sh4_itlb_invalidate_all()
{

	_reg_write_4(SH4_ITLB_AA, 0);
	_reg_write_4(SH4_ITLB_AA | (1 << SH4_ITLB_E_SHIFT), 0);
	_reg_write_4(SH4_ITLB_AA | (2 << SH4_ITLB_E_SHIFT), 0);
	_reg_write_4(SH4_ITLB_AA | (3 << SH4_ITLB_E_SHIFT), 0);
}

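/*
 * sh4_mmu_start:
 *	Reset and enable the MMU at boot: invalidate all TLB entries,
 *	set the current ASID to 0, then turn on address translation
 *	with the store queue protected from user access and the top
 *	UTLB entries wired for the u-area.
 */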
void
sh4_mmu_start()
{

	/* Zero-clear all TLB entries */
	_reg_write_4(SH4_MMUCR, 0);	/* zero wired entries (URB = 0) */
	sh_tlb_invalidate_all();

	/* Set current ASID to 0 */
	sh_tlb_set_asid(0);

	/*
	 * Deny user access to the store queue (SQMD) and reserve
	 * wired UTLB entries for the u-area (URB).
	 */
	_reg_write_4(SH4_MMUCR, SH4_MMUCR_AT | SH4_MMUCR_TI | SH4_MMUCR_SQMD |
	    (SH4_UTLB_ENTRY - UPAGES) << SH4_MMUCR_URB_SHIFT);

	SH4_MMU_HAZARD;
}

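/*
 * sh4_tlb_invalidate_addr:
 *	Invalidate the UTLB (and any matching ITLB) entry that maps
 *	`va' under `asid', using an associative address-array write.
 */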
void
sh4_tlb_invalidate_addr(int asid, vaddr_t va)
{
	u_int32_t pteh;
	va &= SH4_PTEH_VPN_MASK;

	/* Save current ASID */
	pteh = _reg_read_4(SH4_PTEH);
	/* Set ASID for associative write */
	_reg_write_4(SH4_PTEH, asid);

	/*
	 * Associative write (searches both UTLB and ITLB); no separate
	 * ITLB invalidate is required.
	 */
	RUN_P2;
	_reg_write_4(SH4_UTLB_AA | SH4_UTLB_A, va); /* Clear D, V */
	RUN_P1;
	/* Restore ASID */
	_reg_write_4(SH4_PTEH, pteh);
}

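/*
 * sh4_tlb_invalidate_asid:
 *	Invalidate every UTLB entry belonging to `asid' and the whole
 *	ITLB.
 */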
void
sh4_tlb_invalidate_asid(int asid)
{
	u_int32_t a;
	int e;

	/* Invalidate every UTLB entry whose ASID matches */
	RUN_P2;
	for (e = 0; e < SH4_UTLB_ENTRY; e++) {
		a = SH4_UTLB_AA | (e << SH4_UTLB_E_SHIFT);
		if ((_reg_read_4(a) & SH4_UTLB_AA_ASID_MASK) == asid)
			_reg_write_4(a, 0);
	}

	__sh4_itlb_invalidate_all();
	RUN_P1;
}

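/*
 * sh4_tlb_invalidate_all:
 *	Invalidate all non-wired UTLB entries (all entries if no wired
 *	region is configured) and the whole ITLB.
 */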
void
sh4_tlb_invalidate_all()
{
	u_int32_t a;
	int e, eend;

	/*
	 * If no wired region is configured (URB == 0), clear all
	 * entries; otherwise clear only the non-wired entries.
	 */
	a = _reg_read_4(SH4_MMUCR) & SH4_MMUCR_URB_MASK;
	eend = a ? (a >> SH4_MMUCR_URB_SHIFT) : SH4_UTLB_ENTRY;

	RUN_P2;
	for (e = 0; e < eend; e++) {
		a = SH4_UTLB_AA | (e << SH4_UTLB_E_SHIFT);
		_reg_write_4(a, 0);
		a = SH4_UTLB_DA1 | (e << SH4_UTLB_E_SHIFT);
		_reg_write_4(a, 0);
	}
	__sh4_itlb_invalidate_all();
	_reg_write_4(SH4_ITLB_DA1, 0);
	_reg_write_4(SH4_ITLB_DA1 | (1 << SH4_ITLB_E_SHIFT), 0);
	_reg_write_4(SH4_ITLB_DA1 | (2 << SH4_ITLB_E_SHIFT), 0);
	_reg_write_4(SH4_ITLB_DA1 | (3 << SH4_ITLB_E_SHIFT), 0);
	RUN_P1;
}

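/*
 * sh4_tlb_update:
 *	Replace any existing translation for (`asid', `va') and load
 *	the new PTE into the UTLB with the ldtlb instruction.  Any
 *	PCMCIA space attribute in the PTE is loaded into PTEA.
 */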
void
sh4_tlb_update(int asid, vaddr_t va, u_int32_t pte)
{
	u_int32_t oasid;
	u_int32_t ptel;

	KDASSERT(asid < 0x100 && (pte & ~PGOFSET) != 0 && va != 0);

	/* Save old ASID */
	oasid = _reg_read_4(SH4_PTEH) & SH4_PTEH_ASID_MASK;

	/* Invalidate old entry (if any) */
	sh4_tlb_invalidate_addr(asid, va);

	_reg_write_4(SH4_PTEH, asid);
	/* Load new entry */
	_reg_write_4(SH4_PTEH, (va & ~PGOFSET) | asid);
	ptel = pte & PG_HW_BITS;
	if (pte & _PG_PCMCIA) {
		_reg_write_4(SH4_PTEA,
		    (pte >> _PG_PCMCIA_SHIFT) & SH4_PTEA_SA_MASK);
	} else {
		_reg_write_4(SH4_PTEA, 0);
	}
	_reg_write_4(SH4_PTEL, ptel);
	__asm__ __volatile__("ldtlb; nop");

	/* Restore old ASID */
	if (asid != oasid)
		_reg_write_4(SH4_PTEH, oasid);
}