/*	$OpenBSD: mmu_sh4.c,v 1.3 2016/03/05 17:16:33 tobiasu Exp $	*/
/*	$NetBSD: mmu_sh4.c,v 1.11 2006/03/04 01:13:35 uwe Exp $	*/

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by UCHIYAMA Yasushi.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>

#include <sh/pte.h>	/* OpenBSD/sh specific PTE */
#include <sh/mmu.h>
#include <sh/mmu_sh4.h>

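/*
 * Eight nops give the pipeline time to settle after an MMU register
 * update, before any instruction that depends on the new state runs.
 */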
#define	SH4_MMU_HAZARD	__asm volatile("nop;nop;nop;nop;nop;nop;nop;nop;")

static inline void __sh4_itlb_invalidate_all(void);

static inline void
__sh4_itlb_invalidate_all(void)
{
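	/*
	 * The SH4 ITLB has four entries; writing zero to each entry's
	 * address array clears its valid (V) bit.
	 */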
	_reg_write_4(SH4_ITLB_AA, 0);
	_reg_write_4(SH4_ITLB_AA | (1 << SH4_ITLB_E_SHIFT), 0);
	_reg_write_4(SH4_ITLB_AA | (2 << SH4_ITLB_E_SHIFT), 0);
	_reg_write_4(SH4_ITLB_AA | (3 << SH4_ITLB_E_SHIFT), 0);
}

void
sh4_mmu_start(void)
{
	/* Zero-clear all TLB entries */
	_reg_write_4(SH4_MMUCR, 0);	/* also zeroes the wired-entry boundary */
	sh_tlb_invalidate_all();

	/* Set current ASID to 0 */
	sh_tlb_set_asid(0);

	/*
	 * Enable address translation, deny user access to the store
	 * queues (SQMD), and set the replace boundary (URB) so that the
	 * top UPAGES UTLB entries are wired for the u-area.
	 */
	_reg_write_4(SH4_MMUCR, SH4_MMUCR_AT | SH4_MMUCR_TI | SH4_MMUCR_SQMD |
	    (SH4_UTLB_ENTRY - UPAGES) << SH4_MMUCR_URB_SHIFT);

	SH4_MMU_HAZARD;
}

void
sh4_tlb_invalidate_addr(int asid, vaddr_t va)
{
	uint32_t pteh;
	int s;

	va &= SH4_PTEH_VPN_MASK;
	s = _cpu_exception_suspend();

	/* Save current ASID */
	pteh = _reg_read_4(SH4_PTEH);
	/* Set ASID for associative write */
	_reg_write_4(SH4_PTEH, asid);

	/* Associative write hits both UTLB and ITLB; no separate ITLB invalidation is needed. */
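	/* The TLB arrays must be touched while executing from the uncached P2 area. */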
	RUN_P2;
	_reg_write_4(SH4_UTLB_AA | SH4_UTLB_A, va); /* Clear D, V */
	RUN_P1;
	/* Restore ASID */
	_reg_write_4(SH4_PTEH, pteh);

	_cpu_exception_resume(s);
}

void
sh4_tlb_invalidate_asid(int asid)
{
	uint32_t a;
	int e, s;

	s = _cpu_exception_suspend();
	/* Invalidate every UTLB entry whose ASID field matches */
	RUN_P2;
	for (e = 0; e < SH4_UTLB_ENTRY; e++) {
		a = SH4_UTLB_AA | (e << SH4_UTLB_E_SHIFT);
		if ((_reg_read_4(a) & SH4_UTLB_AA_ASID_MASK) == asid)
			_reg_write_4(a, 0);
	}

	/* The ITLB has only four entries; just drop them all. */
	__sh4_itlb_invalidate_all();
	RUN_P1;
	_cpu_exception_resume(s);
}

void
sh4_tlb_invalidate_all(void)
{
	uint32_t a;
	int e, eend, s;

	s = _cpu_exception_suspend();
	/*
	 * Invalidate the non-wired entries below the replace boundary
	 * (URB).  If URB is zero, no entries are wired, so clear them all.
	 */
	a = _reg_read_4(SH4_MMUCR) & SH4_MMUCR_URB_MASK;
	eend = a ? (a >> SH4_MMUCR_URB_SHIFT) : SH4_UTLB_ENTRY;

	RUN_P2;
	for (e = 0; e < eend; e++) {
		a = SH4_UTLB_AA | (e << SH4_UTLB_E_SHIFT);
		_reg_write_4(a, 0);
		a = SH4_UTLB_DA1 | (e << SH4_UTLB_E_SHIFT);
		_reg_write_4(a, 0);
	}
	__sh4_itlb_invalidate_all();
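	/* Also zero data array 1 of each ITLB entry so no stale attributes remain. */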
	_reg_write_4(SH4_ITLB_DA1, 0);
	_reg_write_4(SH4_ITLB_DA1 | (1 << SH4_ITLB_E_SHIFT), 0);
	_reg_write_4(SH4_ITLB_DA1 | (2 << SH4_ITLB_E_SHIFT), 0);
	_reg_write_4(SH4_ITLB_DA1 | (3 << SH4_ITLB_E_SHIFT), 0);
	RUN_P1;
	_cpu_exception_resume(s);
}

void
sh4_tlb_update(int asid, vaddr_t va, uint32_t pte)
{
	uint32_t oasid;
	uint32_t ptel;
	int s;

	KDASSERT(asid < 0x100 && (pte & ~PGOFSET) != 0 && va != 0);

	s = _cpu_exception_suspend();
	/* Save old ASID */
	oasid = _reg_read_4(SH4_PTEH) & SH4_PTEH_ASID_MASK;

	/* Invalidate old entry (if any) */
	sh4_tlb_invalidate_addr(asid, va);

	_reg_write_4(SH4_PTEH, asid);
	/* Load new entry */
	_reg_write_4(SH4_PTEH, (va & ~PGOFSET) | asid);
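	/* Keep only the hardware-defined PTE bits for PTEL. */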
	ptel = pte & PG_HW_BITS;
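	/* PCMCIA mappings carry a space attribute that goes into PTEA's SA field. */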
	if (pte & _PG_PCMCIA) {
		_reg_write_4(SH4_PTEA,
		    (pte >> _PG_PCMCIA_SHIFT) & SH4_PTEA_SA_MASK);
	} else {
		_reg_write_4(SH4_PTEA, 0);
	}
	_reg_write_4(SH4_PTEL, ptel);
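	/* ldtlb loads PTEH/PTEL/PTEA into the UTLB entry selected by MMUCR.URC. */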
	__asm volatile("ldtlb; nop");

	/* Restore old ASID */
	if (asid != oasid)
		_reg_write_4(SH4_PTEH, oasid);
	_cpu_exception_resume(s);
}