/* $OpenBSD: bwx.c,v 1.8 2014/12/13 16:26:13 miod Exp $ */
/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/mman.h>
#include <sys/fcntl.h>
#include <sys/sysctl.h>
#include <err.h>
#include <paths.h>
#include <machine/bwx.h>
#include <machine/cpu.h>
#include <machine/sysarch.h>
#include <stdlib.h>
#include <unistd.h>

#include "io.h"

#define	round_page(x)	(((x) + PAGE_MASK) & ~PAGE_MASK)
#define	trunc_page(x)	((x) & ~PAGE_MASK)

#define PATH_APERTURE "/dev/xf86"

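/*
 * Alpha memory barriers: mb() orders all memory accesses, wmb() orders
 * writes only.
 */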
#define mb()	__asm__ volatile("mb"  : : : "memory")
#define wmb()	__asm__ volatile("wmb" : : : "memory")

static int		mem_fd = -1;	/* file descriptor to /dev/mem */
static void	       *bwx_int1_ports = MAP_FAILED; /* mapped int1 io ports */
static void	       *bwx_int2_ports = MAP_FAILED; /* mapped int2 io ports */
static void	       *bwx_int4_ports = MAP_FAILED; /* mapped int4 io ports */
static u_int64_t	bwx_io_base;	/* physical address of ports */
static u_int64_t	bwx_mem_base;	/* physical address of bwx mem */

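/*
 * Open a descriptor for raw physical memory access, preferring /dev/mem
 * and falling back to the X server aperture device.
 */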
static void
bwx_open_mem(void)
{

	if (mem_fd != -1)
		return;
	mem_fd = open(_PATH_MEM, O_RDWR);
	if (mem_fd < 0)
		mem_fd = open(PATH_APERTURE, O_RDWR);
	if (mem_fd < 0)
		err(1, "Failed to open both %s and %s", _PATH_MEM,
		    PATH_APERTURE);
}

static void
bwx_close_mem(void)
{

	if (mem_fd != -1) {
		close(mem_fd);
		mem_fd = -1;
	}
}

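/*
 * Look up the physical base addresses of the chipset's I/O port and
 * memory spaces through the machdep.chipset sysctl nodes.
 */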
static void
bwx_init(void)
{
	size_t len = sizeof(u_int64_t);
	int error;
	int mib[3];

	mib[0] = CTL_MACHDEP;
	mib[1] = CPU_CHIPSET;
	mib[2] = CPU_CHIPSET_PORTS;
	if ((error = sysctl(mib, 3, &bwx_io_base, &len, NULL, 0)) < 0)
		err(1, "machdep.chipset.ports_base");
	mib[2] = CPU_CHIPSET_MEM;
	if ((error = sysctl(mib, 3, &bwx_mem_base, &len, 0, 0)) < 0)
		err(1, "machdep.chipset.memory");
}

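/*
 * Enable access to a range of I/O ports by mapping it through the
 * chipset's BWX INT1/INT2/INT4 windows (byte, word and longword
 * accesses respectively).  Ports cannot be unmapped again.
 */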
static int
bwx_ioperm(u_int32_t from, u_int32_t num, int on)
{
	u_int32_t start, end;

	if (bwx_int1_ports == MAP_FAILED)
		bwx_init();

	if (!on)
		return -1;		/* XXX can't unmap yet */

	if (bwx_int1_ports != MAP_FAILED)
		return 0;

	bwx_open_mem();
	start = trunc_page(from);
	end = round_page(from + num);
	if ((bwx_int1_ports = mmap(0, end-start, PROT_READ|PROT_WRITE,
	    MAP_SHARED, mem_fd, bwx_io_base + BWX_EV56_INT1 + start)) ==
	    MAP_FAILED)
		err(1, "mmap int1");
	if ((bwx_int2_ports = mmap(0, end-start, PROT_READ|PROT_WRITE,
	    MAP_SHARED, mem_fd, bwx_io_base + BWX_EV56_INT2 + start)) ==
	    MAP_FAILED)
		err(1, "mmap int2");
	if ((bwx_int4_ports = mmap(0, end-start, PROT_READ|PROT_WRITE,
	    MAP_SHARED, mem_fd, bwx_io_base + BWX_EV56_INT4 + start)) ==
	    MAP_FAILED)
		err(1, "mmap int4");
	bwx_close_mem();
	return 0;
}

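/*
 * Port I/O accessors.  Each one goes through the window matching its
 * access size; reads are preceded and writes followed by memory
 * barriers to keep device accesses ordered.
 */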
static u_int8_t
bwx_inb(u_int32_t port)
{
	mb();
	return alpha_ldbu(bwx_int1_ports + port);
}

static u_int16_t
bwx_inw(u_int32_t port)
{
	mb();
	return alpha_ldwu(bwx_int2_ports + port);
}

static u_int32_t
bwx_inl(u_int32_t port)
{
	mb();
	return alpha_ldlu(bwx_int4_ports + port);
}

static void
bwx_outb(u_int32_t port, u_int8_t val)
{
	alpha_stb(bwx_int1_ports + port, val);
	mb();
	wmb();
}

static void
bwx_outw(u_int32_t port, u_int16_t val)
{
	alpha_stw(bwx_int2_ports + port, val);
	mb();
	wmb();
}

static void
bwx_outl(u_int32_t port, u_int32_t val)
{
	alpha_stl(bwx_int4_ports + port, val);
	mb();
	wmb();
}

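/* Handle returned by bwx_map_memory(): one mapping per BWX access size. */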
struct bwx_mem_handle {
	void	*virt1;		/* int1 address in user address-space */
	void	*virt2;		/* int2 address in user address-space */
	void	*virt4;		/* int4 address in user address-space */
};

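/*
 * Map a range of bus memory through the INT1, INT2 and INT4 windows and
 * return an opaque handle for the read/write accessors below.  Note the
 * requested size is scaled (shifted left by 5) before mapping.
 */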
static void *
bwx_map_memory(u_int32_t address, u_int32_t size)
{
	struct bwx_mem_handle *h;
	size_t sz = (size_t)size << 5;

	h = malloc(sizeof(struct bwx_mem_handle));
	if (h == NULL) return NULL;
	bwx_open_mem();
	h->virt1 = mmap(0, sz, PROT_READ|PROT_WRITE, MAP_SHARED,
	    mem_fd, bwx_mem_base + BWX_EV56_INT1 + address);
	if (h->virt1 == MAP_FAILED) {
		bwx_close_mem();
		free(h);
		return NULL;
	}
	h->virt2 = mmap(0, sz, PROT_READ|PROT_WRITE, MAP_SHARED,
	    mem_fd, bwx_mem_base + BWX_EV56_INT2 + address);
	if (h->virt2 == MAP_FAILED) {
		munmap(h->virt1, sz);
		bwx_close_mem();
		free(h);
		return NULL;
	}
	h->virt4 = mmap(0, sz, PROT_READ|PROT_WRITE, MAP_SHARED,
	    mem_fd, bwx_mem_base + BWX_EV56_INT4 + address);
	if (h->virt4 == MAP_FAILED) {
		munmap(h->virt1, sz);
		munmap(h->virt2, sz);
		bwx_close_mem();
		free(h);
		return NULL;
	}
	bwx_close_mem();
	return h;
}

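/* Release the mappings established by bwx_map_memory() and free the handle. */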
static void
bwx_unmap_memory(void *handle, u_int32_t size)
{
	struct bwx_mem_handle *h = handle;
	size_t sz = (size_t)size << 5;

	munmap(h->virt1, sz);
	munmap(h->virt2, sz);
	munmap(h->virt4, sz);
	free(h);
}

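/*
 * Memory-space read accessors; each uses the handle's mapping that
 * matches its access size.
 */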
static u_int8_t
bwx_readb(void *handle, u_int32_t offset)
{
	struct bwx_mem_handle *h = handle;

	return alpha_ldbu(h->virt1 + offset);
}

static u_int16_t
bwx_readw(void *handle, u_int32_t offset)
{
	struct bwx_mem_handle *h = handle;

	return alpha_ldwu(h->virt2 + offset);
}

static u_int32_t
bwx_readl(void *handle, u_int32_t offset)
{
	struct bwx_mem_handle *h = handle;

	return alpha_ldlu(h->virt4 + offset);
}

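/*
 * Memory-space write accessors.  Unlike the port I/O routines these do
 * not issue memory barriers; callers must order accesses themselves.
 */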
static void
bwx_writeb(void *handle, u_int32_t offset, u_int8_t val)
{
	struct bwx_mem_handle *h = handle;

	alpha_stb(h->virt1 + offset, val);
}

static void
bwx_writew(void *handle, u_int32_t offset, u_int16_t val)
{
	struct bwx_mem_handle *h = handle;

	alpha_stw(h->virt2 + offset, val);
}

static void
bwx_writel(void *handle, u_int32_t offset, u_int32_t val)
{
	struct bwx_mem_handle *h = handle;

	alpha_stl(h->virt4 + offset, val);
}

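/* Method table exposing the BWX routines through the generic io_ops interface. */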
struct io_ops bwx_io_ops = {
	bwx_ioperm,
	bwx_inb,
	bwx_inw,
	bwx_inl,
	bwx_outb,
	bwx_outw,
	bwx_outl,
	bwx_map_memory,
	bwx_unmap_memory,
	bwx_readb,
	bwx_readw,
	bwx_readl,
	bwx_writeb,
	bwx_writew,
	bwx_writel,
};