xref: /freebsd/sys/dev/bhnd/nvram/bhnd_nvram_iores.c (revision fdafd315)
1 /*-
2  * Copyright (c) 2016 Landon Fuller <landonf@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer,
10  *    without modification.
11  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
12  *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
13  *    redistribution must be conditioned upon including a substantially
14  *    similar Disclaimer requirement for further binary redistribution.
15  *
16  * NO WARRANTY
17  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19  * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
20  * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
22  * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
25  * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
27  * THE POSSIBILITY OF SUCH DAMAGES.
28  */
29 
30 #include <sys/param.h>
31 #include <sys/bus.h>
32 #include <sys/malloc.h>
33 #include <sys/rman.h>
34 
35 #include <machine/bus.h>
36 
37 #include <dev/bhnd/bhnd.h>
38 
39 #include "bhnd_nvram_private.h"
40 
41 #include "bhnd_nvram_io.h"
42 #include "bhnd_nvram_iovar.h"
43 
44 /**
45  * BHND resource-backed NVRAM I/O context.
46  */
47 struct bhnd_nvram_iores {
48 	struct bhnd_nvram_io	 io;		/**< common I/O instance state */
49 	struct bhnd_resource	*res;		/**< backing resource (borrowed ref) */
50 	size_t			 offset;	/**< offset within res */
51 	size_t			 size;		/**< size relative to the base offset */
52 	u_int			 bus_width;	/**< data type byte width to be used
53 						     when performing bus operations
54 						     on res. (1, 2, or 4 bytes) */
55 };
56 
57 BHND_NVRAM_IOPS_DEFN(iores);
58 
59 /**
60  * Allocate and return a new I/O context backed by a borrowed reference to @p r.
61  *
62  * The caller is responsible for deallocating the returned I/O context via
63  * bhnd_nvram_io_free().
64  *
65  * @param	r		The resource to be mapped by the returned I/O
66  *				context.
67  * @param	offset		Offset
68  * @param	bus_width	The required I/O width (1, 2, or 4 bytes) to be
69  *				used when reading from @p r.
70  *
71  * @retval	bhnd_nvram_io	success.
72  * @retval	NULL		if allocation fails, or an invalid argument
73  *				is supplied.
74  */
75 struct bhnd_nvram_io *
bhnd_nvram_iores_new(struct bhnd_resource * r,bus_size_t offset,bus_size_t size,u_int bus_width)76 bhnd_nvram_iores_new(struct bhnd_resource *r, bus_size_t offset,
77     bus_size_t size, u_int bus_width)
78 {
79 	struct bhnd_nvram_iores	*iores;
80 	rman_res_t		 r_start, r_size;
81 
82 	/* Verify the bus width */
83 	switch (bus_width) {
84 	case 1:
85 	case 2:
86 	case 4:
87 		/* valid */
88 		break;
89 	default:
90 		BHND_NV_LOG("invalid bus width %u\n", bus_width);
91 		return (NULL);
92 	}
93 
94 	/* offset/size must not exceed our internal size_t representation,
95 	 * or our bus_size_t usage (note that BUS_SPACE_MAXSIZE may be less
96 	 * than 2^(sizeof(bus_size_t) * 32). */
97 	if (size > SIZE_MAX || offset > SIZE_MAX) {
98 		BHND_NV_LOG("offset %#jx+%#jx exceeds SIZE_MAX\n",
99 		    (uintmax_t)offset, (uintmax_t)offset);
100 		return (NULL);
101 	}
102 
103 	if (size > BUS_SPACE_MAXSIZE || offset > BUS_SPACE_MAXSIZE)
104 	{
105 		BHND_NV_LOG("offset %#jx+%#jx exceeds BUS_SPACE_MAXSIZE\n",
106 		    (uintmax_t)offset, (uintmax_t)offset);
107 		return (NULL);
108 	}
109 
110 	/* offset/size fall within the resource's mapped range */
111 	r_size = rman_get_size(r->res);
112 	r_start = rman_get_start(r->res);
113 	if (r_size < offset || r_size < size || r_size - size < offset)
114 		return (NULL);
115 
116 	/* offset/size must be bus_width aligned  */
117 	if ((r_start + offset) % bus_width != 0) {
118 		BHND_NV_LOG("base address %#jx+%#jx not aligned to bus width "
119 		    "%u\n", (uintmax_t)r_start, (uintmax_t)offset, bus_width);
120 		return (NULL);
121 	}
122 
123 	if (size % bus_width != 0) {
124 		BHND_NV_LOG("size %#jx not aligned to bus width %u\n",
125 		    (uintmax_t)size, bus_width);
126 		return (NULL);
127 	}
128 
129 	/* Allocate and return the I/O context */
130 	iores = malloc(sizeof(*iores), M_BHND_NVRAM, M_WAITOK);
131 	iores->io.iops = &bhnd_nvram_iores_ops;
132 	iores->res = r;
133 	iores->offset = offset;
134 	iores->size = size;
135 	iores->bus_width = bus_width;
136 
137 	return (&iores->io);
138 }
139 
140 static void
bhnd_nvram_iores_free(struct bhnd_nvram_io * io)141 bhnd_nvram_iores_free(struct bhnd_nvram_io *io)
142 {
143 	free(io, M_BHND_NVRAM);
144 }
145 
146 static size_t
bhnd_nvram_iores_getsize(struct bhnd_nvram_io * io)147 bhnd_nvram_iores_getsize(struct bhnd_nvram_io *io)
148 {
149 	struct bhnd_nvram_iores	*iores = (struct bhnd_nvram_iores *)io;
150 	return (iores->size);
151 }
152 
/* bhnd_nvram_io_setsize() implementation: resizing a resource-backed
 * context is not supported; always returns ENODEV. */
static int
bhnd_nvram_iores_setsize(struct bhnd_nvram_io *io, size_t size)
{
	/* unsupported */
	return (ENODEV);
}
159 
/* bhnd_nvram_io_read_ptr() implementation: direct pointer access is not
 * possible over bus space; always returns ENODEV. */
static int
bhnd_nvram_iores_read_ptr(struct bhnd_nvram_io *io, size_t offset,
    const void **ptr, size_t nbytes, size_t *navail)
{
	/* unsupported */
	return (ENODEV);
}
167 
/* bhnd_nvram_io_write_ptr() implementation: direct pointer access is not
 * possible over bus space; always returns ENODEV. */
static int
bhnd_nvram_iores_write_ptr(struct bhnd_nvram_io *io, size_t offset,
    void **ptr, size_t nbytes, size_t *navail)
{
	/* unsupported */
	return (ENODEV);
}
175 
176 /**
177  * Validate @p offset and @p nbytes:
178  *
179  * - Verify that @p offset is mapped by the backing resource.
180  * - If less than @p nbytes are available at @p offset, write the actual number
181  *   of bytes available to @p nbytes.
182  * - Verify that @p offset + @p nbytes are correctly aligned.
183  */
184 static int
bhnd_nvram_iores_validate_req(struct bhnd_nvram_iores * iores,size_t offset,size_t * nbytes)185 bhnd_nvram_iores_validate_req(struct bhnd_nvram_iores *iores, size_t offset,
186     size_t *nbytes)
187 {
188 	/* Verify offset falls within the resource range */
189 	if (offset > iores->size)
190 		return (ENXIO);
191 
192 	/* Check for eof */
193 	if (offset == iores->size) {
194 		*nbytes = 0;
195 		return (0);
196 	}
197 
198 	/* Verify offset alignment */
199 	if (offset % iores->bus_width != 0)
200 		return (EFAULT);
201 
202 	/* Limit nbytes to available range and verify size alignment */
203 	*nbytes = ummin(*nbytes, iores->size - offset);
204 	if (*nbytes < iores->bus_width && *nbytes % iores->bus_width != 0)
205 		return (EFAULT);
206 
207 	return (0);
208 }
209 
210 static int
bhnd_nvram_iores_read(struct bhnd_nvram_io * io,size_t offset,void * buffer,size_t nbytes)211 bhnd_nvram_iores_read(struct bhnd_nvram_io *io, size_t offset, void *buffer,
212     size_t nbytes)
213 {
214 	struct bhnd_nvram_iores	*iores;
215 	bus_size_t		 r_offset;
216 	size_t			 navail;
217 	int			 error;
218 
219 	iores = (struct bhnd_nvram_iores *)io;
220 
221 	/* Validate the request and determine the actual number of readable
222 	 * bytes */
223 	navail = nbytes;
224 	if ((error = bhnd_nvram_iores_validate_req(iores, offset, &navail)))
225 		return (error);
226 
227 	/* At least nbytes must be readable */
228 	if (navail < nbytes)
229 		return (ENXIO);
230 
231 	/* Handle zero length read */
232 	if (nbytes == 0)
233 		return (0);
234 
235 	/* Determine actual resource offset and perform the read */
236 	r_offset = iores->offset + offset;
237 	switch (iores->bus_width) {
238 	case 1:
239 		bhnd_bus_read_region_stream_1(iores->res, r_offset, buffer,
240 		    nbytes);
241 		break;
242 	case 2:
243 		bhnd_bus_read_region_stream_2(iores->res, r_offset, buffer,
244 		    nbytes / 2);
245 		break;
246 	case 4:
247 		bhnd_bus_read_region_stream_4(iores->res, r_offset, buffer,
248 		    nbytes / 4);
249 		break;
250 	default:
251 		panic("unreachable!");
252 	}
253 
254 	return (0);
255 }
256 
257 static int
bhnd_nvram_iores_write(struct bhnd_nvram_io * io,size_t offset,void * buffer,size_t nbytes)258 bhnd_nvram_iores_write(struct bhnd_nvram_io *io, size_t offset,
259     void *buffer, size_t nbytes)
260 {
261 	struct bhnd_nvram_iores	*iores;
262 	size_t			 navail;
263 	bus_size_t		 r_offset;
264 	int			 error;
265 
266 	iores = (struct bhnd_nvram_iores *)io;
267 
268 	/* Validate the request and determine the actual number of writable
269 	 * bytes */
270 	navail = nbytes;
271 	if ((error = bhnd_nvram_iores_validate_req(iores, offset, &navail)))
272 		return (error);
273 
274 	/* At least nbytes must be writable */
275 	if (navail < nbytes)
276 		return (ENXIO);
277 
278 	/* Determine actual resource offset and perform the write */
279 	r_offset = iores->offset + offset;
280 	switch (iores->bus_width) {
281 	case 1:
282 		bhnd_bus_write_region_stream_1(iores->res, r_offset, buffer,
283 		    nbytes);
284 		break;
285 	case 2:
286 		bhnd_bus_write_region_stream_2(iores->res, r_offset, buffer,
287 		    nbytes / 2);
288 		break;
289 	case 4:
290 		bhnd_bus_write_region_stream_4(iores->res, r_offset, buffer,
291 		    nbytes / 4);
292 		break;
293 	default:
294 		panic("unreachable!");
295 	}
296 
297 	return (0);
298 }
299