/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and code derived from software contributed to
 * Berkeley by William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: Utah $Hdr: mem.c 1.13 89/10/08$
 */

#include <sys/cdefs.h>
/*
 * Memory special file
 */
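
/*
 * This file provides the machine-dependent backing for the powerpc
 * memory special files: /dev/mem (physical memory, CDEV_MINOR_MEM)
 * and /dev/kmem (kernel virtual memory, CDEV_MINOR_KMEM), plus the
 * MEMRANGE ioctls used to manipulate memory range attributes.
 */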

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/ioccom.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/msgbuf.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/uio.h>

#include <machine/md_var.h>
#include <machine/vmparam.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>

#include <machine/memdev.h>

static void ppc_mrinit(struct mem_range_softc *);
static int ppc_mrset(struct mem_range_softc *, struct mem_range_desc *, int *);

MALLOC_DEFINE(M_MEMDESC, "memdesc", "memory range descriptors");

static struct mem_range_ops ppc_mem_range_ops = {
	ppc_mrinit,
	ppc_mrset,
	NULL,
	NULL
};
struct mem_range_softc mem_range_softc = {
	&ppc_mem_range_ops,
	0, 0, NULL
};

/* ARGSUSED */
int
memrw(struct cdev *dev, struct uio *uio, int flags)
{
	struct iovec *iov;
	int error = 0;
	vm_offset_t va, eva, off, v;
	vm_prot_t prot;
	struct vm_page m;
	vm_page_t marr;
	vm_size_t cnt;
	ssize_t orig_resid;

	cnt = 0;
	error = 0;
	orig_resid = uio->uio_resid;

	while (uio->uio_resid > 0 && !error) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("memrw");
			continue;
		}
		if (dev2unit(dev) == CDEV_MINOR_MEM) {
			v = uio->uio_offset;

kmem_direct_mapped:	off = v & PAGE_MASK;
			cnt = PAGE_SIZE - ((vm_offset_t)iov->iov_base &
			    PAGE_MASK);
			cnt = min(cnt, PAGE_SIZE - off);
			cnt = min(cnt, iov->iov_len);

			if (mem_valid(v, cnt)) {
				error = EFAULT;
				break;
			}

			if (hw_direct_map && !pmap_dev_direct_mapped(v, cnt)) {
				error = uiomove((void *)PHYS_TO_DMAP(v), cnt,
				    uio);
			} else {
				m.phys_addr = trunc_page(v);
				marr = &m;
				error = uiomove_fromphys(&marr, off, cnt, uio);
			}
		} else if (dev2unit(dev) == CDEV_MINOR_KMEM) {
			va = uio->uio_offset;

			if (hw_direct_map &&
			    ((va < VM_MIN_KERNEL_ADDRESS) || (va > virtual_end))) {
				v = DMAP_TO_PHYS(va);
				goto kmem_direct_mapped;
			}

			va = trunc_page(uio->uio_offset);
			eva = round_page(uio->uio_offset
			    + iov->iov_len);

			/*
			 * Make sure that all the pages are currently resident
			 * so that we don't create any zero-fill pages.
			 */

			for (; va < eva; va += PAGE_SIZE) {
				if (pmap_extract(kernel_pmap, va) == 0) {
					error = EFAULT;
					break;
				}
			}
			if (error != 0)
				break;

			prot = (uio->uio_rw == UIO_READ)
			    ? VM_PROT_READ : VM_PROT_WRITE;

			va = uio->uio_offset;
			if (((va >= VM_MIN_KERNEL_ADDRESS) && (va <= virtual_end)) &&
			    !kernacc((void *) va, iov->iov_len, prot)) {
				error = EFAULT;
				break;
			}

			error = uiomove((void *)va, iov->iov_len, uio);
		}
	}
	/*
	 * Don't return error if any byte was written.  Read and write
	 * can return error only if no i/o was performed.
	 */
	if (uio->uio_resid != orig_resid)
		error = 0;
	return (error);
}
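
/*
 * Illustrative sketch, not part of the driver: a userland consumer of
 * the read/write path above typically opens /dev/mem and seeks to a
 * physical address.  The address below is a placeholder, and the
 * snippet assumes the caller has the privileges /dev/mem requires.
 *
 *	#include <err.h>
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/dev/mem", O_RDONLY);
 *	uint64_t word;
 *
 *	if (fd < 0)
 *		err(1, "open");
 *	if (pread(fd, &word, sizeof(word), (off_t)0x100000) != sizeof(word))
 *		err(1, "pread");
 *	close(fd);
 */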

/*
 * Allow user processes to mmap(2) some memory sections
 * instead of going through read/write.
 */
int
memmmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr,
    int prot, vm_memattr_t *memattr)
{
	int i;

	if (dev2unit(dev) == CDEV_MINOR_MEM)
		*paddr = offset;
	else
		return (EFAULT);

	for (i = 0; i < mem_range_softc.mr_ndesc; i++) {
		if (!(mem_range_softc.mr_desc[i].mr_flags & MDF_ACTIVE))
			continue;

		if (offset >= mem_range_softc.mr_desc[i].mr_base &&
		    offset < mem_range_softc.mr_desc[i].mr_base +
		    mem_range_softc.mr_desc[i].mr_len) {
			switch (mem_range_softc.mr_desc[i].mr_flags &
			    MDF_ATTRMASK) {
			case MDF_WRITEBACK:
				*memattr = VM_MEMATTR_WRITE_BACK;
				break;
			case MDF_WRITECOMBINE:
				*memattr = VM_MEMATTR_WRITE_COMBINING;
				break;
			case MDF_UNCACHEABLE:
				*memattr = VM_MEMATTR_UNCACHEABLE;
				break;
			case MDF_WRITETHROUGH:
				*memattr = VM_MEMATTR_WRITE_THROUGH;
				break;
			}

			break;
		}
	}

	return (0);
}
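
/*
 * Illustrative sketch, not part of the driver: mapping physical memory
 * through /dev/mem with mmap(2) instead of read/write.  The physical
 * address and length are placeholders; the cache attribute applied to
 * the mapping follows any active memory range descriptor covering it.
 *
 *	#include <sys/mman.h>
 *	#include <err.h>
 *	#include <fcntl.h>
 *
 *	int fd = open("/dev/mem", O_RDWR);
 *	void *p = mmap(NULL, 0x1000, PROT_READ | PROT_WRITE, MAP_SHARED,
 *	    fd, (off_t)0xf0000000);
 *	if (p == MAP_FAILED)
 *		err(1, "mmap");
 */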

static void
ppc_mrinit(struct mem_range_softc *sc)
{
	sc->mr_cap = 0;
	sc->mr_ndesc = 8; /* XXX: Should be dynamically expandable */
	sc->mr_desc = malloc(sc->mr_ndesc * sizeof(struct mem_range_desc),
	    M_MEMDESC, M_WAITOK | M_ZERO);
}

static int
ppc_mrset(struct mem_range_softc *sc, struct mem_range_desc *desc, int *arg)
{
	int i;

	switch (*arg) {
	case MEMRANGE_SET_UPDATE:
		for (i = 0; i < sc->mr_ndesc; i++) {
			if (!sc->mr_desc[i].mr_len) {
				sc->mr_desc[i] = *desc;
				sc->mr_desc[i].mr_flags |= MDF_ACTIVE;
				return (0);
			}
			if (sc->mr_desc[i].mr_base == desc->mr_base &&
			    sc->mr_desc[i].mr_len == desc->mr_len)
				return (EEXIST);
		}
		return (ENOSPC);
	case MEMRANGE_SET_REMOVE:
		for (i = 0; i < sc->mr_ndesc; i++)
			if (sc->mr_desc[i].mr_base == desc->mr_base &&
			    sc->mr_desc[i].mr_len == desc->mr_len) {
				bzero(&sc->mr_desc[i], sizeof(sc->mr_desc[i]));
				return (0);
			}
		return (ENOENT);
	default:
		return (EOPNOTSUPP);
	}

	return (0);
}
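
/*
 * Illustrative sketch, not part of the driver: a kernel consumer (for
 * example a framebuffer driver) could request write-combining for a
 * physical range through the MI wrapper, which lands in ppc_mrset().
 * The base address, length and owner name are placeholders.
 *
 *	struct mem_range_desc mrd = {
 *		.mr_base = 0xf0000000,
 *		.mr_len = 0x00800000,
 *		.mr_flags = MDF_WRITECOMBINE,
 *		.mr_owner = "fb",
 *	};
 *	int arg = MEMRANGE_SET_UPDATE;
 *	error = mem_range_attr_set(&mrd, &arg);
 */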

/*
 * Operations for changing memory attributes.
 *
 * This is basically just an ioctl shim for mem_range_attr_get
 * and mem_range_attr_set.
 */
int
memioctl_md(struct cdev *dev __unused, u_long cmd, caddr_t data, int flags,
    struct thread *td)
{
	int nd, error = 0;
	struct mem_range_op *mo = (struct mem_range_op *)data;
	struct mem_range_desc *md;

	/* is this for us? */
	if ((cmd != MEMRANGE_GET) &&
	    (cmd != MEMRANGE_SET))
		return (ENOTTY);

	/* any chance we can handle this? */
	if (mem_range_softc.mr_op == NULL)
		return (EOPNOTSUPP);

	/* do we have any descriptors? */
	if (mem_range_softc.mr_ndesc == 0)
		return (ENXIO);

	switch (cmd) {
	case MEMRANGE_GET:
		nd = imin(mo->mo_arg[0], mem_range_softc.mr_ndesc);
		if (nd > 0) {
			md = (struct mem_range_desc *)
			    malloc(nd * sizeof(struct mem_range_desc),
			    M_MEMDESC, M_WAITOK);
			error = mem_range_attr_get(md, &nd);
			if (!error)
				error = copyout(md, mo->mo_desc,
				    nd * sizeof(struct mem_range_desc));
			free(md, M_MEMDESC);
		} else
			nd = mem_range_softc.mr_ndesc;
		mo->mo_arg[0] = nd;
		break;

	case MEMRANGE_SET:
		md = (struct mem_range_desc *)malloc(sizeof(struct mem_range_desc),
		    M_MEMDESC, M_WAITOK);
		error = copyin(mo->mo_desc, md, sizeof(struct mem_range_desc));
		/* NUL-terminate the owner string */
		md->mr_owner[sizeof(md->mr_owner) - 1] = 0;
		if (error == 0)
			error = mem_range_attr_set(md, &mo->mo_arg[0]);
		free(md, M_MEMDESC);
		break;
	}
	return (error);
}
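
/*
 * Illustrative sketch, not part of the driver: querying the active
 * range descriptors from userland through the ioctl shim above,
 * roughly what memcontrol(8) does.  The table size is a placeholder.
 *
 *	#include <sys/types.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/memrange.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *
 *	struct mem_range_desc md[8];
 *	struct mem_range_op mo = { .mo_desc = md, .mo_arg = { 8, 0 } };
 *	int fd = open("/dev/mem", O_RDONLY);
 *
 *	if (fd >= 0 && ioctl(fd, MEMRANGE_GET, &mo) == 0)
 *		printf("%d descriptors returned\n", mo.mo_arg[0]);
 */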