xref: /netbsd/sys/arch/usermode/usermode/vm_machdep.c (revision 474ee3bb)
/*	$NetBSD: vm_machdep.c,v 1.5 2018/01/24 09:04:45 skrll Exp $ */

/*-
 * Copyright (c) 2007 Jared D. McNeill <jmcneill@invisible.ca>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.5 2018/01/24 09:04:45 skrll Exp $");

#include <sys/param.h>
#include <sys/buf.h>
#include <sys/proc.h>

#include <uvm/uvm_extern.h>


/*
 * Map a user I/O request into kernel virtual address space.
 * Note: the pages are already locked by uvm_vslock(), so we
 * do not need to pass an access_type to pmap_enter().
 */
/* This code was originally stolen from the alpha port. */

int
vmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t faddr, taddr, off;
	paddr_t pa;
	struct proc *p;
	vm_prot_t prot;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");
	p = bp->b_proc;

	/* Remember the user address and page-align the request. */
	bp->b_saveaddr = bp->b_data;
	faddr = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - faddr;
	len = round_page(off + len);

	/* Allocate kernel virtual address space in phys_map. */
	taddr = uvm_km_alloc(phys_map, len, 0, UVM_KMF_VAONLY | UVM_KMF_WAITVA);
	bp->b_data = (void *)(taddr + off);

	/*
	 * A B_READ transfer stores device data into the pages, so the
	 * kernel mapping must also be writable.
	 */
	len = atop(len);
	prot = bp->b_flags & B_READ ? VM_PROT_READ | VM_PROT_WRITE :
				      VM_PROT_READ;

	/* Enter a wired kernel mapping for each user page. */
	while (len--) {
		if (pmap_extract(vm_map_pmap(&p->p_vmspace->vm_map), faddr,
		    &pa) == false)
			panic("vmapbuf: null page frame");
		pmap_enter(vm_map_pmap(phys_map), taddr, trunc_page(pa),
		    prot, prot | PMAP_WIRED);
		faddr += PAGE_SIZE;
		taddr += PAGE_SIZE;
	}
	pmap_update(vm_map_pmap(phys_map));

	return 0;
}
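
/*
 * Worked example (illustrative only; the addresses and length below are
 * assumptions chosen for the arithmetic, assuming 4 KiB pages): for a
 * user buffer at b_data = 0x7f7ff7b01234 with len = 0x1800,
 *
 *	faddr = trunc_page(0x7f7ff7b01234) = 0x7f7ff7b01000
 *	off   = 0x234
 *	len   = round_page(0x234 + 0x1800) = 0x2000	(two pages)
 *
 * so two wired mappings are entered in phys_map and b_data is rewritten
 * to taddr + 0x234 inside the freshly allocated kernel VA range.
 */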

/*
 * Unmap a previously-mapped user I/O request.
 */
void
vunmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t addr, off;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");

	/* Recover the page-aligned kernel mapping set up by vmapbuf(). */
	addr = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - addr;
	len = round_page(off + len);

	/* Tear down the wired mappings and release the kernel VA. */
	pmap_remove(vm_map_pmap(phys_map), addr, addr + len);
	pmap_update(vm_map_pmap(phys_map));
	uvm_km_free(phys_map, addr, len, UVM_KMF_VAONLY);

	/* Restore the buffer's original user address. */
	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = NULL;
}

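/*
 * Illustrative usage sketch: how a physio(9)-style caller typically
 * pairs these routines.  The locals 'error', 'p', 'todo' and 'strategy'
 * are assumptions for the example only, not code from this port.
 *
 *	error = uvm_vslock(p->p_vmspace, bp->b_data, todo,
 *	    (bp->b_flags & B_READ) ? VM_PROT_WRITE : VM_PROT_READ);
 *	if (error == 0) {
 *		vmapbuf(bp, todo);		wire the pages into phys_map
 *		(*strategy)(bp);		start the transfer
 *		biowait(bp);			wait for it to complete
 *		vunmapbuf(bp, todo);		undo the kernel mapping
 *		uvm_vsunlock(p->p_vmspace, bp->b_data, todo);
 *	}
 */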