xref: /original-bsd/sys/miscfs/procfs/procfs_mem.c (revision deff14a8)
1 /*
2  * Copyright (c) 1993 Jan-Simon Pendry
3  * Copyright (c) 1993 Sean Eric Fagan
4  * Copyright (c) 1993
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * This code is derived from software contributed to Berkeley by
8  * Jan-Simon Pendry and Sean Eric Fagan.
9  *
10  * %sccs.include.redist.c%
11  *
12  *	@(#)procfs_mem.c	8.5 (Berkeley) 06/15/94
13  *
14  * From:
15  *	$Id: procfs_mem.c,v 3.2 1993/12/15 09:40:17 jsp Exp $
16  */
17 
18 /*
19  * This is a lightly hacked and merged version
20  * of sef's pread/pwrite functions
21  */
22 
23 #include <sys/param.h>
24 #include <sys/systm.h>
25 #include <sys/time.h>
26 #include <sys/kernel.h>
27 #include <sys/proc.h>
28 #include <sys/vnode.h>
29 #include <miscfs/procfs/procfs.h>
30 #include <vm/vm.h>
31 #include <vm/vm_kern.h>
32 #include <vm/vm_page.h>
33 
/*
 * Transfer data between the address space of process `p' and the
 * I/O request described by `uio'.  The direction (read or write of
 * the target process) is taken from uio->uio_rw.
 *
 * The transfer is done one page at a time: for each page the target
 * map entry is looked up, made writable if needed, double-mapped into
 * kernel_map, and the copy is performed with uiomove(), which also
 * advances uio_offset/uio_resid for the next iteration.
 *
 * Returns 0 on success or an error code from the VM layer / uiomove().
 */
static int
procfs_rwmem(p, uio)
	struct proc *p;
	struct uio *uio;
{
	int error;
	int writing;		/* non-zero if modifying the target process */

	writing = uio->uio_rw == UIO_WRITE;

	/*
	 * Only map in one page at a time.  We don't have to, but it
	 * makes things easier.  This way is trivial - right?
	 */
	do {
		vm_map_t map, tmap;
		vm_object_t object;
		vm_offset_t kva;	/* kernel double-mapping of the page */
		vm_offset_t uva;	/* user virtual address this pass */
		int page_offset;		/* offset into page */
		vm_offset_t pageno;		/* page number */
		vm_map_entry_t out_entry;
		vm_prot_t out_prot;
		vm_page_t m;
		boolean_t wired, single_use;
		vm_offset_t off;	/* offset of the page within object */
		u_int len;		/* bytes moved this iteration */
		int fix_prot;		/* protection was temporarily widened */

		uva = (vm_offset_t) uio->uio_offset;
		/*
		 * Past the end of the user address space: stop and
		 * report success, i.e. behave like end-of-file.
		 * NOTE(review): the test is `>' rather than `>=', so a
		 * transfer starting exactly at VM_MAXUSER_ADDRESS is
		 * still attempted — confirm this is intentional.
		 */
		if (uva > VM_MAXUSER_ADDRESS) {
			error = 0;
			break;
		}

		/*
		 * Get the page number of this segment.
		 */
		pageno = trunc_page(uva);
		page_offset = uva - pageno;

		/*
		 * How many bytes to copy
		 */
		len = min(PAGE_SIZE - page_offset, uio->uio_resid);

		/*
		 * The map we want...
		 */
		map = &p->p_vmspace->vm_map;

		/*
		 * Check the permissions for the area we're interested
		 * in.
		 */
		fix_prot = 0;
		if (writing)
			fix_prot = !vm_map_check_protection(map, pageno,
					pageno + PAGE_SIZE, VM_PROT_WRITE);

		if (fix_prot) {
			/*
			 * If the page is not writable, we make it so.
			 * XXX It is possible that a page may *not* be
			 * read/executable, if a process changes that!
			 * We will assume, for now, that a page is either
			 * VM_PROT_ALL, or VM_PROT_READ|VM_PROT_EXECUTE.
			 */
			error = vm_map_protect(map, pageno,
					pageno + PAGE_SIZE, VM_PROT_ALL, 0);
			if (error)
				break;
		}

		/*
		 * Now we need to get the page.  out_entry, out_prot, wired,
		 * and single_use aren't used.  One would think the vm code
		 * would be a *bit* nicer...  We use tmap because
		 * vm_map_lookup() can change the map argument.
		 */
		tmap = map;
		error = vm_map_lookup(&tmap, pageno,
				      writing ? VM_PROT_WRITE : VM_PROT_READ,
				      &out_entry, &object, &off, &out_prot,
				      &wired, &single_use);
		/*
		 * We're done with tmap now.
		 */
		if (!error)
			vm_map_lookup_done(tmap, out_entry);

		/*
		 * Fault the page in...
		 * (Only needed for a write into a shadowed object whose
		 * page is absent or still marked copy-on-write: the fault
		 * forces the copy so we don't scribble on the backing
		 * object's page.)
		 */
		if (!error && writing && object->shadow) {
			m = vm_page_lookup(object, off);
			if (m == 0 || (m->flags & PG_COPYONWRITE))
				error = vm_fault(map, pageno,
							VM_PROT_WRITE, FALSE);
		}

		/*
		 * Find space in kernel_map for the page we're interested in.
		 * NOTE(review): kva is passed in uninitialized as the
		 * placement hint; presumably acceptable since the final
		 * argument requests find-space behavior — confirm against
		 * this VM revision's vm_map_find().
		 */
		if (!error)
			error = vm_map_find(kernel_map, object, off, &kva,
					PAGE_SIZE, 1);

		if (!error) {
			/*
			 * Neither vm_map_lookup() nor vm_map_find() appear
			 * to add a reference count to the object, so we do
			 * that here and now.
			 */
			vm_object_reference(object);

			/*
			 * Mark the page we just found as pageable.
			 */
			error = vm_map_pageable(kernel_map, kva,
				kva + PAGE_SIZE, 0);

			/*
			 * Now do the i/o move.
			 */
			if (!error)
				error = uiomove(kva + page_offset, len, uio);

			/* Tear down the kernel double-mapping. */
			vm_map_remove(kernel_map, kva, kva + PAGE_SIZE);
		}
		/*
		 * Undo the temporary protection change made above.
		 * (Per the XXX note, the page is assumed to have been
		 * VM_PROT_READ|VM_PROT_EXECUTE before we widened it.)
		 */
		if (fix_prot)
			vm_map_protect(map, pageno, pageno + PAGE_SIZE,
					VM_PROT_READ|VM_PROT_EXECUTE, 0);
	} while (error == 0 && uio->uio_resid > 0);

	return (error);
}
169 
170 /*
171  * Copy data in and out of the target process.
172  * We do this by mapping the process's page into
173  * the kernel and then doing a uiomove direct
174  * from the kernel address space.
175  */
176 int
177 procfs_domem(curp, p, pfs, uio)
178 	struct proc *curp;
179 	struct proc *p;
180 	struct pfsnode *pfs;
181 	struct uio *uio;
182 {
183 
184 	if (uio->uio_resid == 0)
185 		return (0);
186 
187 	return (procfs_rwmem(p, uio));
188 }
189 
190 /*
191  * Given process (p), find the vnode from which
192  * its text segment is being executed.
193  *
194  * It would be nice to grab this information from
195  * the VM system, however, there is no sure-fire
196  * way of doing that.  Instead, fork(), exec() and
197  * wait() all maintain the p_textvp field in the
198  * process proc structure which contains a held
199  * reference to the exec'ed vnode.
200  */
201 struct vnode *
202 procfs_findtextvp(p)
203 	struct proc *p;
204 {
205 
206 	return (p->p_textvp);
207 }
208 
209 
#ifdef probably_never
/*
 * Given process (p), find the vnode from which
 * its text segment is being mapped.
 *
 * (This is here, rather than in procfs_subr in order
 * to keep all the VM related code in one place.)
 *
 * Disabled alternative to the p_textvp version above: it scans the
 * whole user address space looking for a mapping backed by a vnode
 * pager and returns that pager's vnode.  Left here (with its debug
 * printfs) under `probably_never' for reference only.
 */
struct vnode *
procfs_findtextvp(p)
	struct proc *p;
{
	int error;
	vm_object_t object;
	vm_offset_t pageno;		/* page number */

	/* find a vnode pager for the user address space */

	for (pageno = VM_MIN_ADDRESS;
			pageno < VM_MAXUSER_ADDRESS;
			pageno += PAGE_SIZE) {
		vm_map_t map;
		vm_map_entry_t out_entry;
		vm_prot_t out_prot;
		boolean_t wired, single_use;
		vm_offset_t off;

		/* vm_map_lookup() may change its map argument; no copy
		 * is kept since the lookup is redone each iteration. */
		map = &p->p_vmspace->vm_map;
		error = vm_map_lookup(&map, pageno,
			      VM_PROT_READ,
			      &out_entry, &object, &off, &out_prot,
			      &wired, &single_use);

		if (!error) {
			vm_pager_t pager;

			printf("procfs: found vm object\n");
			vm_map_lookup_done(map, out_entry);
			/* NOTE(review): %x with pointer arguments here and
			 * below — mismatched on any port where pointers are
			 * wider than int; harmless only as debug output. */
			printf("procfs: vm object = %x\n", object);

			/*
			 * At this point, assuming no errors, object
			 * is the VM object mapping UVA (pageno).
			 * Ensure it has a vnode pager, then grab
			 * the vnode from that pager's handle.
			 */

			pager = object->pager;
			printf("procfs: pager = %x\n", pager);
			if (pager)
				printf("procfs: found pager, type = %d\n", pager->pg_type);
			if (pager && pager->pg_type == PG_VNODE) {
				struct vnode *vp;

				vp = (struct vnode *) pager->pg_handle;
				printf("procfs: vp = 0x%x\n", vp);
				return (vp);
			}
		}
	}

	/* No vnode-backed mapping found anywhere in the address space. */
	printf("procfs: text object not found\n");
	return (0);
}
#endif /* probably_never */
275