/*
 * Copyright (c) 1993 The Regents of the University of California.
 * Copyright (c) 1993 Jan-Simon Pendry
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Jan-Simon Pendry and Sean Eric Fagan.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)procfs_mem.c	8.2 (Berkeley) 01/17/94
 *
 * From:
 *	$Id: procfs_mem.c,v 3.2 1993/12/15 09:40:17 jsp Exp $
 */

/*
 * This is a lightly hacked and merged version
 * of sef's pread/pwrite functions
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <miscfs/procfs/procfs.h>
#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>

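/*
 * procfs_rwmem does the real work for both reads and writes on
 * /proc/<pid>/mem: loop over the uio one page at a time, map the
 * target page into the kernel, uiomove() through that mapping,
 * then unmap the page again.
 */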
static int
procfs_rwmem(p, uio)
	struct proc *p;
	struct uio *uio;
{
	int error;
	int writing;

	writing = uio->uio_rw == UIO_WRITE;

	/*
	 * Only map in one page at a time.  We don't have to, but it
	 * makes things easier.  This way is trivial - right?
	 */
	do {
		vm_map_t map, tmap;
		vm_object_t object;
		vm_offset_t kva;
		vm_offset_t uva;
		int page_offset;		/* offset into page */
		vm_offset_t pageno;		/* page number */
		vm_map_entry_t out_entry;
		vm_prot_t out_prot;
		vm_page_t m;
		boolean_t wired, single_use;
		vm_offset_t off;
		u_int len;
		int fix_prot;

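		/*
		 * The file offset is interpreted as the user virtual
		 * address in the target process.
		 */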
		uva = (vm_offset_t) uio->uio_offset;
		/*
		 * VM_MAXUSER_ADDRESS is the first address beyond the
		 * user address space, so it is out of range too.
		 */
		if (uva >= VM_MAXUSER_ADDRESS) {
			error = 0;
			break;
		}

		/*
		 * Get the page number of this segment.
		 */
		pageno = trunc_page(uva);
		page_offset = uva - pageno;

		/*
		 * How many bytes to copy
		 */
		len = min(PAGE_SIZE - page_offset, uio->uio_resid);

		/*
		 * The map we want...
		 */
		map = &p->p_vmspace->vm_map;

		/*
		 * Check the permissions for the area we're interested
		 * in.
		 */
		fix_prot = 0;
		if (writing)
			fix_prot = !vm_map_check_protection(map, pageno,
					pageno + PAGE_SIZE, VM_PROT_WRITE);

		if (fix_prot) {
			/*
			 * If the page is not writable, we make it so.
			 * XXX It is possible that a page may *not* be
			 * read/executable, if a process changes that!
			 * We will assume, for now, that a page is either
			 * VM_PROT_ALL, or VM_PROT_READ|VM_PROT_EXECUTE.
			 */
			error = vm_map_protect(map, pageno,
					pageno + PAGE_SIZE, VM_PROT_ALL, 0);
			if (error)
				break;
		}

		/*
		 * Now we need to get the page.  out_entry, out_prot, wired,
		 * and single_use aren't used.  One would think the vm code
		 * would be a *bit* nicer...  We use tmap because
		 * vm_map_lookup() can change the map argument.
		 */
		tmap = map;
		error = vm_map_lookup(&tmap, pageno,
				      writing ? VM_PROT_WRITE : VM_PROT_READ,
				      &out_entry, &object, &off, &out_prot,
				      &wired, &single_use);
		/*
		 * We're done with tmap now; vm_map_lookup() returns
		 * with the map locked on success, so release it.
		 */
		if (!error)
			vm_map_lookup_done(tmap, out_entry);

		/*
		 * Fault the page in if we are writing and the object
		 * is shadowing (backed by) another object: the page
		 * may not have been copied yet, or may still be marked
		 * copy-on-write, and a write fault gets us our own copy.
		 */
		if (!error && writing && object->shadow) {
			m = vm_page_lookup(object, off);
			if (m == 0 || (m->flags & PG_COPYONWRITE))
				error = vm_fault(map, pageno,
							VM_PROT_WRITE, FALSE);
		}

		/*
		 * Find space in kernel_map for the page we're interested
		 * in, and map the object there; the final argument asks
		 * vm_map_find() to choose the kernel address for us.
		 */
		if (!error)
			error = vm_map_find(kernel_map, object, off, &kva,
					PAGE_SIZE, 1);

		if (!error) {
			/*
			 * Neither vm_map_lookup() nor vm_map_find() appear
			 * to add a reference count to the object, so we do
			 * that here and now.
			 */
			vm_object_reference(object);

			/*
			 * Wire the page into the kernel map so that
			 * uiomove() cannot fault on it; the final
			 * argument (new_pageable = 0) wires rather
			 * than unwires the range.
			 */
			error = vm_map_pageable(kernel_map, kva,
				kva + PAGE_SIZE, 0);

			/*
			 * Now do the i/o move.
			 */
			if (!error)
				error = uiomove((caddr_t) (kva + page_offset),
						len, uio);

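			/*
			 * Release the kernel mapping; removing the map
			 * entry also drops the object reference we took
			 * above.
			 */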
			vm_map_remove(kernel_map, kva, kva + PAGE_SIZE);
		}
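		/*
		 * Restore the original protection.  As the XXX comment
		 * above says, we assume it was VM_PROT_READ|VM_PROT_EXECUTE.
		 */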
		if (fix_prot)
			vm_map_protect(map, pageno, pageno + PAGE_SIZE,
					VM_PROT_READ|VM_PROT_EXECUTE, 0);
	} while (error == 0 && uio->uio_resid > 0);

	return (error);
}

/*
 * Copy data in and out of the target process.
 * We do this by mapping the process's page into
 * the kernel and then doing a uiomove directly
 * from the kernel address space.
 */
int
procfs_domem(curp, p, pfs, uio)
	struct proc *curp;
	struct proc *p;
	struct pfsnode *pfs;
	struct uio *uio;
{
	int error;

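	/*
	 * XXX curp and pfs are unused; we assume the caller has
	 * already checked that curp may access the target process.
	 */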
	if (uio->uio_resid == 0)
		return (0);

	error = procfs_rwmem(p, uio);

	return (error);
}

/*
 * Given a process (p), find the vnode from which
 * its text segment is being executed.
 *
 * It would be nice to grab this information from
 * the VM system, however, there is no sure-fire
 * way of doing that.  Instead, fork(), exec() and
 * wait() all maintain the p_textvp field in the
 * proc structure, which contains a held reference
 * to the exec'ed vnode.
 */
struct vnode *
procfs_findtextvp(p)
	struct proc *p;
{
	return (p->p_textvp);
}


#ifdef probably_never
/*
 * Given a process (p), find the vnode from which
 * its text segment is being mapped.
 *
 * (This is here, rather than in procfs_subr, in order
 * to keep all the VM related code in one place.)
 */
struct vnode *
procfs_findtextvp(p)
	struct proc *p;
{
	int error;
	vm_object_t object;
	vm_offset_t pageno;		/* page number */

	/* find a vnode pager for the user address space */

	for (pageno = VM_MIN_ADDRESS;
			pageno < VM_MAXUSER_ADDRESS;
			pageno += PAGE_SIZE) {
		vm_map_t map;
		vm_map_entry_t out_entry;
		vm_prot_t out_prot;
		boolean_t wired, single_use;
		vm_offset_t off;

		map = &p->p_vmspace->vm_map;
		error = vm_map_lookup(&map, pageno,
			      VM_PROT_READ,
			      &out_entry, &object, &off, &out_prot,
			      &wired, &single_use);

		if (!error) {
			vm_pager_t pager;

			printf("procfs: found vm object\n");
			vm_map_lookup_done(map, out_entry);
			printf("procfs: vm object = %x\n", object);

			/*
			 * At this point, assuming no errors, object
			 * is the VM object mapping UVA (pageno).
			 * Ensure it has a vnode pager, then grab
			 * the vnode from that pager's handle.
			 */

			pager = object->pager;
			printf("procfs: pager = %x\n", pager);
			if (pager)
				printf("procfs: found pager, type = %d\n", pager->pg_type);
			if (pager && pager->pg_type == PG_VNODE) {
				struct vnode *vp;

				vp = (struct vnode *) pager->pg_handle;
				printf("procfs: vp = 0x%x\n", vp);
				return (vp);
			}
		}
	}

	printf("procfs: text object not found\n");
	return (0);
}
#endif /* probably_never */