xref: /dragonfly/sys/vfs/procfs/procfs_map.c (revision d4ef6694)
1 /*
2  * Copyright (c) 1993 Jan-Simon Pendry
3  * Copyright (c) 1993
4  *	The Regents of the University of California.  All rights reserved.
5  *
6  * This code is derived from software contributed to Berkeley by
7  * Jan-Simon Pendry.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. Neither the name of the University nor the names of its contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31  * SUCH DAMAGE.
32  *
33  *	@(#)procfs_status.c	8.3 (Berkeley) 2/17/94
34  *
35  * $FreeBSD: src/sys/miscfs/procfs/procfs_map.c,v 1.24.2.1 2001/08/04 13:12:24 rwatson Exp $
36  * $DragonFly: src/sys/vfs/procfs/procfs_map.c,v 1.7 2007/02/19 01:14:24 corecode Exp $
37  */
38 
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/proc.h>
42 #include <sys/vnode.h>
43 #include <vfs/procfs/procfs.h>
44 
45 #include <vm/vm.h>
46 #include <sys/lock.h>
47 #include <vm/pmap.h>
48 #include <vm/vm_map.h>
49 #include <vm/vm_page.h>
50 #include <vm/vm_object.h>
51 
52 #include <machine/limits.h>
53 
/*
 * Size of the per-entry formatting buffer.  Each map entry is rendered
 * into one text line which must fit here; ksnprintf() silently truncates
 * anything longer (e.g. a very long vnode path).
 */
#define MEBUFFERSIZE 256
55 
/*
 * The map entries can *almost* be read with programs like cat.  However,
 * large maps need special programs to read.  It is not easy to implement
 * a program that can sense the required size of the buffer, and then
 * subsequently do a read with the appropriate size.  This operation cannot
 * be atomic.  The best that we can do is to allow the program to do a read
 * with an arbitrarily large buffer, and return as much as we can.  We can
 * return an error code if the buffer is too small (EFBIG), then the program
 * can try a bigger buffer.
 */
/*
 * procfs_domap: generate the text for /proc/<pid>/map.
 *
 * Formats one line per VM map entry of the target lwp's process into the
 * caller's uio.  Only offset-0 reads are supported (a read at any other
 * offset returns 0 bytes, i.e. EOF); the whole map must be fetched in a
 * single read, and EFBIG is returned if the user buffer is too small for
 * the next line.
 *
 * Parameters:
 *	curp - calling process (unused here; part of the pfsnode op契约
 *	       signature shared with the other procfs handlers)
 *	lp   - target lwp whose process' vmspace is dumped
 *	pfs  - procfs node (unused in this handler)
 *	uio  - destination of the formatted text; must be UIO_READ
 *
 * Returns 0 on success, EOPNOTSUPP for writes, EFBIG if the user's
 * buffer cannot hold the next entry's line, or a uiomove() error.
 */
int
procfs_domap(struct proc *curp, struct lwp *lp, struct pfsnode *pfs,
	     struct uio *uio)
{
	struct proc *p = lp->lwp_proc;
	int len;
	struct vnode *vp;
	char *fullpath, *freepath;
	int error;
	vm_map_t map = &p->p_vmspace->vm_map;
	pmap_t pmap = vmspace_pmap(p->p_vmspace);
	vm_map_entry_t entry;
	char mebuffer[MEBUFFERSIZE];

	/* The map file is read-only. */
	if (uio->uio_rw != UIO_READ)
		return (EOPNOTSUPP);

	/*
	 * Non-zero offsets read as EOF: the map can change between reads,
	 * so a continuation read could not be made consistent anyway.
	 */
	if (uio->uio_offset != 0)
		return (0);

	error = 0;
	vm_map_lock_read(map);
	for (entry = map->header.next;
		((uio->uio_resid > 0) && (entry != &map->header));
		entry = entry->next) {
		vm_object_t obj, tobj, lobj;
		int ref_count, shadow_count, flags;
		vm_offset_t addr;
		vm_offset_t ostart;
		int resident, privateresident;
		char *type;

		/* Skip submaps and other non-pageable entry types. */
		if (entry->maptype != VM_MAPTYPE_NORMAL &&
		    entry->maptype != VM_MAPTYPE_VPAGETABLE) {
			continue;
		}

		/* Hold the top-level object so it can't be ripped out. */
		obj = entry->object.vm_object;
		if (obj)
			vm_object_hold(obj);

		/*
		 * If the object is not shared, its resident pages are
		 * private to this mapping; otherwise report 0.
		 */
		if (obj && (obj->shadow_count == 1))
			privateresident = obj->resident_page_count;
		else
			privateresident = 0;

		/*
		 * Use map->hint as a poor man's ripout detector.
		 */
		map->hint = entry;
		ostart = entry->start;

		/*
		 * Count resident pages (XXX can be horrible on 64-bit)
		 */
		resident = 0;
		addr = entry->start;
		while (addr < entry->end) {
			if (pmap_extract(pmap, addr))
				resident++;
			addr += PAGE_SIZE;
		}

		/*
		 * Walk down the backing-object chain to the bottom object
		 * (lobj), which determines the entry's type/path.  Each
		 * step re-checks backing_object after acquiring the hold
		 * (it may have changed in the interlock window) and uses
		 * vm_object_lock_swap() to keep the hold order sane while
		 * dropping the intermediate object.
		 */
		if (obj) {
			lobj = obj;
			while ((tobj = lobj->backing_object) != NULL) {
				KKASSERT(tobj != obj);
				vm_object_hold(tobj);
				if (tobj == lobj->backing_object) {
					if (lobj != obj) {
						vm_object_lock_swap();
						vm_object_drop(lobj);
					}
					lobj = tobj;
				} else {
					/* raced; retry from same lobj */
					vm_object_drop(tobj);
				}
			}
		} else {
			lobj = NULL;
		}

		/*
		 * Classify the bottom object and, for vnode-backed
		 * mappings, resolve the file path.  vref() keeps the
		 * vnode alive across vn_fullpath().
		 */
		freepath = NULL;
		fullpath = "-";
		if (lobj) {
			switch(lobj->type) {
			default:
			case OBJT_DEFAULT:
				type = "default";
				vp = NULL;
				break;
			case OBJT_VNODE:
				type = "vnode";
				vp = lobj->handle;
				vref(vp);
				break;
			case OBJT_SWAP:
				type = "swap";
				vp = NULL;
				break;
			case OBJT_DEVICE:
				type = "device";
				vp = NULL;
				break;
			case OBJT_MGTDEVICE:
				type = "mgtdevice";
				vp = NULL;
				break;
			}

			/*
			 * Note: flags/counts come from the TOP object
			 * (obj), while type/path come from the bottom
			 * object (lobj).
			 */
			flags = obj->flags;
			ref_count = obj->ref_count;
			shadow_count = obj->shadow_count;
			if (vp != NULL) {
				vn_fullpath(p, vp, &fullpath, &freepath, 1);
				vrele(vp);
			}
			if (lobj != obj)
				vm_object_drop(lobj);
		} else {
			type = "none";
			flags = 0;
			ref_count = 0;
			shadow_count = 0;
		}

		/*
		 * format:
		 *  start, end, res, priv res, cow, access, type, (fullpath).
		 */
		ksnprintf(mebuffer, sizeof(mebuffer),
#if LONG_BIT == 64
			  "0x%016lx 0x%016lx %d %d %p %s%s%s %d %d "
#else
			  "0x%08lx 0x%08lx %d %d %p %s%s%s %d %d "
#endif
			  "0x%04x %s %s %s %s\n",
			(u_long)entry->start, (u_long)entry->end,
			resident, privateresident, obj,
			(entry->protection & VM_PROT_READ)?"r":"-",
			(entry->protection & VM_PROT_WRITE)?"w":"-",
			(entry->protection & VM_PROT_EXECUTE)?"x":"-",
			ref_count, shadow_count, flags,
			(entry->eflags & MAP_ENTRY_COW)?"COW":"NCOW",
			(entry->eflags & MAP_ENTRY_NEEDS_COPY)?"NC":"NNC",
			type, fullpath);

		if (obj)
			vm_object_drop(obj);

		/* vn_fullpath() may have allocated the path buffer. */
		if (freepath != NULL) {
			kfree(freepath, M_TEMP);
			freepath = NULL;
		}

		/*
		 * If the remaining user buffer can't hold this whole
		 * line, tell the caller to retry with a bigger buffer.
		 */
		len = strlen(mebuffer);
		if (len > uio->uio_resid) {
			error = EFBIG;
			break;
		}

		/*
		 * We cannot safely hold the map locked while accessing
		 * userspace as a VM fault might recurse the locked map.
		 */
		vm_map_unlock_read(map);
		error = uiomove(mebuffer, len, uio);
		vm_map_lock_read(map);
		if (error)
			break;

		/*
		 * We use map->hint as a poor man's ripout detector.  If
		 * it does not match the entry we set it to prior to
		 * unlocking the map the entry MIGHT now be stale.  In
		 * this case we do an expensive lookup to find our place
		 * in the iteration again.
		 */
		if (map->hint != entry) {
			vm_map_entry_t reentry;

			vm_map_lookup_entry(map, ostart, &reentry);
			entry = reentry;
		}
	}
	vm_map_unlock_read(map);

	return error;
}
254 
255 int
256 procfs_validmap(struct lwp *lp)
257 {
258 	return ((lp->lwp_proc->p_flags & P_SYSTEM) == 0);
259 }
260