1 /*- 2 * Copyright (c) 2007 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Simon 'corecode' Schubert <corecode@fs.ei.tum.de> 6 * by Thomas E. Spanjaard <tgen@netphreax.net> 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in 16 * the documentation and/or other materials provided with the 17 * distribution. 18 * 3. Neither the name of The DragonFly Project nor the names of its 19 * contributors may be used to endorse or promote products derived 20 * from this software without specific, prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 25 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 26 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 27 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 28 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 30 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 31 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 32 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * SUCH DAMAGE. 34 */ 35 36 /* 37 * This is a source file used by both the kernel and libkvm. 
 */

#ifndef _KERNEL
#define _KERNEL_STRUCTURES
#endif

#include <sys/proc.h>
#include <vm/vm_map.h>
#include <sys/kinfo.h>
#include <sys/tty.h>
#include <sys/conf.h>
#include <sys/jail.h>
#include <sys/mplock2.h>
#include <sys/globaldata.h>
#ifdef _KERNEL
#include <sys/systm.h>
#else
#include <string.h>

dev_t devid_from_dev(cdev_t dev);	/* kvm_proc.c */
#endif


#ifndef _KERNEL
/*
 * This is a temporary hack for when libkvm compiles in this file
 * from userland. These functions don't belong here.
 */

/*
 * Normalize a timeval so that 0 <= tv_usec < 1000000, carrying the
 * excess (or deficit) microseconds into tv_sec.
 */
static void
timevalfix(struct timeval *t1)
{

	if (t1->tv_usec < 0) {
		t1->tv_sec--;
		t1->tv_usec += 1000000;
	}
	if (t1->tv_usec >= 1000000) {
		t1->tv_sec++;
		t1->tv_usec -= 1000000;
	}
}

/*
 * t1 += t2, with the result normalized via timevalfix().
 */
static void
timevaladd(struct timeval *t1, const struct timeval *t2)
{

	t1->tv_sec += t2->tv_sec;
	t1->tv_usec += t2->tv_usec;
	timevalfix(t1);
}

/*
 * Accumulate ru2 into ru: add the utime/stime timevals, take the max
 * of ru_maxrss, and pairwise-sum every remaining long-sized counter in
 * the struct (the ru_first..ru_last range defined by <sys/resource.h>).
 */
static void
ruadd(struct rusage *ru, struct rusage *ru2)
{
	long *ip, *ip2;
	int i;

	timevaladd(&ru->ru_utime, &ru2->ru_utime);
	timevaladd(&ru->ru_stime, &ru2->ru_stime);
	if (ru->ru_maxrss < ru2->ru_maxrss)
		ru->ru_maxrss = ru2->ru_maxrss;
	/* sum the remaining counters element by element */
	ip = &ru->ru_first; ip2 = &ru2->ru_first;
	for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
		*ip++ += *ip2++;
}

#endif

/*
 * Fill in a struct kinfo_proc, and zero the lwp fields for a possible
 * fill_kinfo_lwp() aggregation.
 *
 * NOTE! We may be asked to fill in kinfo_proc for a zombied process, and
 * the process may be in the middle of being deallocated.  Check all pointers
 * for NULL.
 *
 * Caller must hold p->p_token
 */
void
fill_kinfo_proc(struct proc *p, struct kinfo_proc *kp)
{
	struct session *sess;
	struct pgrp *pgrp;
	struct vmspace *vm;

	pgrp = p->p_pgrp;
	sess = pgrp ? pgrp->pg_session : NULL;

	/* also zeroes kp_lwp for later fill_kinfo_lwp() aggregation */
	bzero(kp, sizeof(*kp));

	kp->kp_paddr = (uintptr_t)p;
	kp->kp_fd = (uintptr_t)p->p_fd;

	kp->kp_flags = p->p_flags;
	kp->kp_stat = p->p_stat;
	kp->kp_lock = p->p_lock;
	kp->kp_acflag = p->p_acflag;
	kp->kp_traceflag = p->p_traceflag;
	kp->kp_siglist = p->p_siglist;
	/* p_sigacts may already be gone for an exiting process */
	if (p->p_sigacts) {
		kp->kp_sigignore = p->p_sigignore;	/* p_sigacts-> */
		kp->kp_sigcatch = p->p_sigcatch;	/* p_sigacts-> */
		kp->kp_sigflag = p->p_sigacts->ps_flag;
	}
	kp->kp_start = p->p_start;

	/* strncpy() does not guarantee termination; terminate explicitly */
	strncpy(kp->kp_comm, p->p_comm, sizeof(kp->kp_comm) - 1);
	kp->kp_comm[sizeof(kp->kp_comm) - 1] = 0;

	/* credentials (may be NULL mid-teardown) */
	if (p->p_ucred) {
		kp->kp_uid = p->p_ucred->cr_uid;
		kp->kp_ngroups = p->p_ucred->cr_ngroups;
		if (p->p_ucred->cr_groups) {
			bcopy(p->p_ucred->cr_groups, kp->kp_groups,
			      NGROUPS * sizeof(kp->kp_groups[0]));
		}
		kp->kp_ruid = p->p_ucred->cr_ruid;
		kp->kp_svuid = p->p_ucred->cr_svuid;
		kp->kp_rgid = p->p_ucred->cr_rgid;
		kp->kp_svgid = p->p_ucred->cr_svgid;
	}

	kp->kp_pid = p->p_pid;
	/* prefer the original parent pid if the process was reparented */
	if (p->p_oppid != 0)
		kp->kp_ppid = p->p_oppid;
	else
		kp->kp_ppid = p->p_pptr != NULL ? p->p_pptr->p_pid : -1;
	if (pgrp) {
		kp->kp_pgid = pgrp->pg_id;
		kp->kp_jobc = pgrp->pg_jobc;
	}
	if (sess) {
		kp->kp_sid = sess->s_sid;
		bcopy(sess->s_login, kp->kp_login, MAXLOGNAME);
		if (sess->s_ttyvp != NULL)
			kp->kp_auxflags |= KI_CTTY;
		if ((p->p_session != NULL) && SESS_LEADER(p))
			kp->kp_auxflags |= KI_SLEADER;
	}
	/* controlling terminal info, only when the process has one */
	if (sess && (p->p_flags & P_CONTROLT) != 0 && sess->s_ttyp != NULL) {
		kp->kp_tdev = devid_from_dev(sess->s_ttyp->t_dev);
		if (sess->s_ttyp->t_pgrp != NULL)
			kp->kp_tpgid = sess->s_ttyp->t_pgrp->pg_id;
		else
			kp->kp_tpgid = -1;
		if (sess->s_ttyp->t_session != NULL)
			kp->kp_tsid = sess->s_ttyp->t_session->s_sid;
		else
			kp->kp_tsid = -1;
	} else {
		kp->kp_tdev = NOUDEV;
	}
	kp->kp_exitstat = p->p_xstat;
	kp->kp_nthreads = p->p_nthreads;
	kp->kp_nice = p->p_nice;
	kp->kp_swtime = p->p_swtime;

	/* vm statistics; sizes converted to pages with btoc() */
	if ((vm = p->p_vmspace) != NULL) {
		kp->kp_vm_map_size = vm->vm_map.size;
		kp->kp_vm_rssize = vmspace_resident_count(vm);
		kp->kp_vm_swrss = vm->vm_swrss;
		kp->kp_vm_tsize = btoc(vm->vm_tsize);
		kp->kp_vm_dsize = btoc(vm->vm_dsize);
		kp->kp_vm_ssize = btoc(vm->vm_ssize);
	}

	if (p->p_ucred && jailed(p->p_ucred))
		kp->kp_jailid = p->p_ucred->cr_prison->pr_id;

	kp->kp_ru = p->p_ru;
	kp->kp_cru = p->p_cru;
}

/*
 * Fill in a struct kinfo_lwp.  This routine also doubles as an aggregator
 * of lwps for the proc: tick/rusage style fields use "+=" so it may be
 * called once per lwp to accumulate totals.
 *
 * The kl structure must be initially zerod by the caller.  Note that
 * fill_kinfo_proc() will do this for us.
 */
void
fill_kinfo_lwp(struct lwp *lwp, struct kinfo_lwp *kl)
{
	kl->kl_pid = lwp->lwp_proc->p_pid;
	kl->kl_tid = lwp->lwp_tid;

	kl->kl_flags = lwp->lwp_flags;
	kl->kl_stat = lwp->lwp_stat;
	kl->kl_lock = lwp->lwp_lock;
	kl->kl_tdflags = lwp->lwp_thread->td_flags;

	/*
	 * The process/lwp stat may not reflect whether the process is
	 * actually sleeping or not if the related thread was directly
	 * descheduled by LWKT.  Adjust the stat if the thread is not
	 * runnable and not waiting to be scheduled on a cpu by the
	 * user process scheduler.
	 */
	if (kl->kl_stat == LSRUN) {
		if ((kl->kl_tdflags & TDF_RUNQ) == 0 &&
		    (lwp->lwp_mpflags & LWP_MP_ONRUNQ) == 0) {
			kl->kl_stat = LSSLEEP;
		}
	}
#ifdef _KERNEL
	kl->kl_mpcount = get_mplock_count(lwp->lwp_thread);
#else
	/* mplock count is not recoverable from userland/libkvm */
	kl->kl_mpcount = 0;
#endif

	kl->kl_prio = lwp->lwp_usdata.bsd4.priority;	/* XXX TGEN dangerous assumption */
	kl->kl_tdprio = lwp->lwp_thread->td_pri;
	kl->kl_rtprio = lwp->lwp_rtprio;

	/* accumulating fields: aggregate across multiple lwps */
	kl->kl_uticks += lwp->lwp_thread->td_uticks;
	kl->kl_sticks += lwp->lwp_thread->td_sticks;
	kl->kl_iticks += lwp->lwp_thread->td_iticks;
	kl->kl_cpticks += lwp->lwp_cpticks;
	/* a zombie proc contributes no cpu percentage */
	kl->kl_pctcpu += lwp->lwp_proc->p_stat == SZOMB ? 0 : lwp->lwp_pctcpu;
	kl->kl_slptime += lwp->lwp_slptime;
	/* NOTE(review): kl_origcpu is sourced from bsd4.batch — confirm intent */
	kl->kl_origcpu = lwp->lwp_usdata.bsd4.batch;
	kl->kl_estcpu = lwp->lwp_usdata.bsd4.estcpu;
	kl->kl_cpuid = lwp->lwp_thread->td_gd->gd_cpuid;

	ruadd(&kl->kl_ru, &lwp->lwp_ru);

	kl->kl_siglist = lwp->lwp_siglist;
	kl->kl_sigmask = lwp->lwp_sigmask;

	kl->kl_wchan = (uintptr_t)lwp->lwp_thread->td_wchan;
	if (lwp->lwp_thread->td_wmesg) {
		strncpy(kl->kl_wmesg, lwp->lwp_thread->td_wmesg, WMESGLEN);
		kl->kl_wmesg[WMESGLEN] = 0;
	}
	strlcpy(kl->kl_comm, lwp->lwp_thread->td_comm, sizeof(kl->kl_comm));
}

/*
 * Fill in a struct kinfo_proc for kernel threads (i.e.
those without proc). 277 */ 278 void 279 fill_kinfo_proc_kthread(struct thread *td, struct kinfo_proc *kp) 280 { 281 bzero(kp, sizeof(*kp)); 282 283 /* 284 * Fill in fake proc information and semi-fake lwp info. 285 */ 286 kp->kp_pid = -1; 287 kp->kp_tdev = NOUDEV; 288 strncpy(kp->kp_comm, td->td_comm, sizeof(kp->kp_comm) - 1); 289 kp->kp_comm[sizeof(kp->kp_comm) - 1] = 0; 290 kp->kp_flags = P_SYSTEM; 291 if (td != &td->td_gd->gd_idlethread) 292 kp->kp_stat = SACTIVE; 293 else 294 kp->kp_stat = SIDL; 295 kp->kp_nthreads = 1; 296 kp->kp_ktaddr = (uintptr_t)td; 297 298 kp->kp_lwp.kl_pid = -1; 299 kp->kp_lwp.kl_tid = -1; 300 kp->kp_lwp.kl_tdflags = td->td_flags; 301 #ifdef _KERNEL 302 kp->kp_lwp.kl_mpcount = get_mplock_count(td); 303 #else 304 kp->kp_lwp.kl_mpcount = 0; 305 #endif 306 307 kp->kp_lwp.kl_tdprio = td->td_pri; 308 kp->kp_lwp.kl_rtprio.type = RTP_PRIO_THREAD; 309 kp->kp_lwp.kl_rtprio.prio = td->td_pri; 310 311 kp->kp_lwp.kl_uticks = td->td_uticks; 312 kp->kp_lwp.kl_sticks = td->td_sticks; 313 kp->kp_lwp.kl_iticks = td->td_iticks; 314 kp->kp_lwp.kl_cpuid = td->td_gd->gd_cpuid; 315 316 kp->kp_lwp.kl_wchan = (uintptr_t)td->td_wchan; 317 if (td->td_flags & TDF_RUNQ) 318 kp->kp_lwp.kl_stat = LSRUN; 319 else 320 kp->kp_lwp.kl_stat = LSSLEEP; 321 if (td->td_wmesg) { 322 strncpy(kp->kp_lwp.kl_wmesg, td->td_wmesg, WMESGLEN); 323 kp->kp_lwp.kl_wmesg[WMESGLEN] = 0; 324 } 325 strlcpy(kp->kp_lwp.kl_comm, td->td_comm, sizeof(kp->kp_lwp.kl_comm)); 326 } 327