xref: /minix/minix/servers/vfs/misc.c (revision fb4fbf7a)
1 /* This file contains a collection of miscellaneous procedures.  Some of them
2  * perform simple system calls.  Some others do a little part of system calls
3  * that are mostly performed by the Process Manager (PM).
4  *
5  * The entry points into this file are
6  *   do_fcntl:	  perform the FCNTL system call
7  *   do_sync:	  perform the SYNC system call
8  *   do_fsync:	  perform the FSYNC system call
9  *   pm_setsid:	  perform VFS's side of setsid system call
10  *   pm_reboot:	  sync disks and prepare for shutdown
11  *   pm_fork:	  adjust the tables after PM has performed a FORK system call
12  *   pm_exit:	  a process has exited; note that in the tables
13  *   pm_setuid:	  set the real and effective uid for some process
14  *   pm_setgid:	  set the real and effective gid for some process
15  *   pm_setgroups: set the supplementary group IDs for some process
16  *   do_svrctl:	  file system control
17  *   do_getsysinfo:	request copy of FS data structure
18  *   pm_dumpcore: create a core dump
19  */
20 
21 #include "fs.h"
22 #include <fcntl.h>
23 #include <assert.h>
24 #include <unistd.h>
25 #include <string.h>
26 #include <minix/callnr.h>
27 #include <minix/safecopies.h>
28 #include <minix/endpoint.h>
29 #include <minix/com.h>
30 #include <minix/sysinfo.h>
31 #include <minix/u64.h>
32 #include <sys/ptrace.h>
33 #include <sys/svrctl.h>
34 #include <sys/resource.h>
35 #include "file.h"
36 #include <minix/vfsif.h>
37 #include "vnode.h"
38 #include "vmnt.h"
39 
40 #define CORE_NAME	"core"
41 #define CORE_MODE	0777	/* mode to use on core image files */
42 
43 #if ENABLE_SYSCALL_STATS
44 unsigned long calls_stats[NR_VFS_CALLS];
45 #endif
46 
47 static void free_proc(int flags);
48 
49 /*===========================================================================*
50  *				do_getsysinfo				     *
51  *===========================================================================*/
52 int do_getsysinfo(void)
53 {
54   struct fproc *rfp;
55   struct fproc_light *rfpl;
56   vir_bytes src_addr, dst_addr;
57   size_t len, buf_size;
58   int what;
59 
60   what = job_m_in.m_lsys_getsysinfo.what;
61   dst_addr = job_m_in.m_lsys_getsysinfo.where;
62   buf_size = job_m_in.m_lsys_getsysinfo.size;
63 
64   /* Only su may call do_getsysinfo. This call may leak information (and is not
65    * stable enough to be part of the API/ABI). In the future, requests from
66    * non-system processes should be denied.
67    */
68 
69   if (!super_user) return(EPERM);
70 
71   switch(what) {
72     case SI_PROC_TAB:
73 	src_addr = (vir_bytes) fproc;
74 	len = sizeof(struct fproc) * NR_PROCS;
75 	break;
76     case SI_DMAP_TAB:
77 	src_addr = (vir_bytes) dmap;
78 	len = sizeof(struct dmap) * NR_DEVICES;
79 	break;
80     case SI_PROCLIGHT_TAB:
81 	/* Fill the light process table for the MIB service upon request. */
82 	rfpl = &fproc_light[0];
83 	for (rfp = &fproc[0]; rfp < &fproc[NR_PROCS]; rfp++, rfpl++) {
84 		rfpl->fpl_tty = rfp->fp_tty;
85 		rfpl->fpl_blocked_on = rfp->fp_blocked_on;
86 		rfpl->fpl_task = rfp->fp_task;
87 	}
88 	src_addr = (vir_bytes) fproc_light;
89 	len = sizeof(fproc_light);
90 	break;
91 #if ENABLE_SYSCALL_STATS
92     case SI_CALL_STATS:
93 	src_addr = (vir_bytes) calls_stats;
94 	len = sizeof(calls_stats);
95 	break;
96 #endif
97     default:
98 	return(EINVAL);
99   }
100 
101   if (len != buf_size)
102 	return(EINVAL);
103 
104   return sys_datacopy_wrapper(SELF, src_addr, who_e, dst_addr, len);
105 }
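
/* Illustrative usage (not part of this file): a privileged system service
 * could request a copy of VFS's process table through the libsys
 * getsysinfo() wrapper, assuming the usual prototype
 * getsysinfo(endpoint_t who, int what, void *where, size_t size).  The
 * supplied size must match the table size exactly, per the check above:
 *
 *	static struct fproc fproc_copy[NR_PROCS];
 *	int r;
 *
 *	r = getsysinfo(VFS_PROC_NR, SI_PROC_TAB, fproc_copy,
 *		sizeof(fproc_copy));
 *	if (r != OK)
 *		printf("getsysinfo(SI_PROC_TAB) failed: %d\n", r);
 */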
106 
107 /*===========================================================================*
108  *				do_fcntl				     *
109  *===========================================================================*/
110 int do_fcntl(void)
111 {
112 /* Perform the fcntl(fd, cmd, ...) system call. */
113 
114   register struct filp *f;
115   int new_fd, fl, r = OK, fcntl_req, fcntl_argx;
116   tll_access_t locktype;
117 
118   fp->fp_fd = job_m_in.m_lc_vfs_fcntl.fd;
119   fp->fp_io_buffer = job_m_in.m_lc_vfs_fcntl.arg_ptr;
120   fp->fp_io_nbytes = job_m_in.m_lc_vfs_fcntl.cmd;
121   fcntl_req = job_m_in.m_lc_vfs_fcntl.cmd;
122   fcntl_argx = job_m_in.m_lc_vfs_fcntl.arg_int;
123 
124   /* Is the file descriptor valid? */
125   locktype = (fcntl_req == F_FREESP) ? VNODE_WRITE : VNODE_READ;
126   if ((f = get_filp(fp->fp_fd, locktype)) == NULL)
127 	return(err_code);
128 
129   switch (fcntl_req) {
130     case F_DUPFD:
131     case F_DUPFD_CLOEXEC:
132 	/* This replaces the old dup() system call. */
133 	if (fcntl_argx < 0 || fcntl_argx >= OPEN_MAX) r = EINVAL;
134 	else if ((r = get_fd(fp, fcntl_argx, 0, &new_fd, NULL)) == OK) {
135 		f->filp_count++;
136 		fp->fp_filp[new_fd] = f;
137 		assert(!FD_ISSET(new_fd, &fp->fp_cloexec_set));
138 		if (fcntl_req == F_DUPFD_CLOEXEC)
139 			FD_SET(new_fd, &fp->fp_cloexec_set);
140 		r = new_fd;
141 	}
142 	break;
143 
144     case F_GETFD:
145 	/* Get close-on-exec flag (FD_CLOEXEC in POSIX Table 6-2). */
146 	r = 0;
147 	if (FD_ISSET(fp->fp_fd, &fp->fp_cloexec_set))
148 		r = FD_CLOEXEC;
149 	break;
150 
151     case F_SETFD:
152 	/* Set close-on-exec flag (FD_CLOEXEC in POSIX Table 6-2). */
153 	if (fcntl_argx & FD_CLOEXEC)
154 		FD_SET(fp->fp_fd, &fp->fp_cloexec_set);
155 	else
156 		FD_CLR(fp->fp_fd, &fp->fp_cloexec_set);
157 	break;
158 
159     case F_GETFL:
160 	/* Get file status flags (O_NONBLOCK, O_APPEND) and access mode. */
161 	fl = f->filp_flags & (O_NONBLOCK | O_APPEND | O_ACCMODE);
162 	r = fl;
163 	break;
164 
165     case F_SETFL:
166 	/* Set file status flags (O_NONBLOCK and O_APPEND). */
167 	fl = O_NONBLOCK | O_APPEND;
168 	f->filp_flags = (f->filp_flags & ~fl) | (fcntl_argx & fl);
169 	break;
170 
171     case F_GETLK:
172     case F_SETLK:
173     case F_SETLKW:
174 	/* Set or clear a file lock. */
175 	r = lock_op(f, fcntl_req);
176 	break;
177 
178     case F_FREESP:
179      {
180 	/* Free a section of a file */
181 	off_t start, end, offset;
182 	struct flock flock_arg;
183 
184 	/* Check if it's a regular file. */
185 	if (!S_ISREG(f->filp_vno->v_mode)) r = EINVAL;
186 	else if (!(f->filp_mode & W_BIT)) r = EBADF;
187 	else {
188 		/* Copy flock data from userspace. */
189 		r = sys_datacopy_wrapper(who_e, fp->fp_io_buffer,
190 			SELF, (vir_bytes) &flock_arg, sizeof(flock_arg));
191 	}
192 
193 	if (r != OK) break;
194 
195 	/* Convert starting offset to signed. */
196 	offset = (off_t) flock_arg.l_start;
197 
198 	/* Figure out starting position base. */
199 	switch(flock_arg.l_whence) {
200 	  case SEEK_SET: start = 0; break;
201 	  case SEEK_CUR: start = f->filp_pos; break;
202 	  case SEEK_END: start = f->filp_vno->v_size; break;
203 	  default: r = EINVAL;
204 	}
205 	if (r != OK) break;
206 
207 	/* Check for overflow or underflow. */
208 	if (offset > 0 && start + offset < start) r = EINVAL;
209 	else if (offset < 0 && start + offset > start) r = EINVAL;
210 	else {
211 		start += offset;
212 		if (start < 0) r = EINVAL;
213 	}
214 	if (r != OK) break;
215 
216 	if (flock_arg.l_len != 0) {
217 		if (start >= f->filp_vno->v_size) r = EINVAL;
218 		else if ((end = start + flock_arg.l_len) <= start) r = EINVAL;
219 		else if (end > f->filp_vno->v_size) end = f->filp_vno->v_size;
220 	} else {
221 		end = 0;
222 	}
223 	if (r != OK) break;
224 
225 	r = req_ftrunc(f->filp_vno->v_fs_e, f->filp_vno->v_inode_nr,start,end);
226 
227 	if (r == OK && flock_arg.l_len == 0)
228 		f->filp_vno->v_size = start;
229 
230 	break;
231      }
232     case F_GETNOSIGPIPE:
233 	r = !!(f->filp_flags & O_NOSIGPIPE);
234 	break;
235     case F_SETNOSIGPIPE:
236 	if (fcntl_argx)
237 		f->filp_flags |= O_NOSIGPIPE;
238 	else
239 		f->filp_flags &= ~O_NOSIGPIPE;
240 	break;
241     case F_FLUSH_FS_CACHE:
242     {
243 	struct vnode *vn = f->filp_vno;
244 	mode_t mode = f->filp_vno->v_mode;
245 	if (!super_user) {
246 		r = EPERM;
247 	} else if (S_ISBLK(mode)) {
248 		/* Block device; flush corresponding device blocks. */
249 		r = req_flush(vn->v_bfs_e, vn->v_sdev);
250 	} else if (S_ISREG(mode) || S_ISDIR(mode)) {
251 		/* Directory or regular file; flush hosting FS blocks. */
252 		r = req_flush(vn->v_fs_e, vn->v_dev);
253 	} else {
254 		/* Remaining file types: the meaning of a flush is unclear, so refuse. */
255 		r = ENODEV;
256 	}
257 	break;
258     }
259     default:
260 	r = EINVAL;
261   }
262 
263   unlock_filp(f);
264   return(r);
265 }
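
/* Illustrative userland usage (not part of this file): the MINIX-specific
 * F_FREESP command handled above takes a struct flock describing the byte
 * range to free; a zero l_len truncates the file at the resolved start
 * position instead.  A sketch, assuming fd is an open, writable regular
 * file descriptor:
 *
 *	struct flock fl;
 *
 *	memset(&fl, 0, sizeof(fl));
 *	fl.l_whence = SEEK_SET;
 *	fl.l_start = 4096;
 *	fl.l_len = 4096;	(a value of 0 would truncate at l_start)
 *	if (fcntl(fd, F_FREESP, &fl) == -1)
 *		perror("fcntl(F_FREESP)");
 */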
266 
267 /*===========================================================================*
268  *				do_sync					     *
269  *===========================================================================*/
270 int do_sync(void)
271 {
272   struct vmnt *vmp;
273   int r = OK;
274 
275   for (vmp = &vmnt[0]; vmp < &vmnt[NR_MNTS]; ++vmp) {
276 	if ((r = lock_vmnt(vmp, VMNT_READ)) != OK)
277 		break;
278 	if (vmp->m_dev != NO_DEV && vmp->m_fs_e != NONE &&
279 		 vmp->m_root_node != NULL) {
280 		req_sync(vmp->m_fs_e);
281 	}
282 	unlock_vmnt(vmp);
283   }
284 
285   return(r);
286 }
287 
288 /*===========================================================================*
289  *				do_fsync				     *
290  *===========================================================================*/
291 int do_fsync(void)
292 {
293 /* Perform the fsync() system call. */
294   struct filp *rfilp;
295   struct vmnt *vmp;
296   dev_t dev;
297   int r = OK;
298 
299   fp->fp_fd = job_m_in.m_lc_vfs_fsync.fd;
300 
301   if ((rfilp = get_filp(fp->fp_fd, VNODE_READ)) == NULL)
302 	return(err_code);
303 
304   dev = rfilp->filp_vno->v_dev;
305   unlock_filp(rfilp);
306 
307   for (vmp = &vmnt[0]; vmp < &vmnt[NR_MNTS]; ++vmp) {
308 	if (vmp->m_dev != dev) continue;
309 	if ((r = lock_vmnt(vmp, VMNT_READ)) != OK)
310 		break;
311 	if (vmp->m_dev != NO_DEV && vmp->m_dev == dev &&
312 		vmp->m_fs_e != NONE && vmp->m_root_node != NULL) {
313 
314 		req_sync(vmp->m_fs_e);
315 	}
316 	unlock_vmnt(vmp);
317   }
318 
319   return(r);
320 }
321 
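/*===========================================================================*
 *				dupvm					     *
 *===========================================================================*/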
322 int dupvm(struct fproc *rfp, int pfd, int *vmfd, struct filp **newfilp)
323 {
324 	int result, procfd;
325 	struct filp *f = NULL;
326 	struct fproc *vmf = fproc_addr(VM_PROC_NR);
327 
328 	*newfilp = NULL;
329 
330 	if ((f = get_filp2(rfp, pfd, VNODE_READ)) == NULL) {
331 		printf("VFS dupvm: get_filp2 failed\n");
332 		return EBADF;
333 	}
334 
335 	if(!(f->filp_vno->v_vmnt->m_fs_flags & RES_HASPEEK)) {
336 		unlock_filp(f);
337 #if 0	/* Noisy diagnostic for mmap() by ld.so */
338 		printf("VFS dupvm: no peek available\n");
339 #endif
340 		return EINVAL;
341 	}
342 
343 	assert(f->filp_vno);
344 	assert(f->filp_vno->v_vmnt);
345 
346 	if (!S_ISREG(f->filp_vno->v_mode) && !S_ISBLK(f->filp_vno->v_mode)) {
347 		printf("VFS: mmap regular/blockdev only; dev 0x%llx ino %llu has mode 0%o\n",
348 			f->filp_vno->v_dev, f->filp_vno->v_inode_nr, f->filp_vno->v_mode);
349 		unlock_filp(f);
350 		return EINVAL;
351 	}
352 
353 	/* get free FD in VM */
354 	if((result=get_fd(vmf, 0, 0, &procfd, NULL)) != OK) {
355 		unlock_filp(f);
356 		printf("VFS dupvm: getfd failed\n");
357 		return result;
358 	}
359 
360 	*vmfd = procfd;
361 
362 	f->filp_count++;
363 	assert(f->filp_count > 0);
364 	vmf->fp_filp[procfd] = f;
365 
366 	*newfilp = f;
367 
368 	return OK;
369 }
370 
371 /*===========================================================================*
372  *				do_vm_call				     *
373  *===========================================================================*/
374 int do_vm_call(void)
375 {
376 /* A call that VM does to VFS.
377  * We must reply with the fixed type VM_VFS_REPLY (and put our result info
378  * in the rest of the message) so VM can tell the difference between a
379  * request from VFS and a reply to this call.
380  */
381 	int req = job_m_in.VFS_VMCALL_REQ;
382 	int req_fd = job_m_in.VFS_VMCALL_FD;
383 	u32_t req_id = job_m_in.VFS_VMCALL_REQID;
384 	endpoint_t ep = job_m_in.VFS_VMCALL_ENDPOINT;
385 	u64_t offset = job_m_in.VFS_VMCALL_OFFSET;
386 	u32_t length = job_m_in.VFS_VMCALL_LENGTH;
387 	int result = OK;
388 	int slot;
389 	struct fproc *rfp, *vmf;
390 	struct filp *f = NULL;
391 	int r;
392 
393 	if(job_m_in.m_source != VM_PROC_NR)
394 		return ENOSYS;
395 
396 	if(isokendpt(ep, &slot) != OK) rfp = NULL;
397 	else rfp = &fproc[slot];
398 
399 	vmf = fproc_addr(VM_PROC_NR);
400 	assert(fp == vmf);
401 	assert(rfp != vmf);
402 
403 	switch(req) {
404 		case VMVFSREQ_FDLOOKUP:
405 		{
406 			int procfd;
407 
408 			/* Lookup fd in referenced process. */
409 
410 			if(!rfp) {
411 				printf("VFS: why isn't ep %d here?!\n", ep);
412 				result = ESRCH;
413 				goto reqdone;
414 			}
415 
416 			if((result = dupvm(rfp, req_fd, &procfd, &f)) != OK) {
417 #if 0   /* Noisy diagnostic for mmap() by ld.so */
418 				printf("vfs: dupvm failed\n");
419 #endif
420 				goto reqdone;
421 			}
422 
423 			if(S_ISBLK(f->filp_vno->v_mode)) {
424 				assert(f->filp_vno->v_sdev != NO_DEV);
425 				job_m_out.VMV_DEV = f->filp_vno->v_sdev;
426 				job_m_out.VMV_INO = VMC_NO_INODE;
427 				job_m_out.VMV_SIZE_PAGES = LONG_MAX;
428 			} else {
429 				job_m_out.VMV_DEV = f->filp_vno->v_dev;
430 				job_m_out.VMV_INO = f->filp_vno->v_inode_nr;
431 				job_m_out.VMV_SIZE_PAGES =
432 					roundup(f->filp_vno->v_size,
433 						PAGE_SIZE)/PAGE_SIZE;
434 			}
435 
436 			job_m_out.VMV_FD = procfd;
437 
438 			result = OK;
439 
440 			break;
441 		}
442 		case VMVFSREQ_FDCLOSE:
443 		{
444 			result = close_fd(fp, req_fd);
445 			if(result != OK) {
446 				printf("VFS: VM fd close for fd %d, %d (%d)\n",
447 					req_fd, fp->fp_endpoint, result);
448 			}
449 			break;
450 		}
451 		case VMVFSREQ_FDIO:
452 		{
453 			result = actual_lseek(fp, req_fd, SEEK_SET, offset,
454 				NULL);
455 
456 			if(result == OK) {
457 				result = actual_read_write_peek(fp, PEEKING,
458 					req_fd, /* vir_bytes */ 0, length);
459 			}
460 
461 			break;
462 		}
463 		default:
464 			panic("VFS: bad request code from VM\n");
465 			break;
466 	}
467 
468 reqdone:
469 	if(f)
470 		unlock_filp(f);
471 
472 	/* fp is VM still. */
473 	assert(fp == vmf);
474 	job_m_out.VMV_ENDPOINT = ep;
475 	job_m_out.VMV_RESULT = result;
476 	job_m_out.VMV_REQID = req_id;
477 
478 	/* Reply asynchronously: VM may not be ready to receive right now,
479 	 * in which case a nonblocking ipc_sendnb() would fail.
480 	 */
481 	job_m_out.m_type = VM_VFS_REPLY;
482 	r = asynsend3(VM_PROC_NR, &job_m_out, 0);
483 	if(r != OK) printf("VFS: couldn't asynsend3() to VM\n");
484 
485 	/* VFS does not reply any further */
486 	return SUSPEND;
487 }
488 
489 /*===========================================================================*
490  *				pm_reboot				     *
491  *===========================================================================*/
492 void pm_reboot(void)
493 {
494 /* Perform the VFS side of the reboot call. This call is performed from the PM
495  * process context.
496  */
497   message m_out;
498   int i, r;
499   struct fproc *rfp, *pmfp;
500 
501   pmfp = fp;
502 
503   do_sync();
504 
505   /* Do exit processing for all leftover processes and servers, but don't
506    * actually exit them (if they were really gone, PM will tell us about it).
507    * Skip processes that handle parts of the file system; we first need to give
508    * them the chance to unmount (which should be possible as all normal
509    * processes have no open files anymore).
510    */
511   /* This is the only place where we allow special modification of "fp". The
512    * reboot procedure should really be implemented as a PM message broadcasted
513    * to all processes, so that each process will be shut down cleanly by a
514    * thread operating on its behalf. Doing everything here is simpler, but it
515    * requires an exception to the strict model of having "fp" be the process
516    * that owns the current worker thread.
517    */
518   for (i = 0; i < NR_PROCS; i++) {
519 	rfp = &fproc[i];
520 
521 	/* Don't just free the proc right away, but let it finish what it was
522 	 * doing first */
523 	if (rfp != fp) lock_proc(rfp);
524 	if (rfp->fp_endpoint != NONE && find_vmnt(rfp->fp_endpoint) == NULL) {
525 		worker_set_proc(rfp);	/* temporarily fake process context */
526 		free_proc(0);
527 		worker_set_proc(pmfp);	/* restore original process context */
528 	}
529 	if (rfp != fp) unlock_proc(rfp);
530   }
531 
532   do_sync();
533   unmount_all(0 /* Don't force */);
534 
535   /* Try to exit all processes again including File Servers */
536   for (i = 0; i < NR_PROCS; i++) {
537 	rfp = &fproc[i];
538 
539 	/* Don't just free the proc right away, but let it finish what it was
540 	 * doing first */
541 	if (rfp != fp) lock_proc(rfp);
542 	if (rfp->fp_endpoint != NONE) {
543 		worker_set_proc(rfp);	/* temporarily fake process context */
544 		free_proc(0);
545 		worker_set_proc(pmfp);	/* restore original process context */
546 	}
547 	if (rfp != fp) unlock_proc(rfp);
548   }
549 
550   do_sync();
551   unmount_all(1 /* Force */);
552 
553   /* Reply to PM for synchronization */
554   memset(&m_out, 0, sizeof(m_out));
555 
556   m_out.m_type = VFS_PM_REBOOT_REPLY;
557 
558   if ((r = ipc_send(PM_PROC_NR, &m_out)) != OK)
559 	panic("pm_reboot: ipc_send failed: %d", r);
560 }
561 
562 /*===========================================================================*
563  *				pm_fork					     *
564  *===========================================================================*/
565 void pm_fork(endpoint_t pproc, endpoint_t cproc, pid_t cpid)
566 {
567 /* Perform those aspects of the fork() system call that relate to files.
568  * In particular, let the child inherit its parent's file descriptors.
569  * The parent and child parameters tell who forked off whom. The file
570  * system uses the same slot numbers as the kernel.  Only PM makes this call.
571  */
572 
573   struct fproc *cp, *pp;
574   int i, parentno, childno;
575   mutex_t c_fp_lock;
576 
577   /* Check up-to-dateness of fproc. */
578   okendpt(pproc, &parentno);
579 
580   /* PM gives child endpoint, which implies process slot information.
581    * Don't call isokendpt, because that will verify if the endpoint
582    * number is correct in fproc, which it won't be.
583    */
584   childno = _ENDPOINT_P(cproc);
585   if (childno < 0 || childno >= NR_PROCS)
586 	panic("VFS: bogus child for forking: %d", cproc);
587   if (fproc[childno].fp_pid != PID_FREE)
588 	panic("VFS: forking on top of in-use child: %d", childno);
589 
590   /* Copy the parent's fproc struct to the child. */
591   /* However, the mutex variables belong to a slot and must stay the same. */
592   c_fp_lock = fproc[childno].fp_lock;
593   fproc[childno] = fproc[parentno];
594   fproc[childno].fp_lock = c_fp_lock;
595 
596   /* Increase the counters in the 'filp' table. */
597   cp = &fproc[childno];
598   pp = &fproc[parentno];
599 
600   for (i = 0; i < OPEN_MAX; i++)
601 	if (cp->fp_filp[i] != NULL) cp->fp_filp[i]->filp_count++;
602 
603   /* Fill in new process and endpoint id. */
604   cp->fp_pid = cpid;
605   cp->fp_endpoint = cproc;
606 
607   /* A forking process never has an outstanding grant, as it isn't blocking on
608    * I/O. */
609   if (GRANT_VALID(pp->fp_grant)) {
610 	panic("VFS: fork: pp (endpoint %d) has grant %d\n", pp->fp_endpoint,
611 	       pp->fp_grant);
612   }
613   if (GRANT_VALID(cp->fp_grant)) {
614 	panic("VFS: fork: cp (endpoint %d) has grant %d\n", cp->fp_endpoint,
615 	       cp->fp_grant);
616   }
617 
618   /* A child is not a process leader, not being revived, etc. */
619   cp->fp_flags = FP_NOFLAGS;
620 
621   /* Record the fact that both root and working dir have another user. */
622   if (cp->fp_rd) dup_vnode(cp->fp_rd);
623   if (cp->fp_wd) dup_vnode(cp->fp_wd);
624 }
625 
626 /*===========================================================================*
627  *				free_proc				     *
628  *===========================================================================*/
629 static void free_proc(int flags)
630 {
631   int i;
632   register struct fproc *rfp;
633   register struct filp *rfilp;
634   register struct vnode *vp;
635   dev_t dev;
636 
637   if (fp->fp_endpoint == NONE)
638 	panic("free_proc: already free");
639 
640   if (fp_is_blocked(fp))
641 	unpause();
642 
643   /* Loop on file descriptors, closing any that are open. */
644   for (i = 0; i < OPEN_MAX; i++) {
645 	(void) close_fd(fp, i);
646   }
647 
648   /* Release root and working directories. */
649   if (fp->fp_rd) { put_vnode(fp->fp_rd); fp->fp_rd = NULL; }
650   if (fp->fp_wd) { put_vnode(fp->fp_wd); fp->fp_wd = NULL; }
651 
652   /* The rest of these actions is only done when processes actually exit. */
653   if (!(flags & FP_EXITING)) return;
654 
655   fp->fp_flags |= FP_EXITING;
656 
657   /* Check if any process is SUSPENDed on this driver.
658    * If a driver exits, unmap its entries in the dmap table.
659    * (unmapping has to be done after the first step, because the
660    * dmap table is used in the first step.)
661    */
662   unsuspend_by_endpt(fp->fp_endpoint);
663   dmap_unmap_by_endpt(fp->fp_endpoint);
664 
665   worker_stop_by_endpt(fp->fp_endpoint); /* Unblock waiting threads */
666   vmnt_unmap_by_endpt(fp->fp_endpoint); /* Invalidate open files if this
667 					     * was an active FS */
668 
669   /* If a session leader exits and it has a controlling tty, then revoke
670    * access to its controlling tty from all other processes using it.
671    */
672   if ((fp->fp_flags & FP_SESLDR) && fp->fp_tty != 0) {
673       dev = fp->fp_tty;
674       for (rfp = &fproc[0]; rfp < &fproc[NR_PROCS]; rfp++) {
675 	  if(rfp->fp_pid == PID_FREE) continue;
676           if (rfp->fp_tty == dev) rfp->fp_tty = 0;
677 
678           for (i = 0; i < OPEN_MAX; i++) {
679 		if ((rfilp = rfp->fp_filp[i]) == NULL) continue;
680 		if (rfilp->filp_mode == FILP_CLOSED) continue;
681 		vp = rfilp->filp_vno;
682 		if (!S_ISCHR(vp->v_mode)) continue;
683 		if (vp->v_sdev != dev) continue;
684 		lock_filp(rfilp, VNODE_READ);
685 		(void) cdev_close(dev); /* Ignore any errors. */
686 		/* FIXME: missing select check */
687 		rfilp->filp_mode = FILP_CLOSED;
688 		unlock_filp(rfilp);
689           }
690       }
691   }
692 
693   /* Exit done. Mark slot as free. */
694   fp->fp_endpoint = NONE;
695   fp->fp_pid = PID_FREE;
696   fp->fp_flags = FP_NOFLAGS;
697 }
698 
699 /*===========================================================================*
700  *				pm_exit					     *
701  *===========================================================================*/
702 void pm_exit(void)
703 {
704 /* Perform the file system portion of the exit(status) system call.
705  * This function is called from the context of the exiting process.
706  */
707 
708   free_proc(FP_EXITING);
709 }
710 
711 /*===========================================================================*
712  *				pm_setgid				     *
713  *===========================================================================*/
714 void pm_setgid(proc_e, egid, rgid)
715 endpoint_t proc_e;
716 int egid;
717 int rgid;
718 {
719   register struct fproc *tfp;
720   int slot;
721 
722   okendpt(proc_e, &slot);
723   tfp = &fproc[slot];
724 
725   tfp->fp_effgid =  egid;
726   tfp->fp_realgid = rgid;
727 }
728 
729 
730 /*===========================================================================*
731  *				pm_setgroups				     *
732  *===========================================================================*/
733 void pm_setgroups(proc_e, ngroups, groups)
734 endpoint_t proc_e;
735 int ngroups;
736 gid_t *groups;
737 {
738   struct fproc *rfp;
739   int slot;
740 
741   okendpt(proc_e, &slot);
742   rfp = &fproc[slot];
743   if (ngroups * sizeof(gid_t) > sizeof(rfp->fp_sgroups))
744 	panic("VFS: pm_setgroups: too much data to copy");
745   if (sys_datacopy_wrapper(who_e, (vir_bytes) groups, SELF, (vir_bytes) rfp->fp_sgroups,
746 		   ngroups * sizeof(gid_t)) == OK) {
747 	rfp->fp_ngroups = ngroups;
748   } else
749 	panic("VFS: pm_setgroups: datacopy failed");
750 }
751 
752 
753 /*===========================================================================*
754  *				pm_setuid				     *
755  *===========================================================================*/
756 void pm_setuid(proc_e, euid, ruid)
757 endpoint_t proc_e;
758 int euid;
759 int ruid;
760 {
761   struct fproc *tfp;
762   int slot;
763 
764   okendpt(proc_e, &slot);
765   tfp = &fproc[slot];
766 
767   tfp->fp_effuid =  euid;
768   tfp->fp_realuid = ruid;
769 }
770 
771 /*===========================================================================*
772  *				pm_setsid				     *
773  *===========================================================================*/
774 void pm_setsid(endpoint_t proc_e)
775 {
776 /* Perform the VFS side of the SETSID call, i.e. get rid of the controlling
777  * terminal of a process, and make the process a session leader.
778  */
779   struct fproc *rfp;
780   int slot;
781 
782   /* Make the process a session leader with no controlling tty. */
783   okendpt(proc_e, &slot);
784   rfp = &fproc[slot];
785   rfp->fp_flags |= FP_SESLDR;
786   rfp->fp_tty = 0;
787 }
788 
789 /*===========================================================================*
790  *				do_svrctl				     *
791  *===========================================================================*/
792 int do_svrctl(void)
793 {
794   unsigned long svrctl;
795   vir_bytes ptr;
796 
797   svrctl = job_m_in.m_lc_svrctl.request;
798   ptr = job_m_in.m_lc_svrctl.arg;
799 
800   if (IOCGROUP(svrctl) != 'F') return(EINVAL);
801 
802   switch (svrctl) {
803     case VFSSETPARAM:
804     case VFSGETPARAM:
805 	{
806 		struct sysgetenv sysgetenv;
807 		char search_key[64];
808 		char val[64];
809 		int r, s;
810 
811 		/* Copy sysgetenv structure to VFS */
812 		if (sys_datacopy_wrapper(who_e, ptr, SELF, (vir_bytes) &sysgetenv,
813 				 sizeof(sysgetenv)) != OK)
814 			return(EFAULT);
815 
816 		/* Basic sanity checking */
817 		if (svrctl == VFSSETPARAM) {
818 			if (sysgetenv.keylen <= 0 ||
819 			    sysgetenv.keylen > (sizeof(search_key) - 1) ||
820 			    sysgetenv.vallen <= 0 ||
821 			    sysgetenv.vallen >= sizeof(val)) {
822 				return(EINVAL);
823 			}
824 		}
825 
826 		/* Copy parameter "key" */
827 		if ((s = sys_datacopy_wrapper(who_e, (vir_bytes) sysgetenv.key,
828 				      SELF, (vir_bytes) search_key,
829 				      sysgetenv.keylen)) != OK)
830 			return(s);
831 		search_key[sysgetenv.keylen] = '\0'; /* Limit string */
832 
833 		/* Is it a parameter we know? */
834 		if (svrctl == VFSSETPARAM) {
835 			if (!strcmp(search_key, "verbose")) {
836 				int verbose_val;
837 				if ((s = sys_datacopy_wrapper(who_e,
838 				    (vir_bytes) sysgetenv.val, SELF,
839 				    (vir_bytes) &val, sysgetenv.vallen)) != OK)
840 					return(s);
841 				val[sysgetenv.vallen] = '\0'; /* Limit string */
842 				verbose_val = atoi(val);
843 				if (verbose_val < 0 || verbose_val > 4) {
844 					return(EINVAL);
845 				}
846 				verbose = verbose_val;
847 				r = OK;
848 			} else {
849 				r = ESRCH;
850 			}
851 		} else { /* VFSGETPARAM */
852 			char small_buf[60];
853 
854 			r = ESRCH;
855 			if (!strcmp(search_key, "print_traces")) {
856 				mthread_stacktraces();
857 				sysgetenv.val = 0;
858 				sysgetenv.vallen = 0;
859 				r = OK;
860 			} else if (!strcmp(search_key, "active_threads")) {
861 				int active = NR_WTHREADS - worker_available();
862 				snprintf(small_buf, sizeof(small_buf) - 1,
863 					 "%d", active);
864 				sysgetenv.vallen = strlen(small_buf);
865 				r = OK;
866 			}
867 
868 			if (r == OK) {
869 				if ((s = sys_datacopy_wrapper(SELF,
870 				    (vir_bytes) &sysgetenv, who_e, ptr,
871 				    sizeof(sysgetenv))) != OK)
872 					return(s);
873 				if (sysgetenv.val != 0) {
874 					if ((s = sys_datacopy_wrapper(SELF,
875 					    (vir_bytes) small_buf, who_e,
876 					    (vir_bytes) sysgetenv.val,
877 					    sysgetenv.vallen)) != OK)
878 						return(s);
879 				}
880 			}
881 		}
882 
883 		return(r);
884 	}
885     default:
886 	return(EINVAL);
887   }
888 }
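
/* Illustrative usage (not part of this file): a privileged utility could
 * raise VFS's verbosity through the svrctl(2) interface handled above.  A
 * sketch, assuming the standard struct sysgetenv layout, with keylen and
 * vallen excluding the terminating NUL:
 *
 *	char key[] = "verbose", val[] = "2";
 *	struct sysgetenv sge;
 *
 *	sge.key = key;
 *	sge.keylen = strlen(key);
 *	sge.val = val;
 *	sge.vallen = strlen(val);
 *	if (svrctl(VFSSETPARAM, &sge) != 0)
 *		perror("svrctl(VFSSETPARAM)");
 */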
889 
890 /*===========================================================================*
891  *				pm_dumpcore				     *
892  *===========================================================================*/
893 int pm_dumpcore(int csig, vir_bytes exe_name)
894 {
895   int r, core_fd;
896   struct filp *f;
897   char core_path[PATH_MAX];
898   char proc_name[PROC_NAME_LEN];
899 
900   /* If a process is blocked, fp->fp_fd holds the fd it's blocked on. Free it
901    * up for use by common_open(). This step is the reason we cannot use this
902    * function to generate a core dump of a process while it is still running
903    * (i.e., without terminating it), as it changes the state of the process.
904    */
905   if (fp_is_blocked(fp))
906           unpause();
907 
908   /* open core file */
909   snprintf(core_path, PATH_MAX, "%s.%d", CORE_NAME, fp->fp_pid);
910   r = core_fd = common_open(core_path, O_WRONLY | O_CREAT | O_TRUNC,
911 	CORE_MODE, FALSE /*for_exec*/);
912   if (r < 0) goto core_exit;
913 
914   /* get process name */
915   r = sys_datacopy_wrapper(PM_PROC_NR, exe_name, VFS_PROC_NR,
916 	(vir_bytes) proc_name, PROC_NAME_LEN);
917   if (r != OK) goto core_exit;
918   proc_name[PROC_NAME_LEN - 1] = '\0';
919 
920   /* write the core dump */
921   f = get_filp(core_fd, VNODE_WRITE);
922   assert(f != NULL);
923   write_elf_core_file(f, csig, proc_name);
924   unlock_filp(f);
925 
926 core_exit:
927   /* The core file descriptor will be closed as part of the process exit. */
928   free_proc(FP_EXITING);
929 
930   return(r);
931 }
932 
933 /*===========================================================================*
934  *				 ds_event				     *
935  *===========================================================================*/
936 void
937 ds_event(void)
938 {
939   char key[DS_MAX_KEYLEN];
940   char *blkdrv_prefix = "drv.blk.";
941   char *chrdrv_prefix = "drv.chr.";
942   u32_t value;
943   int type, r, is_blk;
944   endpoint_t owner_endpoint;
945 
946   /* Get the event and the owner from DS. */
947   while ((r = ds_check(key, &type, &owner_endpoint)) == OK) {
948 	/* Only check for block and character driver up events. */
949 	if (!strncmp(key, blkdrv_prefix, strlen(blkdrv_prefix))) {
950 		is_blk = TRUE;
951 	} else if (!strncmp(key, chrdrv_prefix, strlen(chrdrv_prefix))) {
952 		is_blk = FALSE;
953 	} else {
954 		continue;
955 	}
956 
957 	if ((r = ds_retrieve_u32(key, &value)) != OK) {
958 		printf("VFS: ds_event: ds_retrieve_u32 failed\n");
959 		break;
960 	}
961 	if (value != DS_DRIVER_UP) continue;
962 
963 	/* Perform up. */
964 	dmap_endpt_up(owner_endpoint, is_blk);
965   }
966 
967   if (r != ENOENT) printf("VFS: ds_event: ds_check failed: %d\n", r);
968 }
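
/* Illustrative counterpart (not part of this file): the party that brings a
 * driver up (normally RS) is assumed to publish a matching DS key, which is
 * what makes the ds_check() loop above call dmap_endpt_up().  A sketch for a
 * character driver labeled "tty", assuming the libsys ds_publish_u32()
 * interface:
 *
 *	char key[DS_MAX_KEYLEN];
 *
 *	snprintf(key, sizeof(key), "drv.chr.%s", "tty");
 *	if (ds_publish_u32(key, DS_DRIVER_UP, DSF_OVERWRITE) != OK)
 *		printf("ds_publish_u32(%s) failed\n", key);
 */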
969 
970 /* A function to be called on panic(). */
971 void panic_hook(void)
972 {
973   printf("VFS mthread stacktraces:\n");
974   mthread_stacktraces();
975 }
976 
977 /*===========================================================================*
978  *				do_getrusage				     *
979  *===========================================================================*/
980 int do_getrusage(void)
981 {
982 	/* Obsolete vfs_getrusage(2) call from userland. The getrusage call is
983 	 * now fully handled by PM, and for any future fields that should be
984 	 * supplied by VFS, VFS should be queried by PM rather than by the user
985 	 * program directly.  TODO: remove this call after the next release.
986 	 */
987 	return OK;
988 }
989